diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/typing.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/typing.py
deleted file mode 100644
index b572e2c252db380effc5863015ed78d9479a5bb4..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/typing.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from typing import Any, List
-
-from pydantic import BaseModel
-
-
-class Choice(BaseModel):
-    text: str
-    index: int
-    logprobs: Any
-    finish_reason: str
-
-
-class Usage(BaseModel):
-    prompt_tokens: int
-    completion_tokens: int
-    total_tokens: int
-
-
-class ForeFrontResponse(BaseModel):
-    id: str
-    object: str
-    created: int
-    model: str
-    choices: List[Choice]
-    usage: Usage
-    text: str
-
-
-class AccountData(BaseModel):
-    token: str
-    user_id: str
-    session_id: str
diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/models/__init__.py b/spaces/17TheWord/RealESRGAN/realesrgan/models/__init__.py
deleted file mode 100644
index 0be7105dc75d150c49976396724085f678dc0675..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/models/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-from basicsr.utils import scandir
-from os import path as osp
-
-# automatically scan and import model modules for registry
-# scan all the files that end with '_model.py' under the model folder
-model_folder = osp.dirname(osp.abspath(__file__))
-model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
-# import all the model modules
-_model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames]
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/El Secreto Oculto Pelicula Mexicana Descargarl.md b/spaces/1gistliPinn/ChatGPT4/Examples/El Secreto Oculto Pelicula Mexicana Descargarl.md
deleted file mode 100644
index 21bae8b0202dae33ee89d267c35b40905b225e68..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/El Secreto Oculto Pelicula Mexicana Descargarl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

El Secreto Oculto Pelicula Mexicana Descargarl


Download Zip >>>>> https://imgfil.com/2uy0TS



-
-Upon going to live with her aunt, she realizes that her aunt keeps a secret, one that has remained hidden and guarded for centuries; one for which men ... 4d29de3e1b
-
-
-

diff --git a/spaces/1line/AutoGPT/autogpt/processing/html.py b/spaces/1line/AutoGPT/autogpt/processing/html.py
deleted file mode 100644
index 81387b12adab5023150c55f2075ddd40b554f386..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/autogpt/processing/html.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""HTML processing functions"""
-from __future__ import annotations
-
-from bs4 import BeautifulSoup
-from requests.compat import urljoin
-
-
-def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
-    """Extract hyperlinks from a BeautifulSoup object
-
-    Args:
-        soup (BeautifulSoup): The BeautifulSoup object
-        base_url (str): The base URL
-
-    Returns:
-        List[Tuple[str, str]]: The extracted hyperlinks
-    """
-    return [
-        (link.text, urljoin(base_url, link["href"]))
-        for link in soup.find_all("a", href=True)
-    ]
-
-
-def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
-    """Format hyperlinks to be displayed to the user
-
-    Args:
-        hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
-    Returns:
-        List[str]: The formatted hyperlinks
-    """
-    return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Free Download Global Truck Simulator - The Most Realistic Truck Simulation Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Free Download Global Truck Simulator - The Most Realistic Truck Simulation Game.md
deleted file mode 100644
index 234b782bb2e22f151b6dac8132bc190ec591e4e9..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Free Download Global Truck Simulator - The Most Realistic Truck Simulation Game.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-

Global Truck Simulator APK Download: How to Enjoy Driving a Big Rig on Your Mobile Device

-

Do you love driving trucks and delivering cargo across different countries and continents? Do you want to experience the thrill and challenge of driving a big rig on your mobile device? If you answered yes, then you should try Global Truck Simulator, one of the best truck simulator games for Android devices. In this article, we will tell you everything you need to know about this game, including what it is, what features it has, how to download and install it, and how to play it. We will also share some tips and tricks to help you become a successful truck driver in the game.

-

What is Global Truck Simulator?

-

Global Truck Simulator is a realistic and immersive truck driving game for Android devices. It is developed by Ocypode Studios, a company that specializes in creating simulation games. The game lets you drive various trucks and deliver different cargoes across the world, from Europe to America, from Asia to Africa. You can choose from iconic American models like Chevrolet, Western Star, and Hummer, or European models like Renault, Volvo, and Mercedes-Benz. You can also customize your trucks with optional lights, bars, horns, beacons, smoke exhausts, and more.

-

global truck simulator apk download


Download Zip ✔✔✔ https://urlin.us/2uSTMJ



-

Features of Global Truck Simulator

-

Global Truck Simulator has many features that make it stand out from other truck simulator games. Here are some of them:

-

Various truck models and customization options

-

The game offers a wide range of truck models that you can choose from, each with its own specifications, performance, and appearance. You can also customize your trucks with different parts and accessories, such as engines, transmissions, tires, wheels, paint jobs, decals, etc. You can even design your own truck from scratch using the in-game editor.

-

Diverse and challenging terrains and routes

-

The game features realistic terrains that react to the movement and weight of your truck. You will have to drive through rivers, muddy roads, snowy mountains, deserts, forests, cities, highways, and more. You will also have to deal with different weather conditions, such as rain, fog, snow, wind, etc. The game also has dynamic day-night cycles that affect the visibility and traffic on the roads.

-

Career mode and multiplayer mode

-

The game has two modes that you can play: career mode and multiplayer mode. In career mode, you can start your own trucking business and manage it for maximum profits. You can hire drivers, buy garages, accept contracts, deliver cargoes, upgrade your trucks, etc. You can also compete with other players in leaderboards and achievements. In multiplayer mode, you can join or create online sessions with up to three other players. You can chat with them, cooperate with them, or challenge them in races or missions.

-

How to Download and Install Global Truck Simulator APK?

-

If you want to play Global Truck Simulator on your Android device, you will need to download and install its APK file. An APK file is an application package file that contains all the files needed to run an Android app. There are two ways to download and install Global Truck Simulator APK:

-

Steps to download and install the game from the official website or Google Play Store

-

The easiest way to download and install Global Truck Simulator APK is to get it from its official website or Google Play Store. Here are the steps to do so:

-

global truck simulator mod apk download
-global truck simulator 2023 apk download
-global truck simulator pro apk download
-global truck simulator offline apk download
-global truck simulator latest version apk download
-global truck simulator free apk download
-global truck simulator hack apk download
-global truck simulator unlimited money apk download
-global truck simulator online apk download
-global truck simulator 3d apk download
-global truck simulator europe apk download
-global truck simulator android apk download
-global truck simulator pc apk download
-global truck simulator game apk download
-global truck simulator full apk download
-global truck simulator premium apk download
-global truck simulator hd apk download
-global truck simulator real apk download
-global truck simulator new apk download
-global truck simulator best apk download
-global truck simulator world apk download
-global truck simulator usa apk download
-global truck simulator india apk download
-global truck simulator brazil apk download
-global truck simulator russia apk download
-global truck simulator china apk download
-global truck simulator japan apk download
-global truck simulator canada apk download
-global truck simulator australia apk download
-global truck simulator africa apk download
-global truck simulator uk apk download
-global truck simulator germany apk download
-global truck simulator france apk download
-global truck simulator italy apk download
-global truck simulator spain apk download
-global truck simulator turkey apk download
-global truck simulator sweden apk download
-global truck simulator norway apk download
-global truck simulator finland apk download
-global truck simulator denmark apk download
-global truck simulator netherlands apk download
-global truck simulator belgium apk download
-global truck simulator switzerland apk download
-global truck simulator austria apk download
-global truck simulator poland apk download
-global truck simulator czechia apk download
-global truck simulator hungary apk download
-global truck simulator romania apk download

-
    -
  1. Go to the official website of Global Truck Simulator at https://globaltrucksimulator.com/ or search for it on Google Play Store.
  2. -
  3. Click on the download button or the install button to start the download process.
  4. -
  5. Wait for the download to finish and then open the APK file.
  6. -
  7. Follow the instructions on the screen to install the game on your device.
  8. -
  9. Launch the game and enjoy driving a big rig on your mobile device.
  10. -
-

Tips to avoid malware and viruses when downloading APK files from third-party sources

-

If you want to download Global Truck Simulator APK from a third-party source, such as a website or a file-sharing platform, you need to be careful and follow some precautions. This is because some APK files may contain malware or viruses that can harm your device or steal your personal information. Here are some tips to avoid malware and viruses when downloading APK files from third-party sources:

- -

How to Play Global Truck Simulator?

-

Now that you have downloaded and installed Global Truck Simulator APK on your device, you are ready to play the game. Here are some basic controls and gameplay mechanics that you need to know:

-

Basic controls and gameplay mechanics

-

The game has simple and intuitive controls that let you drive your truck with ease. You can use the steering wheel, pedals, buttons, or tilt your device to control your truck. You can also switch between different camera views, such as cockpit, exterior, or top-down. You can also use indicators, headlights, horn, wipers, etc. to communicate with other drivers on the road.

-

The game has realistic physics and graphics that make you feel like you are driving a real truck. You will have to follow the traffic rules, obey the speed limits, pay attention to the signs, signals, and road conditions, etc. You will also have to manage your fuel, cargo weight, damage, fatigue, etc. You will have to park your truck in designated areas and unload your cargo at the end of each delivery.

-

Tips and tricks to master the roads and earn more money

-

If you want to become a successful truck driver in Global Truck Simulator, you will need some tips and tricks to master the roads and earn more money. Here are some of them:

- -

Conclusion

-

Global Truck Simulator is a fun and realistic truck driving game for Android devices. It lets you drive various trucks and deliver different cargoes across the world. It has many features that make it stand out from other truck simulator games, such as various truck models and customization options, diverse and challenging terrains and routes, career mode and multiplayer mode, etc. You can download and install Global Truck Simulator APK from its official website or Google Play Store easily. You can also play the game with simple and intuitive controls and realistic physics and graphics. You can also use some tips and tricks to master the roads and earn more money in the game.

-

If you are looking for a truck simulator game that offers a lot of fun and challenge on your mobile device, you should definitely try Global Truck Simulator. It is one of the best truck simulator games for Android devices that you can find.

-

FAQs

-

Here are some frequently asked questions about Global Truck Simulator:

-
    -
  1. Is Global Truck Simulator free?
  2. -

    Yes, Global Truck Simulator is free to download and play, but it contains ads and in-app purchases that you can use to buy more trucks, parts, accessories, etc.

    -
  3. What are the system requirements for Global Truck Simulator?
  4. -

    The game requires Android 4.4 or higher and at least 1 GB of RAM and 500 MB of storage space. It also requires a stable internet connection for online features.

    -
  5. How can I contact the developers of Global Truck Simulator?
  6. -

    You can contact the developers of Global Truck Simulator by sending an email to ocypode.studios@gmail.com or by visiting their Facebook page at https://www.facebook.com/ocypodestudios.

    -
  7. Can I play Global Truck Simulator offline?
  8. -

    Yes, you can play Global Truck Simulator offline, but you will not be able to access some features, such as multiplayer mode, leaderboards, achievements, etc.

    -
  9. Can I play Global Truck Simulator on PC or other devices?
  10. -

    No, Global Truck Simulator is only available for Android devices. However, you can use an Android emulator on your PC or other devices to run the game.

    -

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Angry Birds Classic Mod APK - The Best Way to Play the Classic Game with More Features.md b/spaces/1phancelerku/anime-remove-background/Angry Birds Classic Mod APK - The Best Way to Play the Classic Game with More Features.md
deleted file mode 100644
index 064ed7b695de49b67e74f235bf88c1cb919994f2..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Angry Birds Classic Mod APK - The Best Way to Play the Classic Game with More Features.md
+++ /dev/null
@@ -1,78 +0,0 @@
-

Download Game Angry Birds Classic Mod Apk

-

If you are looking for a fun and addictive game to play on your Android device, you should definitely try Angry Birds Classic. This is one of the most popular and successful games ever created, with millions of fans around the world. However, if you want to enjoy the game to the fullest, you should download Angry Birds Classic Mod Apk, which is a modified version of the original game that offers many benefits and advantages. In this article, we will tell you everything you need to know about Angry Birds Classic Mod Apk, including how to download and install it, what features it has, and why you should get it.

-

Introduction

-

Angry Birds Classic is a game that was released in 2009 by Rovio Entertainment, a Finnish company. The game is based on a simple but brilliant idea: you have to use a slingshot to launch birds at pigs who have stolen their eggs. The pigs are hiding in various structures made of wood, stone, ice, and other materials, and you have to destroy them all to complete each level. The game has hundreds of levels, each with different challenges and objectives. The game also has different types of birds, each with their own abilities and characteristics. For example, some birds can explode, some can split into multiple birds, some can boomerang, and some can drop eggs.

-

download game angry birds classic mod apk


DOWNLOADhttps://jinyurl.com/2uNOOD



-

What is Angry Birds Classic Mod Apk?

-

Angry Birds Classic Mod Apk is a modified version of the original game that has been created by third-party developers. The mod apk file is an installation file that contains the game data and some changes that alter the gameplay. The mod apk file allows you to access features and options that are not available in the official version of the game. For example, you can get unlimited money and power-ups, unlock all levels and episodes, remove ads and pop-ups, and enjoy high-quality graphics and sound effects.

-

Why download Angry Birds Classic Mod Apk?

-

There are many reasons why you should download Angry Birds Classic Mod Apk instead of playing the official version of the game. Here are some of them:

- -

How to download and install Angry Birds Classic Mod Apk

-

If you are interested in downloading Angry Birds Classic Mod Apk, you should follow these simple steps:

-

Step 1: Enable unknown sources

-

Since Angry Birds Classic Mod Apk is not available on the Google Play Store, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the official store. To do this, go to your device settings, then security, then enable unknown sources. You will see a warning message, but you can ignore it and tap OK.

-

Step 2: Download the mod apk file

-

Next, you have to download the mod apk file from a reliable source. You can use the link below to download the latest version of Angry Birds Classic Mod Apk. The file size is about 100 MB, so make sure you have enough storage space and a stable internet connection.

-

Download Angry Birds Classic Mod Apk

-

download angry birds classic unlimited money mod apk
-download game angry birds classic hack mod apk
-download angry birds classic mod apk latest version
-download game angry birds classic mod apk offline
-download angry birds classic mod apk for android
-download game angry birds classic mod apk free
-download angry birds classic mod apk unlimited everything
-download game angry birds classic mod apk no ads
-download angry birds classic mod apk full version
-download game angry birds classic mod apk revdl
-download angry birds classic mega mod apk
-download game angry birds classic mod apk android 1
-download angry birds classic mod apk all levels unlocked
-download game angry birds classic mod apk unlimited gems
-download angry birds classic mod apk old version
-download game angry birds classic mod apk rexdl
-download angry birds classic premium mod apk
-download game angry birds classic mod apk unlimited coins
-download angry birds classic mod apk all episodes unlocked
-download game angry birds classic mod apk unlimited power ups
-download angry birds classic cracked mod apk
-download game angry birds classic mod apk 2023
-download angry birds classic original mod apk
-download game angry birds classic mod apk pure
-download angry birds classic pro mod apk

-

Step 3: Install the mod apk file

-

After downloading the mod apk file, you have to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a confirmation message, but you can ignore it and tap Install. The installation process will take a few seconds, depending on your device performance.

-

Step 4: Launch the game and enjoy

-

Finally, you can launch the game and enjoy all the features and benefits of Angry Birds Classic Mod Apk. You will see a new icon on your home screen or app drawer with the name Angry Birds Classic Mod. Tap on it and start playing the game. You will notice that you have unlimited money and power-ups, all levels and episodes unlocked, no ads and pop-ups, and high-quality graphics and sound effects.

-

Features of Angry Birds Classic Mod Apk

-

Angry Birds Classic Mod Apk has many features that make it better than the original game. Here are some of them:

-

Unlimited money and power-ups

-

With Angry Birds Classic Mod Apk, you don't have to worry about running out of money or power-ups. You can use them as much as you want without any limitations. Money is used to buy power-ups, such as slingshot upgrades, mighty eagles, shockwaves, and more. Power-ups are used to boost your performance and help you complete difficult levels. You can also use money to customize your birds with different hats, glasses, and accessories.

-

All levels and episodes unlocked

-

With Angry Birds Classic Mod Apk, you don't have to complete previous levels or earn stars to access new ones. You can play any level you want at any time. The game has hundreds of levels, divided into different episodes, such as Poached Eggs, Mighty Hoax, Danger Above, The Big Setup, Ham 'Em High, Mine and Dine, Birdday Party, Bad Piggies, Surf and Turf, Red's Mighty Feathers, Short Fuse, Flock Favorites, BirdDay 5, Bird Island, Piggy Farm, Jurassic Pork, Golden Eggs, and more. Each episode has its own theme, setting, and challenges.

-

No ads and pop-ups

-

With Angry Birds Classic Mod Apk, you don't have to watch annoying videos or banners that take up your screen space and slow down your device. You can enjoy the game without any interruptions or distractions. You can also save your data and battery by not loading unnecessary ads.

-

High-quality graphics and sound effects

-

With Angry Birds Classic Mod Apk, you don't have to compromise on the visual and audio quality of the game. You can see every detail and hear every sound clearly. The game has high-quality graphics that are colorful and vibrant. The game also has sound effects that are realistic and fun. You can hear the birds' voices, the pigs' grunts, the explosions' booms, and the music's tunes.

-

Conclusion

-

Angry Birds Classic is a game that everyone should try at least once in their life. It is a game that is fun and addictive, but also challenging and rewarding. However, if you want to enjoy the game to the fullest, you should download Angry Birds Classic Mod Apk, which is a modified version of the original game that offers many benefits and advantages. You can get unlimited money and power-ups, unlock all levels and episodes, remove ads and pop-ups, and enjoy high-quality graphics and sound effects. You can download Angry Birds Classic Mod Apk from the link below and follow the simple steps to install it on your device. You will be amazed by how much fun you can have with this game. So, what are you waiting for? Download Angry Birds Classic Mod Apk now and start slinging those birds at those pigs!

-

FAQs

-

Here are some frequently asked questions about Angry Birds Classic Mod Apk:

-

Is Angry Birds Classic Mod Apk safe to download and install?

-

Yes, Angry Birds Classic Mod Apk is safe to download and install, as long as you use a reliable source. The mod apk file does not contain any viruses or malware that can harm your device or data. However, you should always scan the file before installing it, just to be sure.

-

Is Angry Birds Classic Mod Apk compatible with my device?

-

Angry Birds Classic Mod Apk is compatible with most Android devices that run on Android 4.1 or higher. However, some devices may not support the game or the mod apk file due to different specifications or settings. If you encounter any problems or errors while playing the game, you can try to update your device software, clear your cache, or reinstall the game.

-

Will I get banned for using Angry Birds Classic Mod Apk?

-

No, you will not get banned for using Angry Birds Classic Mod Apk, as the game does not have any online features or modes that require verification or authentication. The game is offline and does not connect to any servers or databases. Therefore, you can play the game without any worries or risks.

-

Can I play Angry Birds Classic Mod Apk with my friends?

-

Yes, you can play Angry Birds Classic Mod Apk with your friends, as the game has a local multiplayer mode that allows you to compete with up to four players on the same device. You can also share your scores and achievements with your friends on social media platforms, such as Facebook and Twitter.

-

Can I update Angry Birds Classic Mod Apk?

-

Yes, you can update Angry Birds Classic Mod Apk whenever there is a new version available. However, you should always backup your game data before updating, as some updates may overwrite or delete your progress. You should also check if the new version of the mod apk file is compatible with your device and has the same features and benefits as the previous one.

401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Discover the Magic of AI Image Generator.md b/spaces/1phancelerku/anime-remove-background/Discover the Magic of AI Image Generator.md
deleted file mode 100644
index 4e4d969982179ffa82802162ec549fb99c632a75..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Discover the Magic of AI Image Generator.md
+++ /dev/null
@@ -1,135 +0,0 @@
-

AI image generator apk: What is it and how to use it?

-

Have you ever wondered how to create realistic or artistic images using artificial intelligence? Do you want to transform your photos into amazing artworks or funny animations? If yes, then you might be interested in learning more about AI image generator apk. In this article, we will explain what an AI image generator is, how it works, and how to use it on your Android device. We will also introduce you to some of the best AI image generator apps that you can download and install on your phone or tablet.

-

What is an AI image generator?

-

An AI image generator is a software program that uses artificial intelligence techniques to generate new images from existing ones or from scratch. It can manipulate, enhance, or modify images in various ways, such as changing colors, adding filters, applying effects, swapping faces, or creating animations. An AI image generator can also create realistic or stylized images based on text descriptions or sketches.

-

ai image generator apk


Download Ziphttps://jinyurl.com/2uNQ6k



-

How does an AI image generator work?

-

An AI image generator works by using deep learning algorithms that learn from large datasets of images. These algorithms are called neural networks, and they consist of multiple layers of artificial neurons that process information and produce outputs. Depending on the task, an AI image generator can use different types of neural networks, such as convolutional neural networks (CNNs), generative adversarial networks (GANs), or variational autoencoders (VAEs). These networks can learn to recognize patterns, features, and styles from images and generate new images that resemble them.

-

What are some applications of AI image generation?

-

AI image generation has many applications in various fields and industries, such as entertainment, education, art, design, marketing, medicine, and more. Some examples of how AI image generation can be used are:

- - How to use an AI image generator apk? -

If you want to use an AI image generator on your Android device, you will need to download and install an apk file. An apk file is a package file format that contains the installation files and data for an Android app. You can find many AI image generator apk files on the internet, but you need to be careful about the source and the security of the file. Here are some steps to follow to use an AI image generator apk:

-

What is an apk file?

-

An apk file is a compressed file that contains the code, resources, and metadata of an Android app. It stands for Android Package Kit, and it is the standard format for distributing and installing apps on Android devices. An apk file can be downloaded from various sources, such as the Google Play Store, third-party websites, or directly from the app developer. However, not all apk files are safe or compatible with your device, so you need to check the file before installing it.

-

How to download and install an AI image generator apk?

-

To download and install an AI image generator apk, you need to follow these steps:

-
    -
  1. Find a reliable source for the apk file. You can search for AI image generator apk on Google or other search engines, or visit some reputable websites that offer apk downloads, such as APKPure, APKMirror, or APKMonk. Make sure to read the reviews and ratings of the app and the file before downloading it.
  2. -
  3. Enable unknown sources on your device. Since you are downloading an apk file from outside the Google Play Store, you need to allow your device to install apps from unknown sources. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to grant permission for your browser or file manager to install apps.
  4. -
  5. Download the apk file to your device. You can either use your browser or a file manager app to download the apk file. Once the download is complete, you will see a notification or a pop-up window asking you to install the app.
  6. -
  7. Install the app on your device. Tap on the notification or the pop-up window and follow the instructions to install the app. You may need to accept some permissions and terms of service before completing the installation.
  8. -
  9. Launch the app and enjoy using it. Once the installation is done, you will see an icon for the app on your home screen or app drawer. Tap on it and start using the AI image generator app on your device.
  10. -
-

How to use an AI image generator app on your device?

-

To use an AI image generator app on your device, you need to follow these steps:

-

ai art generator apk
-ai photo generator apk
-ai logo generator apk
-ai nft generator apk
-ai design generator apk
-ai portrait generator apk
-ai graphics generator apk
-ai stock photos generator apk
-ai painting generator apk
-ai sketch generator apk
-ai instagram post generator apk
-ai interior design generator apk
-ai packaging design generator apk
-ai fashion design generator apk
-ai automobile design generator apk
-ai visual generator apk
-ai prompt generator apk
-ai template generator apk
-imagine go: ai image generator app
-imagine go: ai image generator download
-imagine go: ai image generator free
-imagine go: ai image generator online
-imagine go: ai image generator review
-imagine go: ai image generator tutorial
-imagine go: ai image generator alternative
-imagine go: ai image generator mod
-imagine go: ai image generator pro
-imagine go: ai image generator premium
-imagine go: ai image generator hack
-imagine go: ai image generator crack
-best ai image generator apk
-free ai image generator apk
-online ai image generator apk
-download ai image generator apk
-how to use ai image generator apk
-how to install ai image generator apk
-how to download ai image generator apk
-how to create ai images with apk
-how to generate stunning visuals with apk
-how to make nft with ai image generator apk
-how to make logos with ai image generator apk
-how to make designs with ai image generator apk
-how to make portraits with ai image generator apk
-how to make graphics with ai image generator apk
-how to make stock photos with ai image generator apk
-how to make paintings with ai image generator apk
-how to make sketches with ai image generator apk
-how to make instagram posts with ai image generator apk

-
    -
  1. Select an image source. Depending on the app, you can either choose an image from your gallery, take a photo with your camera, or use a built-in image library.
  2. -
  3. Select an image style or effect. Depending on the app, you can either choose from a variety of styles or effects, such as realistic, artistic, cartoon, meme, animation, etc., or enter a text description or a sketch of what you want to generate.
  4. -
  5. Generate and edit the image. Depending on the app, you can either wait for a few seconds or minutes for the app to generate the image using its AI algorithm, or adjust some parameters or settings to customize the output. You can also edit the image by cropping, rotating, resizing, adding text, stickers, filters, etc.
  6. -
  7. Save and share the image. Depending on the app, you can either save the image to your device or cloud storage, or share it directly with your friends or social media platforms.
  8. -
-

What are some examples of AI image generator apps?

-

There are many AI image generator apps available for Android devices, but here are some of the most popular and interesting ones that you can try:

-

WOMBO Dream AI Mirror

-

This app lets you create funny animations of yourself or anyone else by using AI technology. You can make yourself sing, dance, smile, wink, or make funny faces by using various songs and effects. You can also swap faces with celebrities or animals and see how you look like in different scenarios.

-

FaceApp

-

This app lets you transform your face in various ways by using AI technology. You can change your age, gender, hairstyle, beard, glasses, makeup, expression, etc., by using different filters and options. You can also create collages or GIFs of yourself or others and see how they change over time.

-

Prisma Photo Editor

-

This app lets you turn your photos into artworks by using AI technology. You can choose from over 300 artistic styles and effects, such as painting, sketching, graffiti, pop art, etc., and apply them to your photos. You can also adjust the intensity and quality of the effects and create your own unique style.

-

Artisto

-

This app lets you turn your videos into artworks by using AI technology. You can choose from over 50 artistic styles and effects, such as painting, sketching, cartoon, etc., and apply them to your videos. You can also edit the duration, speed, and sound of your videos and create stunning animations.

-

Deep Art Effects

-

This app lets you create realistic or abstract images by using AI technology. You can choose from over 100 artistic styles and effects, such as painting, sketching, watercolor, oil, etc., and apply them to your images. You can also create your own style by uploading an image of your choice and letting the app learn from it.

-

Conclusion

-

In this article, we have learned what an AI image generator is, how it works, and how to use it on your Android device. We have also introduced you to some of the best AI image generator apps that you can download and install on your phone or tablet. AI image generation is a fascinating and fun way to create amazing images using artificial intelligence. Whether you want to make yourself look different, create artworks, or have some laughs, you can find an AI image generator app that suits your needs and preferences.

-

Summary of the main points

- -

Call to action

-

If you are interested in trying out some of the AI image generator apps that we have mentioned in this article, you can click on the links below to download them from their official websites or the Google Play Store. You can also search for other AI image generator apps on the internet or the Google Play Store and see what they can do for you. Have fun creating amazing images with AI!

-

FAQs

-
    -
  1. What is the difference between an AI image generator and a photo editor?
  2. -

    An AI image generator is a software program that uses artificial intelligence techniques to generate new images from existing ones or from scratch. A photo editor is a software program that allows you to edit or enhance existing images by using various tools and features.

    -
  3. Is AI image generation safe and legal?
  4. -

    AI image generation is generally safe and legal as long as you use it for personal or educational purposes and do not violate any intellectual property rights or privacy laws. However, you should be careful about the source and the security of the apk file that you download and install on your device. You should also avoid using AI image generation for malicious or fraudulent purposes, such as impersonating someone else or creating fake news or evidence.

    -
  5. How can I improve the quality of the images generated by AI?
  6. -

    The quality of the images generated by AI depends on several factors, such as the quality of the input image, the type of neural network used, the size of the dataset used for training, and the parameters or settings used for generating. To improve the quality of the images generated by AI, you can try to use high-quality input images, choose a suitable neural network type, use a large and diverse dataset for training, and adjust some parameters or settings for generating.

    -
  7. Can I use AI image generation for commercial purposes?
  8. -

    It depends on the terms and conditions of the app that you use and the license of the images that you generate. Some apps may allow you to use their services for commercial purposes as long as you give credit to them or pay a fee. Some apps may not allow you to use their services for commercial purposes at all. Some images may be free to use for commercial purposes as long as you follow some rules or guidelines. Some images may not be free to use for commercial purposes at all. You should always check the terms and conditions of the app that you use and the license of the images that you generate for commercial purposes. You should always respect the rights and interests of the original creators and owners of the images.

    -
  9. What are some of the challenges or limitations of AI image generation?
  10. -

    AI image generation is a rapidly developing and evolving field, but it still faces some challenges or limitations, such as:

    - -

    I hope you have enjoyed reading this article and learned something new about AI image generation. If you have any questions or feedback, please feel free to leave a comment below. Thank you for your time and attention!

    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga Hile Apk and Enjoy Unlimited Lives and Boosters.md b/spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga Hile Apk and Enjoy Unlimited Lives and Boosters.md
deleted file mode 100644
index 56da1bd19a54f1f021b2644e2cebdfad969a0fe1..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Farm Heroes Saga Hile Apk and Enjoy Unlimited Lives and Boosters.md
+++ /dev/null
@@ -1,20 +0,0 @@
-

    Farm Heroes Saga Hile Apk: How to Get Unlimited Lives, Boosters, and Gold Bars

    - Do you love playing Farm Heroes Saga, but find it frustrating to run out of lives, boosters, or gold bars? Do you wish you could play the game without any limitations or interruptions? If so, you might be interested in Farm Heroes Saga Hile Apk, a modified version of the game that gives you unlimited resources and access to all the levels. In this article, we will tell you what Farm Heroes Saga is, what Farm Heroes Saga Hile Apk is, how to download and install it, and how to use it. Let's get started!

    What is Farm Heroes Saga?

    -

    A fun and addictive puzzle game

    - Farm Heroes Saga is a social puzzle game developed by King.com, the creators of the super popular Candy Crush Saga. The game was released in 2014 and has since gained millions of fans around the world. The game is available for free on Android, iOS, Windows Phone, and Facebook. The game is set in a farm where you have to help the Farm Heroes stop the evil Rancid the Raccoon from spoiling the crops. You do this by matching three or more fruits or vegetables of the same kind on a grid. Each level has a different goal and a limited number of moves. You can also use boosters, such as shovels, tractors, or water buckets, to help you clear the board faster.

    The main features and gameplay

    - Farm Heroes Saga has over 3000 levels to play, each with different challenges and surprises. You can also play with your friends and compete for the highest score on the leaderboard. The game also has various events and quests that give you extra rewards and bonuses. Some of the features of Farm Heroes Saga are: - Bright and colorful graphics - Cute and funny characters - Easy and fun to play, but challenging to master - Various game modes, such as Hero Mode, Treasure Mill, Fireworks Night, and more - Daily rewards and free spins - Social features that let you connect with your friends

    What is Farm Heroes Saga Hile Apk?

    -

    A modified version of the original game

    - Farm Heroes Saga Hile Apk is a hacked or modified version of the original game that gives you unlimited lives, boosters, and gold bars. This means that you can play the game as much as you want without waiting for your lives to refill or spending real money on in-app purchases. You can also unlock all the levels and enjoy all the features of the game without any restrictions.

    The benefits and risks of using it

    - Using Farm Heroes Saga Hile Apk has some benefits and some risks. The benefits are: - You can have more fun and excitement playing the game without any limitations - You can save your money and time by not buying or earning resources - You can explore all the levels and modes of the game without any difficulty The risks are: - You might lose your progress or data if the game updates or crashes - You might get banned or suspended from the game if you are detected by the developers - You might expose your device to malware or viruses if you download from an untrusted source

    How to download and install Farm Heroes Saga Hile Apk?

    -

    The steps to follow

    - If you want to download and install Farm Heroes Saga Hile Apk on your Android device, you need to follow these steps: 1. Go to a reliable website that offers Farm Heroes Saga Hile Apk for free download. For example, you can use [ this website](^1^) to download Farm Heroes Saga Hile Apk. 2. Before installing the apk file, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, and then toggle on the option that says "Unknown sources". 3. Locate the downloaded apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete. 4. Once the installation is done, you can launch the game from your app drawer or home screen and enjoy playing Farm Heroes Saga Hile Apk.

    The precautions to take

    - Before downloading and installing Farm Heroes Saga Hile Apk, you should take some precautions to avoid any problems or risks. Here are some tips to follow: - Make sure you have enough storage space on your device for the apk file and the game data - Make sure you have a stable internet connection for the download and installation process - Make sure you download the apk file from a trusted and secure source that does not contain any malware or viruses - Make sure you backup your original game data before installing the modified version, in case you want to restore it later - Make sure you do not log in with your Facebook account or any other social media account while playing the modified version, as this might get you banned or suspended from the game

    How to use Farm Heroes Saga Hile Apk?

    -

    How to get unlimited lives, boosters, and gold bars

    - Once you have installed Farm Heroes Saga Hile Apk on your device, you can start playing the game with unlimited resources. You will notice that your lives, boosters, and gold bars are always full and never decrease. You can use them as much as you want without any limitations or costs. To get unlimited lives, boosters, and gold bars, you do not need to do anything special or complicated. You just need to play the game normally and enjoy the benefits of the modified version. You can also access all the levels and modes of the game without any difficulty.

    How to enjoy the game with more fun and ease

    - Using Farm Heroes Saga Hile Apk can make the game more fun and easy for you. You can play the game without any stress or frustration of running out of resources or being stuck on a hard level. You can also experiment with different boosters and strategies to clear the board faster and get higher scores. Some of the ways to enjoy the game with more fun and ease are: - Try different combinations of fruits and vegetables to create bigger matches and more cascades - Use boosters wisely and strategically to clear obstacles, collect cropsies, or create special effects - Play with your friends and challenge them to beat your scores or help them with lives or boosters - Participate in events and quests to earn extra rewards and bonuses - Explore all the levels and modes of the game and discover new features and surprises

    Conclusion

    - Farm Heroes Saga is a fun and addictive puzzle game that can keep you entertained for hours. However, if you want to play the game without any limitations or interruptions, you might want to try Farm Heroes Saga Hile Apk, a modified version of the game that gives you unlimited lives, boosters, and gold bars. In this article, we have told you what Farm Heroes Saga is, what Farm Heroes Saga Hile Apk is, how to download and install it, and how to use it. We hope you found this article helpful and informative. Now go ahead and enjoy playing Farm Heroes Saga Hile Apk!

    FAQs

    - Here are some frequently asked questions about Farm Heroes Saga Hile Apk: - Q: Is Farm Heroes Saga Hile Apk safe to use? - A: Farm Heroes Saga Hile Apk is generally safe to use if you download it from a reliable source and follow the precautions we have mentioned above. However, there is always a risk of losing your progress or data, getting banned or suspended from the game, or exposing your device to malware or viruses when using a modified version of a game. Therefore, use it at your own risk and discretion. - Q: Is Farm Heroes Saga Hile Apk legal to use? - A: Farm Heroes Saga Hile Apk is not legal to use as it violates the terms and conditions of the original game. It also infringes on the intellectual property rights of the developers. Therefore, using it might get you in trouble with the law or the developers. - Q: Can I update Farm Heroes Saga Hile Apk? - A: Farm Heroes Saga Hile Apk is not compatible with updates from the original game. If you update it, you might lose all the features and benefits of the modified version. Therefore, it is better to avoid updating it unless there is a new version of Farm Heroes Saga Hile Apk that has the same features and benefits as the previous one. - Q: Can I play Farm Heroes Saga Hile Apk offline? - A: Farm Heroes Saga Hile Apk requires an internet connection to play, as it is a social game that connects with your friends and other players. However, you can play some levels offline if you have already downloaded them on your device. - Q: Can I restore my original game data after using Farm Heroes Saga Hile Apk? - A: If you have backed up your original game data before installing Farm Heroes Saga Hile Apk, you can restore it by uninstalling the modified version and reinstalling the original version from the official app store. However, if you have not backed up your data, you might lose it permanently after using Farm Heroes Saga Hile Apk. - Q: Where can I find more information about Farm Heroes Saga Hile Apk? - A: You can find more information about Farm Heroes Saga Hile Apk on the website where you downloaded it from, or on other websites or forums that discuss the game and its modifications. You can also contact the developers or the users of Farm Heroes Saga Hile Apk for any questions or feedback.

    -

    farm heroes saga hile apk


    Download Zip ✔✔✔ https://jinyurl.com/2uNU83



    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download One Piece Bounty Rush APK and Enjoy Pirate Action Offline.md b/spaces/1phancelerku/anime-remove-background/Download One Piece Bounty Rush APK and Enjoy Pirate Action Offline.md
deleted file mode 100644
index d9cdcc80ec9c2dac3a682b14dbcedd4efc34b5ff..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download One Piece Bounty Rush APK and Enjoy Pirate Action Offline.md
+++ /dev/null
@@ -1,125 +0,0 @@
-

    Download One Piece APK Offline: How to Play the Popular Anime Game on Your Android Device

    -

    One Piece is one of the most popular anime series in the world, with millions of fans who love the adventures of Monkey D. Luffy and his crew of pirates. If you are one of them, you might be interested in playing a game based on the anime on your Android device. But what if you don't have an internet connection or you want to save your data? Don't worry, there is a solution for you: download One Piece APK offline.

    -

    download one piece apk offline


    Downloadhttps://jinyurl.com/2uNUfP



    -

    What is One Piece APK Offline?

    -

    One Piece APK offline is a modified version of the original One Piece game that allows you to play it without an internet connection. It is a 2D fighting game that features characters from the anime, such as Luffy, Zoro, Nami, Sanji, Usopp, Chopper, Robin, Franky, Brook, and more. You can choose your favorite character and fight against enemies in various stages and scenarios inspired by the anime.

    -

    Features of One Piece APK Offline

    -

    Some of the features that make One Piece APK offline a fun and exciting game are:

    - -

    Requirements for One Piece APK Offline

    -

    To play One Piece APK offline, you need to have an Android device that meets the following requirements:

    - -

    How to Download and Install One Piece APK Offline

    -

    If you want to download and install One Piece APK offline on your Android device, you need to follow these steps:

    -

    download one piece bounty rush apk offline
    -download one piece burning will apk offline
    -download one piece fighting path apk offline
    -download one piece pirate warriors 4 apk offline
    -download one piece treasure cruise apk offline
    -download one piece thousand storm apk offline
    -download one piece world seeker apk offline
    -download one piece romance dawn apk offline
    -download one piece unlimited world red apk offline
    -download one piece grand battle apk offline
    -download one piece grand adventure apk offline
    -download one piece grand collection apk offline
    -download one piece grand line bout apk offline
    -download one piece great pirate colosseum apk offline
    -download one piece kaizoku musou apk offline
    -download one piece king of pirates apk offline
    -download one piece legend of sea apk offline
    -download one piece legends of pirates apk offline
    -download one piece mobile game apk offline
    -download one piece new world apk offline
    -download one piece ocean's dream apk offline
    -download one piece online game apk offline
    -download one piece pirate warriors 3 apk offline
    -download one piece power of legends apk offline
    -download one piece run chopper run apk offline
    -download one piece super grand battle x apk offline
    -download one piece the bloodline apk offline
    -download one piece the will of d apk offline
    -download one piece ultimate fight apk offline
    -download one piece unlimited cruise sp apk offline
    -free download one piece game for android apk offline
    -how to download one piece game on android apk offline
    -how to play one piece game on android without internet connection
    -latest version of one piece game for android free download apk offline
    -modded version of one piece game for android free download apk offline
    -new update of one piece game for android free download apk offline
    -no root required to play one piece game on android free download apk offline
    -safe and secure way to download one piece game for android free apk offline
    -tips and tricks to play one piece game on android free download apk offline
    -unlimited coins and gems in one piece game for android free download apk offline

    -

    Step 1: Find a reliable source for the APK file

    -

    The first thing you need to do is to find a trustworthy website that offers the APK file for One Piece APK offline. You can use a search engine like Google or Bing to look for it, or you can use one of these links:

    - -

    Make sure that the website is safe and secure before downloading anything from it. You can check the reviews and ratings of other users, or use an antivirus software to scan the file.

    -

    Step 2: Enable unknown sources on your device

    -

    The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You might see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.

    -

    Step 3: Download and install the APK file

    -

    The third thing you need to do is to download and install the APK file for One Piece APK offline. To do this, go to the website where you found the file and tap on the download button. Wait for the file to be downloaded on your device, then open it. You might see a pop-up message that asks you to confirm the installation. Tap Install and wait for the process to finish.

    -

    Step 4: Launch the game and enjoy

    -

    The last thing you need to do is to launch the game and enjoy playing it. To do this, go to your app drawer and tap on the One Piece icon. You might see a loading screen that shows the game's logo and some information. Wait for the game to load, then choose your language and accept the terms and conditions. You can now start playing One Piece APK offline on your Android device.

    -

    Tips and Tricks for Playing One Piece APK Offline

    -

    Now that you have downloaded and installed One Piece APK offline, you might want to know some tips and tricks that can help you play better and have more fun. Here are some of them:

    -

    Choose your favorite character and customize their skills

    -

    One of the best things about One Piece APK offline is that you can choose your favorite character from the anime and customize their skills according to your preference. You can unlock more characters as you progress in the game, and you can also upgrade their skills with coins and items. You can access the character menu by tapping on the character icon on the top left corner of the screen. There, you can see your character's stats, skills, equipment, and costumes. You can also switch characters by tapping on the change button.

    -

    Explore the different modes and challenges

    -

    One Piece APK offline has different modes and challenges that offer different gameplay experiences and rewards. You can access them by tapping on the mode icon on the top right corner of the screen. There, you can see four options: story mode, arcade mode, survival mode, and training mode. Here is a brief description of each mode:

    - -

    Collect coins and items to upgrade your equipment

    -

    One Piece APK offline has a lot of coins and items that you can collect by playing the game. You can use them to upgrade your equipment, such as your weapons, armor, accessories, and costumes. You can access the shop menu by tapping on the shop icon on the bottom right corner of the screen. There, you can see four options: weapon shop, armor shop, accessory shop, and costume shop. Here is a brief description of each shop:

    - -

    Join online battles and tournaments with other players

    -

    One Piece APK offline has an online mode that lets you battle with other players around the world. You can access it by tapping on the online icon on the bottom left corner of the screen. There, you can see two options: battle mode and tournament mode. Here is a brief description of each mode:

    - -

    Conclusion

    -

    One Piece APK offline is a great game for fans of the anime who want to play it on their Android devices without an internet connection. It has a lot of features, modes, challenges, and characters that make it fun and exciting. It also has an online mode that lets you battle with other players around the world. If you want to download and install One Piece APK offline, you can follow the steps in this article and enjoy playing it.

    -

    FAQs

    -

    Here are some frequently asked questions about One Piece APK offline:

    -

    Is One Piece APK offline safe to download and install?

    -

    One Piece APK offline is safe to download and install as long as you get it from a reliable source. Always check other users' reviews and ratings, or scan the file with antivirus software before installing it. You should also enable unknown sources only while you install the APK file, and disable the setting again afterwards.
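    As an extra sanity check (assuming the site publishes a checksum, which not every source does), you can compare the downloaded file's SHA-256 hash against the published value before installing. A minimal Python sketch:

```python
# Compute a SHA-256 checksum for the downloaded APK; the filename is a placeholder.
import hashlib

def sha256_of(path: str, block_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(block_size):   # read in 1 MiB blocks
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("one-piece-offline.apk"))   # compare with the hash the site publishes
```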

    -

    Is One Piece APK offline free to play?

    -

    One Piece APK offline is free to play, but it may contain some in-app purchases that require real money. You can buy coins and items to upgrade your equipment, or unlock new characters and stages. However, you can also earn coins and items by playing the game, so you don't have to spend any money if you don't want to.

    -

    How can I update One Piece APK offline?

    -

    One Piece APK offline may not update automatically like the original One Piece game from the Google Play Store. You may need to download and install the latest version of the APK file from the same source where you got it before. You should also back up your data before updating, in case something goes wrong.

    -

    How can I contact the developer of One Piece APK offline?

    -

    One Piece APK offline is not an official game from Bandai Namco Entertainment, the original developer of One Piece. It is a modified version of the game created by a third-party developer, so you may not be able to contact that developer directly or get any support. You can try reaching them through their website or social media accounts, if they have any.

    -

    What are some alternatives to One Piece APK offline?

    -

    If you are looking for some alternatives to One Piece APK offline, you may want to try these games:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py deleted file mode 100644 index ded1ddc59edaa6c42e360335ad5feecada3c337e..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import List, Optional, Tuple, Union - -import numpy as np -import paddle -import PIL - -from ...models import UNet2DModel, VQModel -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from ...utils import PIL_INTERPOLATION - - -def preprocess(image): - w, h = image.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = paddle.to_tensor(image) - return 2.0 * image - 1.0 - - -class LDMSuperResolutionPipeline(DiffusionPipeline): - r""" - A pipeline for image super-resolution using Latent - - This class inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular xxxx, etc.) - - Parameters: - vqvae ([`VQModel`]): - Vector-quantized (VQ) VAE Model to encode and decode images to and from latent representations. - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`], - [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`]. 
- """ - - def __init__( - self, - vqvae: VQModel, - unet: UNet2DModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - ): - super().__init__() - self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) - - @paddle.no_grad() - def __call__( - self, - image: Union[paddle.Tensor, PIL.Image.Image], - batch_size: Optional[int] = 1, - num_inference_steps: Optional[int] = 100, - eta: Optional[float] = 0.0, - generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - r""" - Args: - image (`paddle.Tensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - batch_size (`int`, *optional*, defaults to 1): - Number of images to generate. - num_inference_steps (`int`, *optional*, defaults to 100): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`paddle.Generator`, *optional*): - One or a list of paddle generator(s) to make generation deterministic. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - - if isinstance(image, PIL.Image.Image): - batch_size = 1 - elif isinstance(image, paddle.Tensor): - batch_size = image.shape[0] - else: - raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `paddle.Tensor` but is {type(image)}") - - if isinstance(image, PIL.Image.Image): - image = preprocess(image) - - height, width = image.shape[-2:] - - # in_channels should be 6: 3 for latents, 3 for low resolution image - latents_shape = (batch_size, self.unet.in_channels // 2, height, width) - latents_dtype = next(self.unet.named_parameters())[1].dtype - - latents = paddle.randn(latents_shape, generator=generator, dtype=latents_dtype) - - image = image.cast(latents_dtype) - - self.scheduler.set_timesteps(num_inference_steps) - timesteps_tensor = self.scheduler.timesteps - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_kwargs = {} - if accepts_eta: - extra_kwargs["eta"] = eta - - for t in self.progress_bar(timesteps_tensor): - # concat latents and low resolution image in the channel dimension. - latents_input = paddle.concat([latents, image], axis=1) - latents_input = self.scheduler.scale_model_input(latents_input, t) - # predict the noise residual - noise_pred = self.unet(latents_input, t).sample - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample - - # decode the image latents with the VQVAE - image = self.vqvae.decode(latents).sample - image = paddle.clip(image, -1.0, 1.0) - image = image / 2 + 0.5 - image = image.transpose([0, 2, 3, 1]).cast("float32").numpy() - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/4Taps/SadTalker/src/gradio_demo.py b/spaces/4Taps/SadTalker/src/gradio_demo.py deleted file mode 100644 index 4f78c97349652e23cf463c49527191fcec795564..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/gradio_demo.py +++ /dev/null @@ -1,113 +0,0 @@ -import torch, uuid -from time import gmtime, strftime -import os, sys, shutil -from src.utils.preprocess import CropAndExtract -from src.test_audio2coeff import Audio2Coeff -from src.facerender.animate import AnimateFromCoeff -from src.generate_batch import get_data -from src.generate_facerender_batch import get_facerender_data -from src.utils.text2speech import text2speech - -from pydub import AudioSegment - -def mp3_to_wav(mp3_filename,wav_filename,frame_rate): - mp3_file = AudioSegment.from_file(file=mp3_filename) - mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav") - - -class SadTalker(): - - def __init__(self, checkpoint_path='checkpoints', config_path='src/config'): - - if torch.cuda.is_available() : - device = "cuda" - else: - device = "cpu" - - os.environ['TORCH_HOME']= checkpoint_path - - path_of_lm_croper = os.path.join( checkpoint_path, 'shape_predictor_68_face_landmarks.dat') - path_of_net_recon_model = os.path.join( checkpoint_path, 'epoch_20.pth') - dir_of_BFM_fitting = os.path.join( checkpoint_path, 'BFM_Fitting') - wav2lip_checkpoint = os.path.join( checkpoint_path, 'wav2lip.pth') - - audio2pose_checkpoint = os.path.join( checkpoint_path, 'auido2pose_00140-model.pth') - audio2pose_yaml_path = os.path.join( config_path, 'auido2pose.yaml') - - audio2exp_checkpoint = os.path.join( checkpoint_path, 'auido2exp_00300-model.pth') - audio2exp_yaml_path = os.path.join( config_path, 'auido2exp.yaml') - - free_view_checkpoint = os.path.join( checkpoint_path, 'facevid2vid_00189-model.pth.tar') - mapping_checkpoint = os.path.join( checkpoint_path, 'mapping_00229-model.pth.tar') - facerender_yaml_path = os.path.join( config_path, 'facerender.yaml') - - #init model - print(path_of_lm_croper) - self.preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device) - - print(audio2pose_checkpoint) - self.audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path, - audio2exp_checkpoint, audio2exp_yaml_path, wav2lip_checkpoint, device) - print(free_view_checkpoint) - self.animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint, - 
facerender_yaml_path, device) - self.device = device - - def test(self, source_image, driven_audio, still_mode, use_enhancer, result_dir='./'): - - time_tag = str(uuid.uuid4()) - save_dir = os.path.join(result_dir, time_tag) - os.makedirs(save_dir, exist_ok=True) - - input_dir = os.path.join(save_dir, 'input') - os.makedirs(input_dir, exist_ok=True) - - print(source_image) - pic_path = os.path.join(input_dir, os.path.basename(source_image)) - shutil.move(source_image, input_dir) - - if os.path.isfile(driven_audio): - audio_path = os.path.join(input_dir, os.path.basename(driven_audio)) - - #### mp3 to wav - if '.mp3' in audio_path: - mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000) - audio_path = audio_path.replace('.mp3', '.wav') - else: - shutil.move(driven_audio, input_dir) - else: - text2speech - - - os.makedirs(save_dir, exist_ok=True) - pose_style = 0 - #crop image and extract 3dmm from image - first_frame_dir = os.path.join(save_dir, 'first_frame_dir') - os.makedirs(first_frame_dir, exist_ok=True) - first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir) - - if first_coeff_path is None: - raise AttributeError("No face is detected") - - #audio2ceoff - batch = get_data(first_coeff_path, audio_path, self.device) # longer audio? - coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style) - #coeff2video - batch_size = 4 - data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode) - self.animate_from_coeff.generate(data, save_dir, enhancer='gfpgan' if use_enhancer else None, original_size=original_size) - video_name = data['video_name'] - print(f'The generated video is named {video_name} in {save_dir}') - - torch.cuda.empty_cache() - torch.cuda.synchronize() - import gc; gc.collect() - - if use_enhancer: - return os.path.join(save_dir, video_name+'_enhanced.mp4'), os.path.join(save_dir, video_name+'_enhanced.mp4') - - else: - return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4') - - - \ No newline at end of file diff --git a/spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/__init__.py b/spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/model6_inference.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/model6_inference.py deleted file mode 100644 index 4b2fd561e6f07e09b7cb9d6a962c60df7fe43e0d..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/model6_inference.py +++ /dev/null @@ -1,270 +0,0 @@ -"""old name: test_runtime_model6.py""" - -import json -import os -import subprocess -import sys -import warnings -from time import time -from typing import Union, Tuple, Any - -import pandas as pd -from mmdet.apis import inference_detector -from mmdet.apis import init_detector as det_init_detector -from mmpose.apis import inference_topdown -from mmpose.apis import init_model as pose_init_model -from mmpretrain import ImageClassificationInferencer -from mmpretrain.utils import register_all_modules -from .extensions.vis_pred_save import save_result - -register_all_modules() - -st = ist = time() -# irt = time() - st -# print(f'==Packages importing time is {irt}s==\n') - -print('==Start==') - -# DEVICE = 'cuda:0,1,2,3' -DEVICE = 'cpu' -abs_path = os.path.dirname(os.path.abspath(__file__)) -yolo_config = 
os.path.join(abs_path, 'Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov6_s_fast.py') -yolo_checkpoint = os.path.join(abs_path, 'Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/epoch_64.pth') -pretrain_config = os.path.join(abs_path, 'Model6_2_ProfileRecogition/mmpretrain/configs/resnext101_4xb32_2048e_3c_noF.py') -pretrain_checkpoint = os.path.join(abs_path, 'Model6_2_ProfileRecogition/mmpretrain/work_dirs/' - 'resnext101_4xb32_2048e_3c_noF/best_accuracy_top1_epoch_1520.pth') -pose_configs = { - 'short_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py', - 'long_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py', - 'short_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192.py', - 'long_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192.py', - 'vest': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192.py', - 'sling': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192.py', - 'shorts': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192.py', - 'trousers': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192.py', - 'skirt': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192.py', - 'short_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192.py', - 'long_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192.py', - 'vest_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py', - 'sling_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192.py', -} - -pose_checkpoints = { - 'short_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192/best_PCK_epoch_50.pth', - 'long_sleeved_shirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192/best_PCK_epoch_60.pth', - 'short_sleeved_outwear': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/best_PCK_epoch_120.pth', - 'long_sleeved_outwear': 
'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192/best_PCK_epoch_100.pth', - 'vest': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/best_PCK_epoch_90.pth', - 'sling': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/best_PCK_epoch_60.pth', - 'shorts': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/best_PCK_epoch_160.pth', - 'trousers': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-60e_deepfashion2_trousers_256x192/best_PCK_epoch_30.pth', - 'skirt': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/best_PCK_epoch_110.pth', - 'short_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_short_sleeved_dress_256x192/best_PCK_epoch_100.pth', - 'long_sleeved_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/best_PCK_epoch_120.pth', - 'vest_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/best_PCK_epoch_80.pth', - 'sling_dress': 'Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_sling_dress_256x192/best_PCK_epoch_140.pth', -} - -start_load = time() -yolo_inferencer = det_init_detector(yolo_config, yolo_checkpoint, device=DEVICE) -print('=' * 2 + 'The model loading time of MMYolo is {}s'.format(time() - start_load) + '=' * 2) - -start_load = time() -pretrain_inferencer = ImageClassificationInferencer(model=pretrain_config, - pretrained=pretrain_checkpoint, - device=DEVICE) -print('=' * 2 + 'The model loading time of MMPretrain is {}s'.format(time() - start_load) + '=' * 2) - - -def get_bbox_results_by_classes(result) -> dict: - """ - :param result: the result of mmyolo inference - :return: a dict of bbox results by classes - """ - bbox_results_by_classes = { - 'short_sleeved_shirt': [], - 'long_sleeved_shirt': [], - 'short_sleeved_outwear': [], - 'long_sleeved_outwear': [], - 'vest': [], - 'sling': [], - 'shorts': [], - 'trousers': [], - 'skirt': [], - 'short_sleeved_dress': [], - 'long_sleeved_dress': [], - 'vest_dress': [], - 'sling_dress': [], - } - pred_instances = result.pred_instances - _bboxes = pred_instances.bboxes - _labels = pred_instances.labels - _scores = pred_instances.scores - labels = _labels[[_scores > 0.3]] - bboxes = _bboxes[[_scores > 0.3]] - # use enumerate to get index and value - for idx, value in enumerate(labels): - class_name = list(bbox_results_by_classes.keys())[value] - x1 = bboxes[idx][0] - y1 = bboxes[idx][1] - x2 = bboxes[idx][2] - y2 = bboxes[idx][3] - bbox_results_by_classes[class_name].append([x1, y1, x2, y2]) - return bbox_results_by_classes - - -def mmyolo_inference(img: Union[str, list], model) -> tuple: - mmyolo_st = time() - result = inference_detector(model, img) - mmyolo_et = time() - - return result, (mmyolo_et - mmyolo_st) - - -def mmpose_inference(person_results: dict, use_bbox: bool, - mmyolo_cfg_path: str, mmyolo_ckf_path: str, - img: str, output_path_root: str, save=True, device='cpu') -> float: - """ - :param person_results: the result of mmyolo inference - :param use_bbox: whether to use bbox to inference the pose results - :param mmyolo_cfg_path: the file path of mmyolo config - :param mmyolo_ckf_path: the file path 
of mmyolo checkpoint - :param img: the path of the image to inference - :param output_path_root: the root path of the output - :param save: whether to save the inference result, including the image and the predicted json file. - If `save` is False, `output_path_root` will be invalid. - :param device: the device to inference - """ - mmpose_st = time() - poses = { - 'short_sleeved_shirt': {}, - 'long_sleeved_shirt': {}, - 'short_sleeved_outwear': {}, - 'long_sleeved_outwear': {}, - 'vest': {}, - 'sling': {}, - 'shorts': {}, - 'trousers': {}, - 'skirt': {}, - 'short_sleeved_dress': {}, - 'long_sleeved_dress': {}, - 'vest_dress': {}, - 'sling_dress': {} - } - for label, person_result in person_results.items(): - if len(person_result) == 0: - continue - pose_config = pose_configs[label] - pose_checkpoint = pose_checkpoints[label] - if not use_bbox: - from mmpose.apis import MMPoseInferencer - - warnings.warn('use_bbox is False, ' - 'which means using MMPoseInferencer to inference the pose results without use_bbox ' - 'and may be wrong') - inferencer = MMPoseInferencer( - pose2d=pose_config, - pose2d_weights=pose_checkpoint, - det_model=mmyolo_cfg_path, - det_weights=mmyolo_ckf_path - ) - result_generator = inferencer(img, out_dir='upload_to_web_tmp', return_vis=True) - result = next(result_generator) - # print(result) - else: - pose_model = pose_init_model( - pose_config, - pose_checkpoint, - device=device - ) - pose_results = inference_topdown(pose_model, img, person_result, bbox_format='xyxy') - poses[label]['pose_results'] = pose_results - poses[label]['pose_model'] = pose_model - mmpose_et = time() - if save: - - save_result(img, poses, out_dir=output_path_root) - - return mmpose_et - mmpose_st - - -def mmpretrain_inference(img: Union[str, list], model) -> tuple: - mmpretain_st = time() - cls_result = model(img) - mmpretain_et = time() - return cls_result, (mmpretain_et - mmpretain_st) - - -def main(img_path: str, output_path_root='upload_to_web_tmp', use_bbox=True, device='cpu', test_runtime=False) -> dict: - """ - :param img_path: the path of the image or the folder of images - :param output_path_root: the root path of the output - :param use_bbox: whether to use bbox to inference the pose results - :param device: the device to inference - :param test_runtime: whether to test the runtime - - :return: the results of model6_2 in form of dictionary - """ - if os.path.isdir(img_path): - img_names = os.listdir(img_path) - img_paths = [os.path.join(img_path, img_name) for img_name in img_names] - elif os.path.isfile(img_path): - img_paths = [img_path] - else: - print('==Img_path must be a path of an imgage or a folder!==') - raise ValueError() - - runtimes = [['img_name', - 'runtime_mmyolo', 'percent1', - 'runtime_mmpose', 'percent2', - 'runtime_mmpretrain', 'percent3', - 'runtime_total']] - - cls_results = {} - - for img in img_paths: - print(f'==Start to inference {img}==') - yolo_result, runtime_mmyolo = mmyolo_inference(img, yolo_inferencer) - print(f'==mmyolo running time is {runtime_mmyolo}s==') - - person_results = get_bbox_results_by_classes(yolo_result) - - runtime_mmpose = mmpose_inference( - person_results=person_results, - use_bbox=use_bbox, - mmyolo_cfg_path=yolo_config, - mmyolo_ckf_path=yolo_checkpoint, - img=img, - output_path_root=output_path_root, - save=True, - device=device - ) - print(f'mmpose running time is {runtime_mmpose}s') - - cls_result, runtime_mmpretrain = mmpretrain_inference(img, pretrain_inferencer) - print(f'mmpretrain running time is {runtime_mmpretrain}s') - 
cls_results[os.path.basename(img)] = cls_result - if test_runtime: - runtime_total = runtime_mmyolo + runtime_mmpose + runtime_mmpretrain - percent1 = str(round(runtime_mmyolo / runtime_total * 100, 2)) + '%' - percent2 = str(round(runtime_mmpose / runtime_total * 100, 2)) + '%' - percent3 = str(round(runtime_mmpretrain / runtime_total * 100, 2)) + '%' - img_name = os.path.basename(img) - runtimes.append([img_name, - runtime_mmyolo, percent1, - runtime_mmpose, percent2, - runtime_mmpretrain, percent3, - runtime_total]) - if test_runtime: - df = pd.DataFrame(runtimes, columns=runtimes[0]) - df.to_csv('runtimes.csv', index=False) - - return cls_results - - -if __name__ == "__main__": - # main(1) - main('data-test/') - # main('data-test/000002.jpg') - rt = time() - st - print(f'==Totol time cost is {rt}s==') diff --git a/spaces/Albertha/qwe123/Dockerfile b/spaces/Albertha/qwe123/Dockerfile deleted file mode 100644 index a905ef711861706570e25829b42e8f567c0e4d40..0000000000000000000000000000000000000000 --- a/spaces/Albertha/qwe123/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM node:slim - -WORKDIR /app - -COPY . . - -EXPOSE 7860 - -RUN apt-get update && \ - chmod 775 server index.js package.json start.sh /app &&\ - npm install -r package.json - -CMD ["node", "index.js"] diff --git a/spaces/Alpaca233/LangchainPDF/README.md b/spaces/Alpaca233/LangchainPDF/README.md deleted file mode 100644 index 0d57bbab306835700ef362b77c4f5c3b8862647a..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/LangchainPDF/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LangchainPDF -emoji: 🏆 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_2d_condition.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_2d_condition.py deleted file mode 100644 index 4eeb1b926bec972f1c5c94e80f7fcf984dcfd181..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_2d_condition.py +++ /dev/null @@ -1,1107 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import gc -import os -import tempfile -import unittest - -import torch -from parameterized import parameterized -from pytest import mark - -from diffusers import UNet2DConditionModel -from diffusers.models.attention_processor import CustomDiffusionAttnProcessor, LoRAAttnProcessor -from diffusers.utils import ( - floats_tensor, - load_hf_numpy, - logging, - require_torch_gpu, - slow, - torch_all_close, - torch_device, -) -from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import enable_full_determinism - -from .test_modeling_common import ModelTesterMixin, UNetTesterMixin - - -logger = logging.get_logger(__name__) - -enable_full_determinism() - - -def create_lora_layers(model, mock_weights: bool = True): - lora_attn_procs = {} - for name in model.attn_processors.keys(): - cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = model.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(model.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = model.config.block_out_channels[block_id] - - lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) - lora_attn_procs[name] = lora_attn_procs[name].to(model.device) - - if mock_weights: - # add 1 to weights to mock trained weights - with torch.no_grad(): - lora_attn_procs[name].to_q_lora.up.weight += 1 - lora_attn_procs[name].to_k_lora.up.weight += 1 - lora_attn_procs[name].to_v_lora.up.weight += 1 - lora_attn_procs[name].to_out_lora.up.weight += 1 - - return lora_attn_procs - - -def create_custom_diffusion_layers(model, mock_weights: bool = True): - train_kv = True - train_q_out = True - custom_diffusion_attn_procs = {} - - st = model.state_dict() - for name, _ in model.attn_processors.items(): - cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = model.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(model.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = model.config.block_out_channels[block_id] - layer_name = name.split(".processor")[0] - weights = { - "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], - "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], - } - if train_q_out: - weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] - weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] - weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] - if cross_attention_dim is not None: - custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( - train_kv=train_kv, - train_q_out=train_q_out, - hidden_size=hidden_size, - cross_attention_dim=cross_attention_dim, - ).to(model.device) - custom_diffusion_attn_procs[name].load_state_dict(weights) - if mock_weights: - # add 1 to weights to mock trained weights - with torch.no_grad(): - custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1 - custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1 - else: - 
custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( - train_kv=False, - train_q_out=False, - hidden_size=hidden_size, - cross_attention_dim=cross_attention_dim, - ) - del st - return custom_diffusion_attn_procs - - -class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): - model_class = UNet2DConditionModel - main_input_name = "sample" - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 4 - sizes = (32, 32) - - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor([10]).to(torch_device) - encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device) - - return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} - - @property - def input_shape(self): - return (4, 32, 32) - - @property - def output_shape(self): - return (4, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": (32, 64), - "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"), - "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"), - "cross_attention_dim": 32, - "attention_head_dim": 8, - "out_channels": 4, - "in_channels": 4, - "layers_per_block": 2, - "sample_size": 32, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_enable_works(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - - model.enable_xformers_memory_efficient_attention() - - assert ( - model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ - == "XFormersAttnProcessor" - ), "xformers is not enabled" - - @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS") - def test_gradient_checkpointing(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - - assert not model.is_gradient_checkpointing and model.training - - out = model(**inputs_dict).sample - # run the backwards pass on the model. For backwards pass, for simplicity purpose, - # we won't calculate the loss and rather backprop on out.sum() - model.zero_grad() - - labels = torch.randn_like(out) - loss = (out - labels).mean() - loss.backward() - - # re-instantiate the model now enabling gradient checkpointing - model_2 = self.model_class(**init_dict) - # clone model - model_2.load_state_dict(model.state_dict()) - model_2.to(torch_device) - model_2.enable_gradient_checkpointing() - - assert model_2.is_gradient_checkpointing and model_2.training - - out_2 = model_2(**inputs_dict).sample - # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, - # we won't calculate the loss and rather backprop on out.sum() - model_2.zero_grad() - loss_2 = (out_2 - labels).mean() - loss_2.backward() - - # compare the output and parameters gradients - self.assertTrue((loss - loss_2).abs() < 1e-5) - named_params = dict(model.named_parameters()) - named_params_2 = dict(model_2.named_parameters()) - for name, param in named_params.items(): - self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) - - def test_model_with_attention_head_dim_tuple(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_model_with_use_linear_projection(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["use_linear_projection"] = True - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_model_with_cross_attention_dim_tuple(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["cross_attention_dim"] = (32, 32) - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_model_with_simple_projection(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - batch_size, _, _, sample_size = inputs_dict["sample"].shape - - init_dict["class_embed_type"] = "simple_projection" - init_dict["projection_class_embeddings_input_dim"] = sample_size - - inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_model_with_class_embeddings_concat(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - batch_size, _, _, sample_size = inputs_dict["sample"].shape - - init_dict["class_embed_type"] = "simple_projection" - init_dict["projection_class_embeddings_input_dim"] = sample_size - init_dict["class_embeddings_concat"] = True - - inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - 
output = output.sample - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_model_attention_slicing(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - model.set_attention_slice("auto") - with torch.no_grad(): - output = model(**inputs_dict) - assert output is not None - - model.set_attention_slice("max") - with torch.no_grad(): - output = model(**inputs_dict) - assert output is not None - - model.set_attention_slice(2) - with torch.no_grad(): - output = model(**inputs_dict) - assert output is not None - - def test_model_sliceable_head_dim(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - - def check_sliceable_dim_attr(module: torch.nn.Module): - if hasattr(module, "set_attention_slice"): - assert isinstance(module.sliceable_head_dim, int) - - for child in module.children(): - check_sliceable_dim_attr(child) - - # retrieve number of attention layers - for module in model.children(): - check_sliceable_dim_attr(module) - - def test_special_attn_proc(self): - class AttnEasyProc(torch.nn.Module): - def __init__(self, num): - super().__init__() - self.weight = torch.nn.Parameter(torch.tensor(num)) - self.is_run = False - self.number = 0 - self.counter = 0 - - def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None): - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - - query = attn.to_q(hidden_states) - - encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states - key = attn.to_k(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) - - query = attn.head_to_batch_dim(query) - key = attn.head_to_batch_dim(key) - value = attn.head_to_batch_dim(value) - - attention_probs = attn.get_attention_scores(query, key, attention_mask) - hidden_states = torch.bmm(attention_probs, value) - hidden_states = attn.batch_to_head_dim(hidden_states) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - hidden_states += self.weight - - self.is_run = True - self.counter += 1 - self.number = number - - return hidden_states - - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - - processor = AttnEasyProc(5.0) - - model.set_attn_processor(processor) - model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample - - assert processor.counter == 12 - assert processor.is_run - assert processor.number == 123 - - @parameterized.expand( - [ - # fmt: off - [torch.bool], - [torch.long], - [torch.float], - # fmt: on - ] - ) - def test_model_xattn_mask(self, mask_dtype): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) - model.to(torch_device) - model.eval() - - cond = inputs_dict["encoder_hidden_states"] - with torch.no_grad(): - full_cond_out = 
model(**inputs_dict).sample - assert full_cond_out is not None - - keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) - full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample - assert full_cond_keepallmask_out.allclose( - full_cond_out - ), "a 'keep all' mask should give the same result as no mask" - - trunc_cond = cond[:, :-1, :] - trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample - assert not trunc_cond_out.allclose( - full_cond_out - ), "discarding the last token from our cond should change the result" - - batch, tokens, _ = cond.shape - mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) - masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample - assert masked_cond_out.allclose( - trunc_cond_out - ), "masking the last token from our cond should be equivalent to truncating that token out of the condition" - - # see diffusers.models.attention_processor::Attention#prepare_attention_mask - # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. - # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric. - # maybe it's fine that this only works for the unclip use-case. - @mark.skip( - reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length." - ) - def test_model_xattn_padding(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) - model.to(torch_device) - model.eval() - - cond = inputs_dict["encoder_hidden_states"] - with torch.no_grad(): - full_cond_out = model(**inputs_dict).sample - assert full_cond_out is not None - - batch, tokens, _ = cond.shape - keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool) - keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample - assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result" - - trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) - trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample - assert trunc_mask_out.allclose( - keeplast_out - ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." 
- - def test_lora_processors(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - sample1 = model(**inputs_dict).sample - - lora_attn_procs = create_lora_layers(model) - - # make sure we can set a list of attention processors - model.set_attn_processor(lora_attn_procs) - model.to(torch_device) - - # test that attn processors can be set to itself - model.set_attn_processor(model.attn_processors) - - with torch.no_grad(): - sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample - sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - - assert (sample1 - sample2).abs().max() < 3e-3 - assert (sample3 - sample4).abs().max() < 3e-3 - - # sample 2 and sample 3 should be different - assert (sample2 - sample3).abs().max() > 1e-4 - - def test_lora_save_load(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - old_sample = model(**inputs_dict).sample - - lora_attn_procs = create_lora_layers(model) - model.set_attn_processor(lora_attn_procs) - - with torch.no_grad(): - sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_attn_procs(tmpdirname) - self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) - torch.manual_seed(0) - new_model = self.model_class(**init_dict) - new_model.to(torch_device) - new_model.load_attn_procs(tmpdirname) - - with torch.no_grad(): - new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - - assert (sample - new_sample).abs().max() < 1e-4 - - # LoRA and no LoRA should NOT be the same - assert (sample - old_sample).abs().max() > 1e-4 - - def test_lora_save_load_safetensors(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - old_sample = model(**inputs_dict).sample - - lora_attn_procs = create_lora_layers(model) - model.set_attn_processor(lora_attn_procs) - - with torch.no_grad(): - sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_attn_procs(tmpdirname, safe_serialization=True) - self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) - torch.manual_seed(0) - new_model = self.model_class(**init_dict) - new_model.to(torch_device) - new_model.load_attn_procs(tmpdirname) - - with torch.no_grad(): - new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample - - assert (sample - new_sample).abs().max() < 1e-4 - - # LoRA and no LoRA should NOT be the same - assert (sample - old_sample).abs().max() > 1e-4 - - def test_lora_save_safetensors_load_torch(self): - # enable deterministic behavior for gradient checkpointing - init_dict, 
inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - lora_attn_procs = create_lora_layers(model, mock_weights=False) - model.set_attn_processor(lora_attn_procs) - # Saving as torch, properly reloads with directly filename - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_attn_procs(tmpdirname) - self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) - torch.manual_seed(0) - new_model = self.model_class(**init_dict) - new_model.to(torch_device) - new_model.load_attn_procs(tmpdirname, weight_name="pytorch_lora_weights.bin") - - def test_lora_save_torch_force_load_safetensors_error(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - lora_attn_procs = create_lora_layers(model, mock_weights=False) - model.set_attn_processor(lora_attn_procs) - # Saving as torch, properly reloads with directly filename - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_attn_procs(tmpdirname) - self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) - torch.manual_seed(0) - new_model = self.model_class(**init_dict) - new_model.to(torch_device) - with self.assertRaises(IOError) as e: - new_model.load_attn_procs(tmpdirname, use_safetensors=True) - self.assertIn("Error no file named pytorch_lora_weights.safetensors", str(e.exception)) - - def test_lora_on_off(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - old_sample = model(**inputs_dict).sample - - lora_attn_procs = create_lora_layers(model) - model.set_attn_processor(lora_attn_procs) - - with torch.no_grad(): - sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample - - model.set_default_attn_processor() - - with torch.no_grad(): - new_sample = model(**inputs_dict).sample - - assert (sample - new_sample).abs().max() < 1e-4 - assert (sample - old_sample).abs().max() < 3e-3 - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_lora_xformers_on_off(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - lora_attn_procs = create_lora_layers(model) - model.set_attn_processor(lora_attn_procs) - - # default - with torch.no_grad(): - sample = model(**inputs_dict).sample - - model.enable_xformers_memory_efficient_attention() - on_sample = model(**inputs_dict).sample - - model.disable_xformers_memory_efficient_attention() - off_sample = model(**inputs_dict).sample - - assert (sample - on_sample).abs().max() < 1e-4 - assert (sample - off_sample).abs().max() < 1e-4 - - def test_custom_diffusion_processors(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - sample1 = model(**inputs_dict).sample - - custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) - - # make sure we can set a list of attention processors - model.set_attn_processor(custom_diffusion_attn_procs) - model.to(torch_device) - - # test that attn processors can be set to itself - model.set_attn_processor(model.attn_processors) - - with torch.no_grad(): - sample2 = model(**inputs_dict).sample - - assert (sample1 - sample2).abs().max() < 3e-3 - - def test_custom_diffusion_save_load(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - old_sample = model(**inputs_dict).sample - - custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) - model.set_attn_processor(custom_diffusion_attn_procs) - - with torch.no_grad(): - sample = model(**inputs_dict).sample - - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_attn_procs(tmpdirname) - self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin"))) - torch.manual_seed(0) - new_model = self.model_class(**init_dict) - new_model.to(torch_device) - new_model.load_attn_procs(tmpdirname, weight_name="pytorch_custom_diffusion_weights.bin") - - with torch.no_grad(): - new_sample = new_model(**inputs_dict).sample - - assert (sample - new_sample).abs().max() < 1e-4 - - # custom diffusion and no custom diffusion should be the same - assert (sample - old_sample).abs().max() < 3e-3 - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_custom_diffusion_xformers_on_off(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - torch.manual_seed(0) - model = self.model_class(**init_dict) - model.to(torch_device) - custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) - model.set_attn_processor(custom_diffusion_attn_procs) - - # default - with torch.no_grad(): - sample = model(**inputs_dict).sample - - model.enable_xformers_memory_efficient_attention() - on_sample = model(**inputs_dict).sample - - model.disable_xformers_memory_efficient_attention() - off_sample = model(**inputs_dict).sample - - assert (sample - on_sample).abs().max() < 1e-4 - assert (sample - off_sample).abs().max() < 1e-4 - - def test_pickle(self): - # enable deterministic behavior for gradient checkpointing - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - init_dict["attention_head_dim"] = (8, 16) - - model = self.model_class(**init_dict) - model.to(torch_device) - - with torch.no_grad(): - sample = model(**inputs_dict).sample - - sample_copy = copy.copy(sample) - - assert (sample - sample_copy).abs().max() < 1e-4 - - -@slow -class UNet2DConditionModelIntegrationTests(unittest.TestCase): - def get_file_format(self, seed, shape): - return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" - - def 
tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): - dtype = torch.float16 if fp16 else torch.float32 - image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) - return image - - def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): - revision = "fp16" if fp16 else None - torch_dtype = torch.float16 if fp16 else torch.float32 - - model = UNet2DConditionModel.from_pretrained( - model_id, subfolder="unet", torch_dtype=torch_dtype, revision=revision - ) - model.to(torch_device).eval() - - return model - - def test_set_attention_slice_auto(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - unet = self.get_unet_model() - unet.set_attention_slice("auto") - - latents = self.get_latents(33) - encoder_hidden_states = self.get_encoder_hidden_states(33) - timestep = 1 - - with torch.no_grad(): - _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - mem_bytes = torch.cuda.max_memory_allocated() - - assert mem_bytes < 5 * 10**9 - - def test_set_attention_slice_max(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - unet = self.get_unet_model() - unet.set_attention_slice("max") - - latents = self.get_latents(33) - encoder_hidden_states = self.get_encoder_hidden_states(33) - timestep = 1 - - with torch.no_grad(): - _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - mem_bytes = torch.cuda.max_memory_allocated() - - assert mem_bytes < 5 * 10**9 - - def test_set_attention_slice_int(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - unet = self.get_unet_model() - unet.set_attention_slice(2) - - latents = self.get_latents(33) - encoder_hidden_states = self.get_encoder_hidden_states(33) - timestep = 1 - - with torch.no_grad(): - _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - mem_bytes = torch.cuda.max_memory_allocated() - - assert mem_bytes < 5 * 10**9 - - def test_set_attention_slice_list(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - # there are 32 sliceable layers - slice_list = 16 * [2, 3] - unet = self.get_unet_model() - unet.set_attention_slice(slice_list) - - latents = self.get_latents(33) - encoder_hidden_states = self.get_encoder_hidden_states(33) - timestep = 1 - - with torch.no_grad(): - _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - mem_bytes = torch.cuda.max_memory_allocated() - - assert mem_bytes < 5 * 10**9 - - def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): - dtype = torch.float16 if fp16 else torch.float32 - hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) - return hidden_states - - @parameterized.expand( - [ - # fmt: off - [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]], - [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]], - [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]], - [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]], - # fmt: on - ] 
- ) - @require_torch_gpu - def test_compvis_sd_v1_4(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4") - latents = self.get_latents(seed) - encoder_hidden_states = self.get_encoder_hidden_states(seed) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == latents.shape - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) - - @parameterized.expand( - [ - # fmt: off - [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], - [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], - [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], - [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], - # fmt: on - ] - ) - @require_torch_gpu - def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) - latents = self.get_latents(seed, fp16=True) - encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == latents.shape - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) - - @parameterized.expand( - [ - # fmt: off - [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]], - [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]], - [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]], - [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]], - # fmt: on - ] - ) - @require_torch_gpu - def test_compvis_sd_v1_5(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5") - latents = self.get_latents(seed) - encoder_hidden_states = self.get_encoder_hidden_states(seed) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == latents.shape - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) - - @parameterized.expand( - [ - # fmt: off - [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]], - [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]], - [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]], - [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, -0.0280, -1.0020]], - # fmt: on - ] - ) - @require_torch_gpu - def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True) - latents = self.get_latents(seed, fp16=True) - 
encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == latents.shape - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) - - @parameterized.expand( - [ - # fmt: off - [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]], - [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]], - [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]], - [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]], - # fmt: on - ] - ) - @require_torch_gpu - def test_compvis_sd_inpaint(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting") - latents = self.get_latents(seed, shape=(4, 9, 64, 64)) - encoder_hidden_states = self.get_encoder_hidden_states(seed) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == (4, 4, 64, 64) - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) - - @parameterized.expand( - [ - # fmt: off - [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]], - [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]], - [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]], - [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]], - # fmt: on - ] - ) - @require_torch_gpu - def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True) - latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True) - encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) - - timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == (4, 4, 64, 64) - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) - - @parameterized.expand( - [ - # fmt: off - [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], - [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], - [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], - [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], - # fmt: on - ] - ) - @require_torch_gpu - def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice): - model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) - latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) - encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) - - 
timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) - - with torch.no_grad(): - sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample - - assert sample.shape == latents.shape - - output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() - expected_output_slice = torch.tensor(expected_slice) - - assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py deleted file mode 100644 index 7b1acc9fc03e06fe8fbc4dbb93ad465e54201a77..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py +++ /dev/null @@ -1,246 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -import torch -from torch import nn -from transformers import ( - CLIPImageProcessor, - CLIPTextConfig, - CLIPTextModelWithProjection, - CLIPTokenizer, - CLIPVisionConfig, - CLIPVisionModelWithProjection, -) - -from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler -from diffusers.utils import torch_device -from diffusers.utils.testing_utils import enable_full_determinism, skip_mps - -from ..test_pipelines_common import PipelineTesterMixin - - -enable_full_determinism() - - -class Dummies: - @property - def text_embedder_hidden_size(self): - return 32 - - @property - def time_input_dim(self): - return 32 - - @property - def block_out_channels_0(self): - return self.time_input_dim - - @property - def time_embed_dim(self): - return self.time_input_dim * 4 - - @property - def cross_attention_dim(self): - return 100 - - @property - def dummy_tokenizer(self): - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return tokenizer - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=self.text_embedder_hidden_size, - projection_dim=self.text_embedder_hidden_size, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - return CLIPTextModelWithProjection(config) - - @property - def dummy_prior(self): - torch.manual_seed(0) - - model_kwargs = { - "num_attention_heads": 2, - "attention_head_dim": 12, - "embedding_dim": self.text_embedder_hidden_size, - "num_layers": 1, - } - - model = PriorTransformer(**model_kwargs) - # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 - model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) - return model - - @property - def dummy_image_encoder(self): - torch.manual_seed(0) - config = 
CLIPVisionConfig( - hidden_size=self.text_embedder_hidden_size, - image_size=224, - projection_dim=self.text_embedder_hidden_size, - intermediate_size=37, - num_attention_heads=4, - num_channels=3, - num_hidden_layers=5, - patch_size=14, - ) - - model = CLIPVisionModelWithProjection(config) - return model - - @property - def dummy_image_processor(self): - image_processor = CLIPImageProcessor( - crop_size=224, - do_center_crop=True, - do_normalize=True, - do_resize=True, - image_mean=[0.48145466, 0.4578275, 0.40821073], - image_std=[0.26862954, 0.26130258, 0.27577711], - resample=3, - size=224, - ) - - return image_processor - - def get_dummy_components(self): - prior = self.dummy_prior - image_encoder = self.dummy_image_encoder - text_encoder = self.dummy_text_encoder - tokenizer = self.dummy_tokenizer - image_processor = self.dummy_image_processor - - scheduler = UnCLIPScheduler( - variance_type="fixed_small_log", - prediction_type="sample", - num_train_timesteps=1000, - clip_sample=True, - clip_sample_range=10.0, - ) - - components = { - "prior": prior, - "image_encoder": image_encoder, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "scheduler": scheduler, - "image_processor": image_processor, - } - - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "horse", - "generator": generator, - "guidance_scale": 4.0, - "num_inference_steps": 2, - "output_type": "np", - } - return inputs - - -class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyPriorPipeline - params = ["prompt"] - batch_params = ["prompt", "negative_prompt"] - required_optional_params = [ - "num_images_per_prompt", - "generator", - "num_inference_steps", - "latents", - "negative_prompt", - "guidance_scale", - "output_type", - "return_dict", - ] - test_xformers_attention = False - - def get_dummy_components(self): - dummy = Dummies() - return dummy.get_dummy_components() - - def get_dummy_inputs(self, device, seed=0): - dummy = Dummies() - return dummy.get_dummy_inputs(device=device, seed=seed) - - def test_kandinsky_prior(self): - device = "cpu" - - components = self.get_dummy_components() - - pipe = self.pipeline_class(**components) - pipe = pipe.to(device) - - pipe.set_progress_bar_config(disable=None) - - output = pipe(**self.get_dummy_inputs(device)) - image = output.image_embeds - - image_from_tuple = pipe( - **self.get_dummy_inputs(device), - return_dict=False, - )[0] - - image_slice = image[0, -10:] - image_from_tuple_slice = image_from_tuple[0, -10:] - - assert image.shape == (1, 32) - - expected_slice = np.array( - [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - @skip_mps - def test_inference_batch_single_identical(self): - test_max_difference = torch_device == "cpu" - relax_max_difference = True - test_mean_pixel_difference = False - - self._test_inference_batch_single_identical( - test_max_difference=test_max_difference, - relax_max_difference=relax_max_difference, - test_mean_pixel_difference=test_mean_pixel_difference, - ) - - @skip_mps - def test_attention_slicing_forward_pass(self): - test_max_difference = torch_device == "cpu" - test_mean_pixel_difference = False - - 
self._test_attention_slicing_forward_pass( - test_max_difference=test_max_difference, - test_mean_pixel_difference=test_mean_pixel_difference, - ) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py deleted file mode 100644 index 17f27d0d7804dc7d05e0be440306b749fcaf61d6..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py +++ /dev/null @@ -1,285 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import random -import unittest - -import numpy as np -import torch -from PIL import Image - -from diffusers import ( - DDIMScheduler, - KandinskyV22Img2ImgPipeline, - KandinskyV22PriorPipeline, - UNet2DConditionModel, - VQModel, -) -from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu - -from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference - - -enable_full_determinism() - - -class Dummies: - @property - def text_embedder_hidden_size(self): - return 32 - - @property - def time_input_dim(self): - return 32 - - @property - def block_out_channels_0(self): - return self.time_input_dim - - @property - def time_embed_dim(self): - return self.time_input_dim * 4 - - @property - def cross_attention_dim(self): - return 32 - - @property - def dummy_unet(self): - torch.manual_seed(0) - - model_kwargs = { - "in_channels": 4, - # Out channels is double in channels because predicts mean and variance - "out_channels": 8, - "addition_embed_type": "image", - "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), - "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), - "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", - "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), - "layers_per_block": 1, - "encoder_hid_dim": self.text_embedder_hidden_size, - "encoder_hid_dim_type": "image_proj", - "cross_attention_dim": self.cross_attention_dim, - "attention_head_dim": 4, - "resnet_time_scale_shift": "scale_shift", - "class_embed_type": None, - } - - model = UNet2DConditionModel(**model_kwargs) - return model - - @property - def dummy_movq_kwargs(self): - return { - "block_out_channels": [32, 64], - "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], - "in_channels": 3, - "latent_channels": 4, - "layers_per_block": 1, - "norm_num_groups": 8, - "norm_type": "spatial", - "num_vq_embeddings": 12, - "out_channels": 3, - "up_block_types": [ - "AttnUpDecoderBlock2D", - "UpDecoderBlock2D", - ], - "vq_embed_dim": 4, - } - - @property - def dummy_movq(self): - torch.manual_seed(0) - model = VQModel(**self.dummy_movq_kwargs) - return model - - def 
get_dummy_components(self): - unet = self.dummy_unet - movq = self.dummy_movq - - ddim_config = { - "num_train_timesteps": 1000, - "beta_schedule": "linear", - "beta_start": 0.00085, - "beta_end": 0.012, - "clip_sample": False, - "set_alpha_to_one": False, - "steps_offset": 0, - "prediction_type": "epsilon", - "thresholding": False, - } - - scheduler = DDIMScheduler(**ddim_config) - - components = { - "unet": unet, - "scheduler": scheduler, - "movq": movq, - } - - return components - - def get_dummy_inputs(self, device, seed=0): - image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) - negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( - device - ) - # create init_image - image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) - image = image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) - - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "image": init_image, - "image_embeds": image_embeds, - "negative_image_embeds": negative_image_embeds, - "generator": generator, - "height": 64, - "width": 64, - "num_inference_steps": 10, - "guidance_scale": 7.0, - "strength": 0.2, - "output_type": "np", - } - return inputs - - -class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyV22Img2ImgPipeline - params = ["image_embeds", "negative_image_embeds", "image"] - batch_params = [ - "image_embeds", - "negative_image_embeds", - "image", - ] - required_optional_params = [ - "generator", - "height", - "width", - "strength", - "guidance_scale", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - - def get_dummy_components(self): - dummies = Dummies() - return dummies.get_dummy_components() - - def get_dummy_inputs(self, device, seed=0): - dummies = Dummies() - return dummies.get_dummy_inputs(device=device, seed=seed) - - def test_kandinsky_img2img(self): - device = "cpu" - - components = self.get_dummy_components() - - pipe = self.pipeline_class(**components) - pipe = pipe.to(device) - - pipe.set_progress_bar_config(disable=None) - - output = pipe(**self.get_dummy_inputs(device)) - image = output.images - - image_from_tuple = pipe( - **self.get_dummy_inputs(device), - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - - -@slow -@require_torch_gpu -class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_kandinsky_img2img(self): - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - 
"/kandinskyv22/kandinskyv22_img2img_frog.npy" - ) - - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" - ) - prompt = "A red cartoon frog, 4k" - - pipe_prior = KandinskyV22PriorPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 - ) - pipe_prior.to(torch_device) - - pipeline = KandinskyV22Img2ImgPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 - ) - pipeline = pipeline.to(torch_device) - - pipeline.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - image_emb, zero_image_emb = pipe_prior( - prompt, - generator=generator, - num_inference_steps=5, - negative_prompt="", - ).to_tuple() - - output = pipeline( - image=init_image, - image_embeds=image_emb, - negative_image_embeds=zero_image_emb, - generator=generator, - num_inference_steps=100, - height=768, - width=768, - strength=0.2, - output_type="np", - ) - - image = output.images[0] - - assert image.shape == (768, 768, 3) - - assert_mean_pixel_difference(image, expected_image) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py deleted file mode 100644 index 81d1baed5df65dcc0ee6a0848b559ce94761f489..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py +++ /dev/null @@ -1,256 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - EulerAncestralDiscreteScheduler, - PNDMScheduler, - StableDiffusionModelEditingPipeline, - UNet2DConditionModel, -) -from diffusers.utils import slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps - -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin - - -enable_full_determinism() - - -@skip_mps -class StableDiffusionModelEditingPipelineFastTests( - PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionModelEditingPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - scheduler = DDIMScheduler() - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - generator = torch.manual_seed(seed) - inputs = { - "prompt": "A field of roses", - "generator": generator, - # Setting height and width to None to prevent OOMs on CPU. 
- "height": None, - "width": None, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - } - return inputs - - def test_stable_diffusion_model_editing_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionModelEditingPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4755, 0.5132, 0.4976, 0.3904, 0.3554, 0.4765, 0.5139, 0.5158, 0.4889]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_model_editing_negative_prompt(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionModelEditingPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - negative_prompt = "french fries" - output = sd_pipe(**inputs, negative_prompt=negative_prompt) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4992, 0.5101, 0.5004, 0.3949, 0.3604, 0.4735, 0.5216, 0.5204, 0.4913]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_model_editing_euler(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - components["scheduler"] = EulerAncestralDiscreteScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" - ) - sd_pipe = StableDiffusionModelEditingPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4747, 0.5372, 0.4779, 0.4982, 0.5543, 0.4816, 0.5238, 0.4904, 0.5027]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_model_editing_pndm(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - components["scheduler"] = PNDMScheduler() - sd_pipe = StableDiffusionModelEditingPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - # the pipeline does not expect pndm so test if it raises error. 
- with self.assertRaises(ValueError): - _ = sd_pipe(**inputs).images - - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(expected_max_diff=5e-3) - - def test_attention_slicing_forward_pass(self): - super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) - - -@slow -@require_torch_gpu -class StableDiffusionModelEditingSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def get_inputs(self, seed=0): - generator = torch.manual_seed(seed) - inputs = { - "prompt": "A field of roses", - "generator": generator, - "num_inference_steps": 3, - "guidance_scale": 7.5, - "output_type": "numpy", - } - return inputs - - def test_stable_diffusion_model_editing_default(self): - model_ckpt = "CompVis/stable-diffusion-v1-4" - pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt, safety_checker=None) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - assert image.shape == (1, 512, 512, 3) - - expected_slice = np.array( - [0.6749496, 0.6386453, 0.51443267, 0.66094905, 0.61921215, 0.5491332, 0.5744417, 0.58075106, 0.5174658] - ) - - assert np.abs(expected_slice - image_slice).max() < 1e-2 - - # make sure image changes after editing - pipe.edit_model("A pack of roses", "A pack of blue roses") - - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - assert image.shape == (1, 512, 512, 3) - - assert np.abs(expected_slice - image_slice).max() > 1e-1 - - def test_stable_diffusion_model_editing_pipeline_with_sequential_cpu_offloading(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - model_ckpt = "CompVis/stable-diffusion-v1-4" - scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") - pipe = StableDiffusionModelEditingPipeline.from_pretrained( - model_ckpt, scheduler=scheduler, safety_checker=None - ) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing(1) - pipe.enable_sequential_cpu_offload() - - inputs = self.get_inputs() - _ = pipe(**inputs) - - mem_bytes = torch.cuda.max_memory_allocated() - # make sure that less than 4.4 GB is allocated - assert mem_bytes < 4.4 * 10**9 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py deleted file mode 100644 index 131e9402c7eb73f795bb5f260a1c8ae7e8a0d7f9..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py +++ /dev/null @@ -1,409 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - EulerAncestralDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, - StableDiffusionPanoramaPipeline, - UNet2DConditionModel, -) -from diffusers.utils import slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps - -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin - - -enable_full_determinism() - - -@skip_mps -class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): - pipeline_class = StableDiffusionPanoramaPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=1, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - scheduler = DDIMScheduler() - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - generator = torch.manual_seed(seed) - inputs = { - "prompt": "a photo of the dolomites", - "generator": generator, - # Setting height and width to None to prevent OOMs on CPU. 
- "height": None, - "width": None, - "num_inference_steps": 1, - "guidance_scale": 6.0, - "output_type": "numpy", - } - return inputs - - def test_stable_diffusion_panorama_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_circular_padding_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs, circular_padding=True).images - image_slice = image[0, -3:, -3:, -1] - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - # override to speed the overall test timing up. - def test_inference_batch_consistent(self): - super().test_inference_batch_consistent(batch_sizes=[1, 2]) - - # override to speed the overall test timing up. - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3) - - def test_stable_diffusion_panorama_negative_prompt(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - negative_prompt = "french fries" - output = sd_pipe(**inputs, negative_prompt=negative_prompt) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_views_batch(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output = sd_pipe(**inputs, view_batch_size=2) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_views_batch_circular_padding(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - 
sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output = sd_pipe(**inputs, circular_padding=True, view_batch_size=2) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_euler(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - components["scheduler"] = EulerAncestralDiscreteScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" - ) - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_pndm(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - components["scheduler"] = PNDMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True - ) - sd_pipe = StableDiffusionPanoramaPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - -@slow -@require_torch_gpu -class StableDiffusionPanoramaSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def get_inputs(self, seed=0): - generator = torch.manual_seed(seed) - inputs = { - "prompt": "a photo of the dolomites", - "generator": generator, - "num_inference_steps": 3, - "guidance_scale": 7.5, - "output_type": "numpy", - } - return inputs - - def test_stable_diffusion_panorama_default(self): - model_ckpt = "stabilityai/stable-diffusion-2-base" - scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") - pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - assert image.shape == (1, 512, 2048, 3) - - expected_slice = np.array( - [ - 0.36968392, - 0.27025372, - 0.32446766, - 0.28379387, - 0.36363274, - 0.30733347, - 0.27100027, - 0.27054125, - 0.25536096, - ] - ) - - assert np.abs(expected_slice - image_slice).max() < 1e-2 - - def test_stable_diffusion_panorama_k_lms(self): - pipe = StableDiffusionPanoramaPipeline.from_pretrained( - "stabilityai/stable-diffusion-2-base", safety_checker=None - ) - pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - 
pipe.enable_attention_slicing() - - inputs = self.get_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - assert image.shape == (1, 512, 2048, 3) - - expected_slice = np.array( - [ - [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ] - ) - - assert np.abs(expected_slice - image_slice).max() < 1e-3 - - def test_stable_diffusion_panorama_intermediate_state(self): - number_of_steps = 0 - - def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: - callback_fn.has_been_called = True - nonlocal number_of_steps - number_of_steps += 1 - if step == 1: - latents = latents.detach().cpu().numpy() - assert latents.shape == (1, 4, 64, 256) - latents_slice = latents[0, -3:, -3:, -1] - - expected_slice = np.array( - [ - 0.18681869, - 0.33907816, - 0.5361276, - 0.14432865, - -0.02856611, - -0.73941123, - 0.23397987, - 0.47322682, - -0.37823164, - ] - ) - assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 - elif step == 2: - latents = latents.detach().cpu().numpy() - assert latents.shape == (1, 4, 64, 256) - latents_slice = latents[0, -3:, -3:, -1] - - expected_slice = np.array( - [ - 0.18539645, - 0.33987248, - 0.5378559, - 0.14437142, - -0.02455261, - -0.7338317, - 0.23990755, - 0.47356272, - -0.3786505, - ] - ) - - assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 - - callback_fn.has_been_called = False - - model_ckpt = "stabilityai/stable-diffusion-2-base" - scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") - pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs() - pipe(**inputs, callback=callback_fn, callback_steps=1) - assert callback_fn.has_been_called - assert number_of_steps == 3 - - def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - model_ckpt = "stabilityai/stable-diffusion-2-base" - scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") - pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing(1) - pipe.enable_sequential_cpu_offload() - - inputs = self.get_inputs() - _ = pipe(**inputs) - - mem_bytes = torch.cuda.max_memory_allocated() - # make sure that less than 5.2 GB is allocated - assert mem_bytes < 5.5 * 10**9 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipeline_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipeline_utils.py deleted file mode 100644 index 51d987d8bb1151862f910822eb2c173ce4ff313c..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipeline_utils.py +++ /dev/null @@ -1,134 +0,0 @@ -import unittest - -from diffusers.pipelines.pipeline_utils import is_safetensors_compatible - - -class IsSafetensorsCompatibleTests(unittest.TestCase): - def test_all_is_compatible(self): - filenames = [ - "safety_checker/pytorch_model.bin", - "safety_checker/model.safetensors", - "vae/diffusion_pytorch_model.bin", - "vae/diffusion_pytorch_model.safetensors", - 
"text_encoder/pytorch_model.bin", - "text_encoder/model.safetensors", - "unet/diffusion_pytorch_model.bin", - "unet/diffusion_pytorch_model.safetensors", - ] - self.assertTrue(is_safetensors_compatible(filenames)) - - def test_diffusers_model_is_compatible(self): - filenames = [ - "unet/diffusion_pytorch_model.bin", - "unet/diffusion_pytorch_model.safetensors", - ] - self.assertTrue(is_safetensors_compatible(filenames)) - - def test_diffusers_model_is_not_compatible(self): - filenames = [ - "safety_checker/pytorch_model.bin", - "safety_checker/model.safetensors", - "vae/diffusion_pytorch_model.bin", - "vae/diffusion_pytorch_model.safetensors", - "text_encoder/pytorch_model.bin", - "text_encoder/model.safetensors", - "unet/diffusion_pytorch_model.bin", - # Removed: 'unet/diffusion_pytorch_model.safetensors', - ] - self.assertFalse(is_safetensors_compatible(filenames)) - - def test_transformer_model_is_compatible(self): - filenames = [ - "text_encoder/pytorch_model.bin", - "text_encoder/model.safetensors", - ] - self.assertTrue(is_safetensors_compatible(filenames)) - - def test_transformer_model_is_not_compatible(self): - filenames = [ - "safety_checker/pytorch_model.bin", - "safety_checker/model.safetensors", - "vae/diffusion_pytorch_model.bin", - "vae/diffusion_pytorch_model.safetensors", - "text_encoder/pytorch_model.bin", - # Removed: 'text_encoder/model.safetensors', - "unet/diffusion_pytorch_model.bin", - "unet/diffusion_pytorch_model.safetensors", - ] - self.assertFalse(is_safetensors_compatible(filenames)) - - def test_all_is_compatible_variant(self): - filenames = [ - "safety_checker/pytorch_model.fp16.bin", - "safety_checker/model.fp16.safetensors", - "vae/diffusion_pytorch_model.fp16.bin", - "vae/diffusion_pytorch_model.fp16.safetensors", - "text_encoder/pytorch_model.fp16.bin", - "text_encoder/model.fp16.safetensors", - "unet/diffusion_pytorch_model.fp16.bin", - "unet/diffusion_pytorch_model.fp16.safetensors", - ] - variant = "fp16" - self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) - - def test_diffusers_model_is_compatible_variant(self): - filenames = [ - "unet/diffusion_pytorch_model.fp16.bin", - "unet/diffusion_pytorch_model.fp16.safetensors", - ] - variant = "fp16" - self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) - - def test_diffusers_model_is_compatible_variant_partial(self): - # pass variant but use the non-variant filenames - filenames = [ - "unet/diffusion_pytorch_model.bin", - "unet/diffusion_pytorch_model.safetensors", - ] - variant = "fp16" - self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) - - def test_diffusers_model_is_not_compatible_variant(self): - filenames = [ - "safety_checker/pytorch_model.fp16.bin", - "safety_checker/model.fp16.safetensors", - "vae/diffusion_pytorch_model.fp16.bin", - "vae/diffusion_pytorch_model.fp16.safetensors", - "text_encoder/pytorch_model.fp16.bin", - "text_encoder/model.fp16.safetensors", - "unet/diffusion_pytorch_model.fp16.bin", - # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', - ] - variant = "fp16" - self.assertFalse(is_safetensors_compatible(filenames, variant=variant)) - - def test_transformer_model_is_compatible_variant(self): - filenames = [ - "text_encoder/pytorch_model.fp16.bin", - "text_encoder/model.fp16.safetensors", - ] - variant = "fp16" - self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) - - def test_transformer_model_is_compatible_variant_partial(self): - # pass variant but use the non-variant filenames - filenames = 
[ - "text_encoder/pytorch_model.bin", - "text_encoder/model.safetensors", - ] - variant = "fp16" - self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) - - def test_transformer_model_is_not_compatible_variant(self): - filenames = [ - "safety_checker/pytorch_model.fp16.bin", - "safety_checker/model.fp16.safetensors", - "vae/diffusion_pytorch_model.fp16.bin", - "vae/diffusion_pytorch_model.fp16.safetensors", - "text_encoder/pytorch_model.fp16.bin", - # 'text_encoder/model.fp16.safetensors', - "unet/diffusion_pytorch_model.fp16.bin", - "unet/diffusion_pytorch_model.fp16.safetensors", - ] - variant = "fp16" - self.assertFalse(is_safetensors_compatible(filenames, variant=variant)) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py b/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py deleted file mode 100644 index fb2f2d1e13b8c97dbf5f785dadebcccf874ff7be..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - rpn_head=dict( - type='RPNHead', - anchor_generator=dict( - type='LegacyAnchorGenerator', - center_offset=0.5, - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn_proposal=dict(max_per_img=2000), - rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 16edd99de295161a3c246243e8c482ede4e5bdae..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py' - -model = dict( - roi_head=dict( - type='PISARoIHead', - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - sampler=dict( - type='ScoreHLRSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0.), - isr=dict(k=2, bias=0), - carl=dict(k=1, bias=0.2))), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/psanet_r50-d8.py 
b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/psanet_r50-d8.py deleted file mode 100644 index 689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/psanet_r50-d8.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/Apex-X/ROOPOK/roop/__init__.py b/spaces/Apex-X/ROOPOK/roop/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Apex-X/Tm/roop/processors/frame/face_enhancer.py b/spaces/Apex-X/Tm/roop/processors/frame/face_enhancer.py deleted file mode 100644 index 3ff92ce9d38420e273970c0777a108b14e7fd26b..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/Tm/roop/processors/frame/face_enhancer.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import threading -import gfpgan - -import roop.globals -import roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face -from roop.typing import Frame, Face -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video - -FACE_ENHANCER = None -THREAD_SEMAPHORE = threading.Semaphore() -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-ENHANCER' - - -def get_face_enhancer() -> Any: - global FACE_ENHANCER - - with THREAD_LOCK: - if FACE_ENHANCER is None: - model_path = resolve_relative_path('../models/GFPGANv1.4.pth') - # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399 - FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined] - return FACE_ENHANCER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_ENHANCER - - FACE_ENHANCER = None - - -def enhance_face(temp_frame: Frame) -> Frame: - with THREAD_SEMAPHORE: - _, _, temp_frame = get_face_enhancer().enhance( - temp_frame, - paste_back=True - ) - 
return temp_frame - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = enhance_face(temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(None, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> None: - target_frame = cv2.imread(target_path) - result = process_frame(None, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/app_utils.py b/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/app_utils.py deleted file mode 100644 index 1dca9f1b020b274e6c2596cd4052bb797f59becf..0000000000000000000000000000000000000000 --- a/spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/app_utils.py +++ /dev/null @@ -1,131 +0,0 @@ -import os -import requests -import random -import _thread as thread -from uuid import uuid4 -import urllib - -import numpy as np -import skimage -from skimage.filters import gaussian -from PIL import Image - -def compress_image(image, path_original): - size = 1920, 1080 - width = 1920 - height = 1080 - - name = os.path.basename(path_original).split('.') - first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg') - - if image.size[0] > width and image.size[1] > height: - image.thumbnail(size, Image.ANTIALIAS) - image.save(first_name, quality=85) - elif image.size[0] > width: - wpercent = (width/float(image.size[0])) - height = int((float(image.size[1])*float(wpercent))) - image = image.resize((width,height), Image.ANTIALIAS) - image.save(first_name,quality=85) - elif image.size[1] > height: - wpercent = (height/float(image.size[1])) - width = int((float(image.size[0])*float(wpercent))) - image = image.resize((width,height), Image.ANTIALIAS) - image.save(first_name, quality=85) - else: - image.save(first_name, quality=85) - - -def convertToJPG(path_original): - img = Image.open(path_original) - name = os.path.basename(path_original).split('.') - first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg') - - if img.format == "JPEG": - image = img.convert('RGB') - compress_image(image, path_original) - img.close() - - elif img.format == "GIF": - i = img.convert("RGBA") - bg = Image.new("RGBA", i.size) - image = Image.composite(i, bg, i) - compress_image(image, path_original) - img.close() - - elif img.format == "PNG": - try: - image = Image.new("RGB", img.size, (255,255,255)) - image.paste(img,img) - compress_image(image, path_original) - except ValueError: - image = img.convert('RGB') - compress_image(image, path_original) - - img.close() - - elif img.format == "BMP": - image = img.convert('RGB') - compress_image(image, path_original) - img.close() - - - -def blur(image, x0, x1, y0, y1, sigma=1, multichannel=True): - y0, y1 = min(y0, y1), max(y0, y1) - x0, x1 = min(x0, x1), max(x0, x1) - im = image.copy() - sub_im = im[y0:y1,x0:x1].copy() - blur_sub_im = gaussian(sub_im, sigma=sigma, multichannel=multichannel) - blur_sub_im = np.round(255 * blur_sub_im) - im[y0:y1,x0:x1] = blur_sub_im - return im - - - -def download(url, 
filename): - data = requests.get(url).content - with open(filename, 'wb') as handler: - handler.write(data) - - return filename - - -def generate_random_filename(upload_directory, extension): - filename = str(uuid4()) - filename = os.path.join(upload_directory, filename + "." + extension) - return filename - - -def clean_me(filename): - if os.path.exists(filename): - os.remove(filename) - - -def clean_all(files): - for me in files: - clean_me(me) - - -def create_directory(path): - os.makedirs(os.path.dirname(path), exist_ok=True) - - -def get_model_bin(url, output_path): - # print('Getting model dir: ', output_path) - if not os.path.exists(output_path): - create_directory(output_path) - - urllib.request.urlretrieve(url, output_path) - - # cmd = "wget -O %s %s" % (output_path, url) - # print(cmd) - # os.system(cmd) - - return output_path - - -#model_list = [(url, output_path), (url, output_path)] -def get_multi_model_bin(model_list): - for m in model_list: - thread.start_new_thread(get_model_bin, m) - diff --git a/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio.py b/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio.py deleted file mode 100644 index 2048df6f175d7303bcf5c7b931922fd297908ead..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Audio IO methods are defined in this module (info, read, write), -We rely on av library for faster read when possible, otherwise on torchaudio. -""" - -from dataclasses import dataclass -from pathlib import Path -import logging -import typing as tp - -import numpy as np -import soundfile -import torch -from torch.nn import functional as F -import torchaudio as ta - -import av - -from .audio_utils import f32_pcm, i16_pcm, normalize_audio - - -_av_initialized = False - - -def _init_av(): - global _av_initialized - if _av_initialized: - return - logger = logging.getLogger('libav.mp3') - logger.setLevel(logging.ERROR) - _av_initialized = True - - -@dataclass(frozen=True) -class AudioFileInfo: - sample_rate: int - duration: float - channels: int - - -def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sample_rate = stream.codec_context.sample_rate - duration = float(stream.duration * stream.time_base) - channels = stream.channels - return AudioFileInfo(sample_rate, duration, channels) - - -def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - info = soundfile.info(filepath) - return AudioFileInfo(info.samplerate, info.duration, info.channels) - - -def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - # torchaudio no longer returns useful duration informations for some formats like mp3s. - filepath = Path(filepath) - if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info - # ffmpeg has some weird issue with flac. - return _soundfile_info(filepath) - else: - return _av_info(filepath) - - -def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]: - """FFMPEG-based audio file reading using PyAV bindings. - Soundfile cannot read mp3 and av_read is more efficient than torchaudio. - - Args: - filepath (str or Path): Path to audio file to read. 
- seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate - """ - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sr = stream.codec_context.sample_rate - num_frames = int(sr * duration) if duration >= 0 else -1 - frame_offset = int(sr * seek_time) - # we need a small negative offset otherwise we get some edge artifact - # from the mp3 decoder. - af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream) - frames = [] - length = 0 - for frame in af.decode(streams=stream.index): - current_offset = int(frame.rate * frame.pts * frame.time_base) - strip = max(0, frame_offset - current_offset) - buf = torch.from_numpy(frame.to_ndarray()) - if buf.shape[0] != stream.channels: - buf = buf.view(-1, stream.channels).t() - buf = buf[:, strip:] - frames.append(buf) - length += buf.shape[1] - if num_frames > 0 and length >= num_frames: - break - assert frames - # If the above assert fails, it is likely because we seeked past the end of file point, - # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp. - # This will need proper debugging, in due time. - wav = torch.cat(frames, dim=1) - assert wav.shape[0] == stream.channels - if num_frames > 0: - wav = wav[:, :num_frames] - return f32_pcm(wav), sr - - -def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0., - duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]: - """Read audio by picking the most appropriate backend tool based on the audio format. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - pad (bool): Pad output audio if not reaching expected duration. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate. - """ - fp = Path(filepath) - if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg - # There is some bug with ffmpeg and reading flac - info = _soundfile_info(filepath) - frames = -1 if duration <= 0 else int(duration * info.sample_rate) - frame_offset = int(seek_time * info.sample_rate) - wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32) - assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}" - wav = torch.from_numpy(wav).t().contiguous() - if len(wav.shape) == 1: - wav = torch.unsqueeze(wav, 0) - elif ( - fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats() - and duration <= 0 and seek_time == 0 - ): - # Torchaudio is faster if we load an entire file at once. 
- wav, sr = ta.load(fp) - else: - wav, sr = _av_read(filepath, seek_time, duration) - if pad and duration > 0: - expected_frames = int(duration * sr) - wav = F.pad(wav, (0, expected_frames - wav.shape[-1])) - return wav, sr - - -def audio_write(stem_name: tp.Union[str, Path], - wav: torch.Tensor, sample_rate: int, - format: str = 'wav', mp3_rate: int = 320, normalize: bool = True, - strategy: str = 'peak', peak_clip_headroom_db: float = 1, - rms_headroom_db: float = 18, loudness_headroom_db: float = 14, - loudness_compressor: bool = False, - log_clipping: bool = True, make_parent_dir: bool = True, - add_suffix: bool = True) -> Path: - """Convenience function for saving audio to disk. Returns the filename the audio was written to. - - Args: - stem_name (str or Path): Filename without extension which will be added automatically. - format (str): Either "wav" or "mp3". - mp3_rate (int): kbps when using mp3s. - normalize (bool): if `True` (default), normalizes according to the prescribed - strategy (see after). If `False`, the strategy is only used in case clipping - would happen. - strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', - i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square - with extra headroom to avoid clipping. 'clip' just clips. - peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. - rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger - than the `peak_clip` one to avoid further clipping. - loudness_headroom_db (float): Target loudness for loudness normalization. - loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'. - when strategy is 'loudness'log_clipping (bool): If True, basic logging on stderr when clipping still - occurs despite strategy (only for 'rms'). - make_parent_dir (bool): Make parent directory if it doesn't exist. - Returns: - Path: Path of the saved audio. - """ - assert wav.dtype.is_floating_point, "wav is not floating point" - if wav.dim() == 1: - wav = wav[None] - elif wav.dim() > 2: - raise ValueError("Input wav should be at most 2 dimension.") - assert wav.isfinite().all() - wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db, - rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping, - sample_rate=sample_rate, stem_name=str(stem_name)) - kwargs: dict = {} - if format == 'mp3': - suffix = '.mp3' - kwargs.update({"compression": mp3_rate}) - elif format == 'wav': - wav = i16_pcm(wav) - suffix = '.wav' - kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16}) - else: - raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.") - if not add_suffix: - suffix = '' - path = Path(str(stem_name) + suffix) - if make_parent_dir: - path.parent.mkdir(exist_ok=True, parents=True) - try: - ta.save(path, wav, sample_rate, **kwargs) - except Exception: - if path.exists(): - # we do not want to leave half written files around. - path.unlink() - raise - return path diff --git a/spaces/Audio-AGI/AudioSep/models/base.py b/spaces/Audio-AGI/AudioSep/models/base.py deleted file mode 100644 index 6b70dd804dcf9b9cf3a9aacd84c707852bab2d7c..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/AudioSep/models/base.py +++ /dev/null @@ -1,152 +0,0 @@ -import torch.nn as nn -import torch -import numpy as np -import torch.nn.functional as F -import math -from torchlibrosa.stft import magphase - - -def init_layer(layer): - """Initialize a Linear or Convolutional layer. 
""" - nn.init.xavier_uniform_(layer.weight) - - if hasattr(layer, "bias"): - if layer.bias is not None: - layer.bias.data.fill_(0.0) - - -def init_bn(bn): - """Initialize a Batchnorm layer. """ - bn.bias.data.fill_(0.0) - bn.weight.data.fill_(1.0) - - -def init_embedding(layer): - """Initialize a Linear or Convolutional layer. """ - nn.init.uniform_(layer.weight, -1., 1.) - - if hasattr(layer, 'bias'): - if layer.bias is not None: - layer.bias.data.fill_(0.) - - -def init_gru(rnn): - """Initialize a GRU layer. """ - - def _concat_init(tensor, init_funcs): - (length, fan_out) = tensor.shape - fan_in = length // len(init_funcs) - - for (i, init_func) in enumerate(init_funcs): - init_func(tensor[i * fan_in : (i + 1) * fan_in, :]) - - def _inner_uniform(tensor): - fan_in = nn.init._calculate_correct_fan(tensor, "fan_in") - nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in)) - - for i in range(rnn.num_layers): - _concat_init( - getattr(rnn, "weight_ih_l{}".format(i)), - [_inner_uniform, _inner_uniform, _inner_uniform], - ) - torch.nn.init.constant_(getattr(rnn, "bias_ih_l{}".format(i)), 0) - - _concat_init( - getattr(rnn, "weight_hh_l{}".format(i)), - [_inner_uniform, _inner_uniform, nn.init.orthogonal_], - ) - torch.nn.init.constant_(getattr(rnn, "bias_hh_l{}".format(i)), 0) - - -def act(x, activation): - if activation == "relu": - return F.relu_(x) - - elif activation == "leaky_relu": - return F.leaky_relu_(x, negative_slope=0.01) - - elif activation == "swish": - return x * torch.sigmoid(x) - - else: - raise Exception("Incorrect activation!") - - -class Base: - def __init__(self): - pass - - def spectrogram(self, input, eps=0.): - (real, imag) = self.stft(input) - return torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5 - - def spectrogram_phase(self, input, eps=0.): - (real, imag) = self.stft(input) - mag = torch.clamp(real ** 2 + imag ** 2, eps, np.inf) ** 0.5 - cos = real / mag - sin = imag / mag - return mag, cos, sin - - - def wav_to_spectrogram_phase(self, input, eps=1e-10): - """Waveform to spectrogram. - - Args: - input: (batch_size, segment_samples, channels_num) - - Outputs: - output: (batch_size, channels_num, time_steps, freq_bins) - """ - sp_list = [] - cos_list = [] - sin_list = [] - channels_num = input.shape[1] - for channel in range(channels_num): - mag, cos, sin = self.spectrogram_phase(input[:, channel, :], eps=eps) - sp_list.append(mag) - cos_list.append(cos) - sin_list.append(sin) - - sps = torch.cat(sp_list, dim=1) - coss = torch.cat(cos_list, dim=1) - sins = torch.cat(sin_list, dim=1) - return sps, coss, sins - - def wav_to_spectrogram(self, input, eps=0.): - """Waveform to spectrogram. - - Args: - input: (batch_size, segment_samples, channels_num) - - Outputs: - output: (batch_size, channels_num, time_steps, freq_bins) - """ - sp_list = [] - channels_num = input.shape[1] - for channel in range(channels_num): - sp_list.append(self.spectrogram(input[:, channel, :], eps=eps)) - - output = torch.cat(sp_list, dim=1) - return output - - - def spectrogram_to_wav(self, input, spectrogram, length=None): - """Spectrogram to waveform. 
- - Args: - input: (batch_size, segment_samples, channels_num) - spectrogram: (batch_size, channels_num, time_steps, freq_bins) - - Outputs: - output: (batch_size, segment_samples, channels_num) - """ - channels_num = input.shape[1] - wav_list = [] - for channel in range(channels_num): - (real, imag) = self.stft(input[:, channel, :]) - (_, cos, sin) = magphase(real, imag) - wav_list.append(self.istft(spectrogram[:, channel : channel + 1, :, :] * cos, - spectrogram[:, channel : channel + 1, :, :] * sin, length)) - - output = torch.stack(wav_list, dim=1) - return output diff --git a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/model.py b/spaces/AvaterClasher/Food_Classifier_Refined_MONI/model.py deleted file mode 100644 index 2060a8a6ae4f6692cc634c067a876cb1daea285b..0000000000000000000000000000000000000000 --- a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/model.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -import torchvision - -from torch import nn - -def create_effnetb2_model(num_classes:int=3, # default output classes = 3 (pizza, steak, sushi) - seed:int=42): - # 1, 2, 3 Create EffNetB2 pretrained weights, transforms and model - weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT - transforms = weights.transforms() - model = torchvision.models.efficientnet_b2(weights=weights) - - # 4. Freeze all layers in the base model - for param in model.parameters(): - param.requires_grad = False - - # 5. Change classifier head with random seed for reproducibility - torch.manual_seed(seed) - model.classifier = nn.Sequential( - nn.Dropout(p=0.3, inplace=True), - nn.Linear(in_features=1408, out_features=num_classes) - ) - - return model, transforms diff --git a/spaces/Awesimo/jojogan/e4e/models/encoders/model_irse.py b/spaces/Awesimo/jojogan/e4e/models/encoders/model_irse.py deleted file mode 100644 index 6a94d67542f961ff6533f0335cf4cb0fa54024fb..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/e4e/models/encoders/model_irse.py +++ /dev/null @@ -1,84 +0,0 @@ -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module -from e4e.models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm - -""" -Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) -""" - - -class Backbone(Module): - def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): - super(Backbone, self).__init__() - assert input_size in [112, 224], "input_size should be 112 or 224" - assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" - assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - if input_size == 112: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 7 * 7, 512), - BatchNorm1d(512, affine=affine)) - else: - self.output_layer = Sequential(BatchNorm2d(512), - Dropout(drop_ratio), - Flatten(), - Linear(512 * 14 * 14, 512), - BatchNorm1d(512, affine=affine)) - - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - def forward(self, x): - x = self.input_layer(x) - x = self.body(x) - x = 
self.output_layer(x) - return l2_norm(x) - - -def IR_50(input_size): - """Constructs a ir-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_101(input_size): - """Constructs a ir-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_152(input_size): - """Constructs a ir-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_50(input_size): - """Constructs a ir_se-50 model.""" - model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_101(input_size): - """Constructs a ir_se-101 model.""" - model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False) - return model - - -def IR_SE_152(input_size): - """Constructs a ir_se-152 model.""" - model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False) - return model diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py deleted file mode 100644 index 652a34a9aef2d4004f46ad7814befe6d1c230bc4..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py +++ /dev/null @@ -1,614 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -""" -Implement many useful :class:`Augmentation`. -""" -import numpy as np -import sys -from typing import Tuple -import torch -from fvcore.transforms.transform import ( - BlendTransform, - CropTransform, - HFlipTransform, - NoOpTransform, - PadTransform, - Transform, - TransformList, - VFlipTransform, -) -from PIL import Image - -from .augmentation import Augmentation, _transform_to_aug -from .transform import ExtentTransform, ResizeTransform, RotationTransform - -__all__ = [ - "FixedSizeCrop", - "RandomApply", - "RandomBrightness", - "RandomContrast", - "RandomCrop", - "RandomExtent", - "RandomFlip", - "RandomSaturation", - "RandomLighting", - "RandomRotation", - "Resize", - "ResizeScale", - "ResizeShortestEdge", - "RandomCrop_CategoryAreaConstraint", -] - - -class RandomApply(Augmentation): - """ - Randomly apply an augmentation with a given probability. - """ - - def __init__(self, tfm_or_aug, prob=0.5): - """ - Args: - tfm_or_aug (Transform, Augmentation): the transform or augmentation - to be applied. It can either be a `Transform` or `Augmentation` - instance. - prob (float): probability between 0.0 and 1.0 that - the wrapper transformation is applied - """ - super().__init__() - self.aug = _transform_to_aug(tfm_or_aug) - assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})" - self.prob = prob - - def get_transform(self, *args): - do = self._rand_range() < self.prob - if do: - return self.aug.get_transform(*args) - else: - return NoOpTransform() - - def __call__(self, aug_input): - do = self._rand_range() < self.prob - if do: - return self.aug(aug_input) - else: - return NoOpTransform() - - -class RandomFlip(Augmentation): - """ - Flip the image horizontally or vertically with the given probability. - """ - - def __init__(self, prob=0.5, *, horizontal=True, vertical=False): - """ - Args: - prob (float): probability of flip. 
- horizontal (boolean): whether to apply horizontal flipping - vertical (boolean): whether to apply vertical flipping - """ - super().__init__() - - if horizontal and vertical: - raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.") - if not horizontal and not vertical: - raise ValueError("At least one of horiz or vert has to be True!") - self._init(locals()) - - def get_transform(self, image): - h, w = image.shape[:2] - do = self._rand_range() < self.prob - if do: - if self.horizontal: - return HFlipTransform(w) - elif self.vertical: - return VFlipTransform(h) - else: - return NoOpTransform() - - -class Resize(Augmentation): - """Resize image to a fixed target size""" - - def __init__(self, shape, interp=Image.BILINEAR): - """ - Args: - shape: (h, w) tuple or a int - interp: PIL interpolation method - """ - if isinstance(shape, int): - shape = (shape, shape) - shape = tuple(shape) - self._init(locals()) - - def get_transform(self, image): - return ResizeTransform( - image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp - ) - - -class ResizeShortestEdge(Augmentation): - """ - Resize the image while keeping the aspect ratio unchanged. - It attempts to scale the shorter edge to the given `short_edge_length`, - as long as the longer edge does not exceed `max_size`. - If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. - """ - - @torch.jit.unused - def __init__( - self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR - ): - """ - Args: - short_edge_length (list[int]): If ``sample_style=="range"``, - a [min, max] interval from which to sample the shortest edge length. - If ``sample_style=="choice"``, a list of shortest edge lengths to sample from. - max_size (int): maximum allowed longest edge length. - sample_style (str): either "range" or "choice". - """ - super().__init__() - assert sample_style in ["range", "choice"], sample_style - - self.is_range = sample_style == "range" - if isinstance(short_edge_length, int): - short_edge_length = (short_edge_length, short_edge_length) - if self.is_range: - assert len(short_edge_length) == 2, ( - "short_edge_length must be two values using 'range' sample style." - f" Got {short_edge_length}!" - ) - self._init(locals()) - - @torch.jit.unused - def get_transform(self, image): - h, w = image.shape[:2] - if self.is_range: - size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) - else: - size = np.random.choice(self.short_edge_length) - if size == 0: - return NoOpTransform() - - newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size) - return ResizeTransform(h, w, newh, neww, self.interp) - - @staticmethod - def get_output_shape( - oldh: int, oldw: int, short_edge_length: int, max_size: int - ) -> Tuple[int, int]: - """ - Compute the output size given input size and target short edge length. - """ - h, w = oldh, oldw - size = short_edge_length * 1.0 - scale = size / min(h, w) - if h < w: - newh, neww = size, scale * w - else: - newh, neww = scale * h, size - if max(newh, neww) > max_size: - scale = max_size * 1.0 / max(newh, neww) - newh = newh * scale - neww = neww * scale - neww = int(neww + 0.5) - newh = int(newh + 0.5) - return (newh, neww) - - -class ResizeScale(Augmentation): - """ - Takes target size as input and randomly scales the given target size between `min_scale` - and `max_scale`. 
It then scales the input image such that it fits inside the scaled target - box, keeping the aspect ratio constant. - This implements the resize part of the Google's 'resize_and_crop' data augmentation: - https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127 - """ - - def __init__( - self, - min_scale: float, - max_scale: float, - target_height: int, - target_width: int, - interp: int = Image.BILINEAR, - ): - """ - Args: - min_scale: minimum image scale range. - max_scale: maximum image scale range. - target_height: target image height. - target_width: target image width. - interp: image interpolation method. - """ - super().__init__() - self._init(locals()) - - def _get_resize(self, image: np.ndarray, scale: float) -> Transform: - input_size = image.shape[:2] - - # Compute new target size given a scale. - target_size = (self.target_height, self.target_width) - target_scale_size = np.multiply(target_size, scale) - - # Compute actual rescaling applied to input image and output size. - output_scale = np.minimum( - target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1] - ) - output_size = np.round(np.multiply(input_size, output_scale)).astype(int) - - return ResizeTransform( - input_size[0], input_size[1], output_size[0], output_size[1], self.interp - ) - - def get_transform(self, image: np.ndarray) -> Transform: - random_scale = np.random.uniform(self.min_scale, self.max_scale) - return self._get_resize(image, random_scale) - - -class RandomRotation(Augmentation): - """ - This method returns a copy of this image, rotated the given - number of degrees counter clockwise around the given center. - """ - - def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None): - """ - Args: - angle (list[float]): If ``sample_style=="range"``, - a [min, max] interval from which to sample the angle (in degrees). - If ``sample_style=="choice"``, a list of angles to sample from - expand (bool): choose if the image should be resized to fit the whole - rotated image (default), or simply cropped - center (list[[float, float]]): If ``sample_style=="range"``, - a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center, - [0, 0] being the top left of the image and [1, 1] the bottom right. 
- If ``sample_style=="choice"``, a list of centers to sample from - Default: None, which means that the center of rotation is the center of the image - center has no effect if expand=True because it only affects shifting - """ - super().__init__() - assert sample_style in ["range", "choice"], sample_style - self.is_range = sample_style == "range" - if isinstance(angle, (float, int)): - angle = (angle, angle) - if center is not None and isinstance(center[0], (float, int)): - center = (center, center) - self._init(locals()) - - def get_transform(self, image): - h, w = image.shape[:2] - center = None - if self.is_range: - angle = np.random.uniform(self.angle[0], self.angle[1]) - if self.center is not None: - center = ( - np.random.uniform(self.center[0][0], self.center[1][0]), - np.random.uniform(self.center[0][1], self.center[1][1]), - ) - else: - angle = np.random.choice(self.angle) - if self.center is not None: - center = np.random.choice(self.center) - - if center is not None: - center = (w * center[0], h * center[1]) # Convert to absolute coordinates - - if angle % 360 == 0: - return NoOpTransform() - - return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp) - - -class FixedSizeCrop(Augmentation): - """ - If `crop_size` is smaller than the input image size, then it uses a random crop of - the crop size. If `crop_size` is larger than the input image size, then it pads - the right and the bottom of the image to the crop size if `pad` is True, otherwise - it returns the smaller image. - """ - - def __init__(self, crop_size: Tuple[int], pad: bool = True, pad_value: float = 128.0): - """ - Args: - crop_size: target image (height, width). - pad: if True, will pad images smaller than `crop_size` up to `crop_size` - pad_value: the padding value. - """ - super().__init__() - self._init(locals()) - - def _get_crop(self, image: np.ndarray) -> Transform: - # Compute the image scale and scaled size. - input_size = image.shape[:2] - output_size = self.crop_size - - # Add random crop if the image is scaled up. - max_offset = np.subtract(input_size, output_size) - max_offset = np.maximum(max_offset, 0) - offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0)) - offset = np.round(offset).astype(int) - return CropTransform( - offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0] - ) - - def _get_pad(self, image: np.ndarray) -> Transform: - # Compute the image scale and scaled size. - input_size = image.shape[:2] - output_size = self.crop_size - - # Add padding if the image is scaled down. - pad_size = np.subtract(output_size, input_size) - pad_size = np.maximum(pad_size, 0) - original_size = np.minimum(input_size, output_size) - return PadTransform( - 0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value - ) - - def get_transform(self, image: np.ndarray) -> TransformList: - transforms = [self._get_crop(image)] - if self.pad: - transforms.append(self._get_pad(image)) - return TransformList(transforms) - - -class RandomCrop(Augmentation): - """ - Randomly crop a rectangle region out of an image. - """ - - def __init__(self, crop_type: str, crop_size): - """ - Args: - crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range". - crop_size (tuple[float, float]): two floats, explained below. - - - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of - size (H, W). 
crop size should be in (0, 1] - - "relative_range": uniformly sample two values from [crop_size[0], 1] - and [crop_size[1]], 1], and use them as in "relative" crop type. - - "absolute" crop a (crop_size[0], crop_size[1]) region from input image. - crop_size must be smaller than the input image size. - - "absolute_range", for an input of size (H, W), uniformly sample H_crop in - [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])]. - Then crop a region (H_crop, W_crop). - """ - # TODO style of relative_range and absolute_range are not consistent: - # one takes (h, w) but another takes (min, max) - super().__init__() - assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"] - self._init(locals()) - - def get_transform(self, image): - h, w = image.shape[:2] - croph, cropw = self.get_crop_size((h, w)) - assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self) - h0 = np.random.randint(h - croph + 1) - w0 = np.random.randint(w - cropw + 1) - return CropTransform(w0, h0, cropw, croph) - - def get_crop_size(self, image_size): - """ - Args: - image_size (tuple): height, width - - Returns: - crop_size (tuple): height, width in absolute pixels - """ - h, w = image_size - if self.crop_type == "relative": - ch, cw = self.crop_size - return int(h * ch + 0.5), int(w * cw + 0.5) - elif self.crop_type == "relative_range": - crop_size = np.asarray(self.crop_size, dtype=np.float32) - ch, cw = crop_size + np.random.rand(2) * (1 - crop_size) - return int(h * ch + 0.5), int(w * cw + 0.5) - elif self.crop_type == "absolute": - return (min(self.crop_size[0], h), min(self.crop_size[1], w)) - elif self.crop_type == "absolute_range": - assert self.crop_size[0] <= self.crop_size[1] - ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) - cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) - return ch, cw - else: - raise NotImplementedError("Unknown crop type {}".format(self.crop_type)) - - -class RandomCrop_CategoryAreaConstraint(Augmentation): - """ - Similar to :class:`RandomCrop`, but find a cropping window such that no single category - occupies a ratio of more than `single_category_max_area` in semantic segmentation ground - truth, which can cause unstability in training. The function attempts to find such a valid - cropping window for at most 10 times. - """ - - def __init__( - self, - crop_type: str, - crop_size, - single_category_max_area: float = 1.0, - ignored_category: int = None, - ): - """ - Args: - crop_type, crop_size: same as in :class:`RandomCrop` - single_category_max_area: the maximum allowed area ratio of a - category. Set to 1.0 to disable - ignored_category: allow this category in the semantic segmentation - ground truth to exceed the area ratio. Usually set to the category - that's ignored in training. 
- """ - self.crop_aug = RandomCrop(crop_type, crop_size) - self._init(locals()) - - def get_transform(self, image, sem_seg): - if self.single_category_max_area >= 1.0: - return self.crop_aug.get_transform(image) - else: - h, w = sem_seg.shape - for _ in range(10): - crop_size = self.crop_aug.get_crop_size((h, w)) - y0 = np.random.randint(h - crop_size[0] + 1) - x0 = np.random.randint(w - crop_size[1] + 1) - sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]] - labels, cnt = np.unique(sem_seg_temp, return_counts=True) - if self.ignored_category is not None: - cnt = cnt[labels != self.ignored_category] - if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area: - break - crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0]) - return crop_tfm - - -class RandomExtent(Augmentation): - """ - Outputs an image by cropping a random "subrect" of the source image. - - The subrect can be parameterized to include pixels outside the source image, - in which case they will be set to zeros (i.e. black). The size of the output - image will vary with the size of the random subrect. - """ - - def __init__(self, scale_range, shift_range): - """ - Args: - output_size (h, w): Dimensions of output image - scale_range (l, h): Range of input-to-output size scaling factor - shift_range (x, y): Range of shifts of the cropped subrect. The rect - is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)], - where (w, h) is the (width, height) of the input image. Set each - component to zero to crop at the image's center. - """ - super().__init__() - self._init(locals()) - - def get_transform(self, image): - img_h, img_w = image.shape[:2] - - # Initialize src_rect to fit the input image. - src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h]) - - # Apply a random scaling to the src_rect. - src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1]) - - # Apply a random shift to the coordinates origin. - src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5) - src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5) - - # Map src_rect coordinates into image coordinates (center at corner). - src_rect[0::2] += 0.5 * img_w - src_rect[1::2] += 0.5 * img_h - - return ExtentTransform( - src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]), - output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])), - ) - - -class RandomContrast(Augmentation): - """ - Randomly transforms image contrast. - - Contrast intensity is uniformly sampled in (intensity_min, intensity_max). - - intensity < 1 will reduce contrast - - intensity = 1 will preserve the input image - - intensity > 1 will increase contrast - - See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html - """ - - def __init__(self, intensity_min, intensity_max): - """ - Args: - intensity_min (float): Minimum augmentation - intensity_max (float): Maximum augmentation - """ - super().__init__() - self._init(locals()) - - def get_transform(self, image): - w = np.random.uniform(self.intensity_min, self.intensity_max) - return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w) - - -class RandomBrightness(Augmentation): - """ - Randomly transforms image brightness. - - Brightness intensity is uniformly sampled in (intensity_min, intensity_max). 
- - intensity < 1 will reduce brightness - - intensity = 1 will preserve the input image - - intensity > 1 will increase brightness - - See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html - """ - - def __init__(self, intensity_min, intensity_max): - """ - Args: - intensity_min (float): Minimum augmentation - intensity_max (float): Maximum augmentation - """ - super().__init__() - self._init(locals()) - - def get_transform(self, image): - w = np.random.uniform(self.intensity_min, self.intensity_max) - return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w) - - -class RandomSaturation(Augmentation): - """ - Randomly transforms saturation of an RGB image. - Input images are assumed to have 'RGB' channel order. - - Saturation intensity is uniformly sampled in (intensity_min, intensity_max). - - intensity < 1 will reduce saturation (make the image more grayscale) - - intensity = 1 will preserve the input image - - intensity > 1 will increase saturation - - See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html - """ - - def __init__(self, intensity_min, intensity_max): - """ - Args: - intensity_min (float): Minimum augmentation (1 preserves input). - intensity_max (float): Maximum augmentation (1 preserves input). - """ - super().__init__() - self._init(locals()) - - def get_transform(self, image): - assert image.shape[-1] == 3, "RandomSaturation only works on RGB images" - w = np.random.uniform(self.intensity_min, self.intensity_max) - grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis] - return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w) - - -class RandomLighting(Augmentation): - """ - The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet. - Input images are assumed to have 'RGB' channel order. - - The degree of color jittering is randomly sampled via a normal distribution, - with standard deviation given by the scale parameter. - """ - - def __init__(self, scale): - """ - Args: - scale (float): Standard deviation of principal component weighting. 
- """ - super().__init__() - self._init(locals()) - self.eigen_vecs = np.array( - [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]] - ) - self.eigen_vals = np.array([0.2175, 0.0188, 0.0045]) - - def get_transform(self, image): - assert image.shape[-1] == 3, "RandomLighting only works on RGB images" - weights = np.random.normal(scale=self.scale, size=3) - return BlendTransform( - src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0 - ) diff --git a/spaces/AyushP/PolicyCompareBot/app.py b/spaces/AyushP/PolicyCompareBot/app.py deleted file mode 100644 index 5f9c66623bbc18f4991cf9d06969c5875563c903..0000000000000000000000000000000000000000 --- a/spaces/AyushP/PolicyCompareBot/app.py +++ /dev/null @@ -1,86 +0,0 @@ -import openai -import streamlit as st -import sqlite3 -from PIL import Image -import pandas as pd - -openai.api_key = "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" -# Database Connection - -conn = sqlite3.connect('bank.db') -c = conn.cursor() - -def policyCompare(): - st.title("Compare Two Policy") - - with st.container(): - st.header("Select Policy 1") - question_2 = "Select the Institution from where you want the Insurance" - options_policy1 = ["Bank of Baroda", "State Bank of India(SBI)", "HDFC Bank", "LIC"] - - st.subheader(question_2) - selected_option_policy1 = st.selectbox("Please enter your option for Policy 1:", options_policy1) - - - - c.execute('SELECT Policy_Name FROM BANK WHERE Bank_Name= "{}"'.format(selected_option_policy1)) - options_3 = c.fetchall() - - - my_options = [] - for row in options_3: - my_options.append(row[0]) - - st.subheader("Select the Policy Name") - selected_policy1 = st.selectbox("Please enter your option for Policy 1:", my_options) - - c.execute('SELECT Policy_doc FROM BANK WHERE Policy_Name = "{}"'.format(selected_policy1)) - policy_doc_link1 = c.fetchone() - - - - - with st.container(): - st.header("Select Policy 2") - question_2 = "Select the Institution from where you want the Insurance" - options_policy2 = ["Bank of Baroda", "State Bank of India(SBI)", "HDFC Bank", "LIC"] - - st.subheader(question_2) - selected_option_policy2 = st.selectbox("Please enter your option for Policy 2:", options_policy2) - - - - c.execute('SELECT Policy_Name FROM BANK WHERE Bank_Name= "{}"'.format(selected_option_policy2)) - options_3 = c.fetchall() - - # st.write(options_3) - my_options2 = [] - for row in options_3: - my_options2.append(row[0]) - - st.subheader("Select the Policy Name") - selected_policy2 = st.selectbox("Please enter your option for Policy 2:", my_options2) - - c.execute('SELECT Policy_doc FROM BANK WHERE Policy_Name = "{}"'.format(selected_policy1)) - policy_doc_link2 = c.fetchone() - - if(selected_policy2 != 0): - st.header("Comparison") - st.subheader("Policy 1 : {}".format(selected_policy1)) - st.subheader("Policy 2 : {}".format(selected_policy2)) - response = openai.Completion.create( - model="text-davinci-003", - prompt="Compare the two health insurance policy using the policy document\nPolicy 1 Document: {},\nPolicy 2 Document: {}\nStrictly show the answer in tabular format:-".format(policy_doc_link1, policy_doc_link2), - temperature=0.05, - max_tokens=300, - top_p=1, - frequency_penalty=0, - presence_penalty=0, - stop=[":-"] - ) - - compare_response = response.choices[0].text - st.write(f"Answer: {compare_response}") - -if __name__ == '__main__': - policyCompare() \ No newline at end of file diff --git a/spaces/Banbri/zcvzcv/src/lib/utils.ts 
b/spaces/Banbri/zcvzcv/src/lib/utils.ts deleted file mode 100644 index ec79801fe9cdd7711f6dbef26678a134c634a8be..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/lib/utils.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { type ClassValue, clsx } from "clsx" -import { twMerge } from "tailwind-merge" - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} diff --git a/spaces/Bart92/RVC_HF/demucs/raw.py b/spaces/Bart92/RVC_HF/demucs/raw.py deleted file mode 100644 index d4941ad2d7ed858f490db441f5b46b12bd61ad78..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/demucs/raw.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -from collections import defaultdict, namedtuple -from pathlib import Path - -import musdb -import numpy as np -import torch as th -import tqdm -from torch.utils.data import DataLoader - -from .audio import AudioFile - -ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"]) - - -class Rawset: - """ - Dataset of raw, normalized, float32 audio files - """ - def __init__(self, path, samples=None, stride=None, channels=2, streams=None): - self.path = Path(path) - self.channels = channels - self.samples = samples - if stride is None: - stride = samples if samples is not None else 0 - self.stride = stride - entries = defaultdict(list) - for root, folders, files in os.walk(self.path, followlinks=True): - folders.sort() - files.sort() - for file in files: - if file.endswith(".raw"): - path = Path(root) / file - name, stream = path.stem.rsplit('.', 1) - entries[(path.parent.relative_to(self.path), name)].append(int(stream)) - - self._entries = list(entries.keys()) - - sizes = [] - self._lengths = [] - ref_streams = sorted(entries[self._entries[0]]) - assert ref_streams == list(range(len(ref_streams))) - if streams is None: - self.streams = ref_streams - else: - self.streams = streams - for entry in sorted(entries.keys()): - streams = entries[entry] - assert sorted(streams) == ref_streams - file = self._path(*entry) - length = file.stat().st_size // (4 * channels) - if samples is None: - sizes.append(1) - else: - if length < samples: - self._entries.remove(entry) - continue - sizes.append((length - samples) // stride + 1) - self._lengths.append(length) - if not sizes: - raise ValueError(f"Empty dataset {self.path}") - self._cumulative_sizes = np.cumsum(sizes) - self._sizes = sizes - - def __len__(self): - return self._cumulative_sizes[-1] - - @property - def total_length(self): - return sum(self._lengths) - - def chunk_info(self, index): - file_index = np.searchsorted(self._cumulative_sizes, index, side='right') - if file_index == 0: - local_index = index - else: - local_index = index - self._cumulative_sizes[file_index - 1] - return ChunkInfo(offset=local_index * self.stride, - file_index=file_index, - local_index=local_index) - - def _path(self, folder, name, stream=0): - return self.path / folder / (name + f'.{stream}.raw') - - def __getitem__(self, index): - chunk = self.chunk_info(index) - entry = self._entries[chunk.file_index] - - length = self.samples or self._lengths[chunk.file_index] - streams = [] - to_read = length * self.channels * 4 - for stream_index, stream in enumerate(self.streams): - offset = chunk.offset * 4 * self.channels - file = open(self._path(*entry, stream=stream), 
'rb') - file.seek(offset) - content = file.read(to_read) - assert len(content) == to_read - content = np.frombuffer(content, dtype=np.float32) - content = content.copy() # make writable - streams.append(th.from_numpy(content).view(length, self.channels).t()) - return th.stack(streams, dim=0) - - def name(self, index): - chunk = self.chunk_info(index) - folder, name = self._entries[chunk.file_index] - return folder / name - - -class MusDBSet: - def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2): - self.mus = mus - self.streams = streams - self.samplerate = samplerate - self.channels = channels - - def __len__(self): - return len(self.mus.tracks) - - def __getitem__(self, index): - track = self.mus.tracks[index] - return (track.name, AudioFile(track.path).read(channels=self.channels, - seek_time=0, - streams=self.streams, - samplerate=self.samplerate)) - - -def build_raw(mus, destination, normalize, workers, samplerate, channels): - destination.mkdir(parents=True, exist_ok=True) - loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate), - batch_size=1, - num_workers=workers, - collate_fn=lambda x: x[0]) - for name, streams in tqdm.tqdm(loader): - if normalize: - ref = streams[0].mean(dim=0) # use mono mixture as reference - streams = (streams - ref.mean()) / ref.std() - for index, stream in enumerate(streams): - open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes()) - - -def main(): - parser = argparse.ArgumentParser('rawset') - parser.add_argument('--workers', type=int, default=10) - parser.add_argument('--samplerate', type=int, default=44100) - parser.add_argument('--channels', type=int, default=2) - parser.add_argument('musdb', type=Path) - parser.add_argument('destination', type=Path) - - args = parser.parse_args() - - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"), - args.destination / "train", - normalize=True, - channels=args.channels, - samplerate=args.samplerate, - workers=args.workers) - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"), - args.destination / "valid", - normalize=True, - samplerate=args.samplerate, - channels=args.channels, - workers=args.workers) - - -if __name__ == "__main__": - main() diff --git a/spaces/Benson/text-generation/Examples/9ice Kasa Final Mp3 Descargar.md b/spaces/Benson/text-generation/Examples/9ice Kasa Final Mp3 Descargar.md deleted file mode 100644 index 3231ef52490ca86fff314652bc2228889b51c176..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/9ice Kasa Final Mp3 Descargar.md +++ /dev/null @@ -1,46 +0,0 @@ -
9ice Kasa Final Mp3 Download: A Review of the Hit Song by the Nigerian Music Legend

If you are a fan of Nigerian music, you have probably heard of 9ice Kasa Final, one of the most popular songs by the legendary singer, songwriter and dancer 9ice. The song, released in 2011 as part of his album Versus/Bashorun Gaa, is a catchy and captivating tune that showcases 9ice's powerful use of the Yoruba language, proverbial lyrics and unique style of delivery. In this article, we review the song in detail, exploring its lyrics, meaning, music, production, reception and impact. We also provide some background on 9ice himself, his achievements and his influence on the Nigerian music industry.

9ice kasa final mp3 descargar

Download Zip --->>> https://bltlly.com/2v6Jkw

Introduction

9ice Kasa Final is a song that celebrates 9ice's success and dominance on the music scene, as well as his confidence and resilience in overcoming challenges and critics. The song's title translates to "Case Closed" or "End of Discussion" in English, implying that 9ice has nothing left to prove or say to anyone who doubts or opposes him. The song is also a tribute to the fans and supporters who have stayed loyal to him throughout his career.

The Lyrics and Meaning of Kasa Final

...name of his label, Alapomeji, which means "one who has many sides or faces". It also implies that 9ice is versatile and adaptable in his music and personality.
- "Omo Bashorun Gaa" (Son of Bashorun Gaa): This is a reference to a historical figure in Yoruba history, Bashorun Gaa, a powerful and influential chief in the old Oyo Empire who was known for his cunning and ruthless tactics in politics and war. It also implies that 9ice is powerful and influential in the music industry.
- "Omo Aare Ona Kakanfo" (Son of Aare Ona Kakanfo): This is a reference to another historical figure in Yoruba history, Aare Ona Kakanfo, the title given to the supreme military commander of the old Oyo Empire, who was known for his bravery and loyalty in defending the empire from its enemies. It also implies that 9ice is brave and loyal in defending his music from its detractors.

The Music and Production of Kasa Final

9ice Kasa Final combines traditional and modern musical elements and influences to create a unique style of delivery. The song features a fast tempo, an upbeat rhythm, a catchy melody and energetic vocals. It also incorporates a range of instruments and sounds, such as drums, keyboards, guitars, horns, flutes, shakers, hand claps, chants, whistles, sirens, gunshots and more.

The Reception and Impact of Kasa Final

9ice Kasa Final was a major hit and received positive reviews and feedback from fans and critics alike. It was one of the most played and downloaded songs in Nigeria and across Africa in 2011, and it performed well on various charts, platforms and media outlets, such as MTV Base, Trace TV, Soundcity and Naija FM. The song also won several awards and nominations, including Best Collaboration at the Nigeria Music Video Awards (NMVA), Best Afro Pop Song at the City People Entertainment Awards (CPEA) and Best Song of the Year at the Nigerian Entertainment Awards (NEA).

The song also contributed to 9ice's career and legacy as a musician, cementing his status as one of the most respected and influential artists in Nigeria and Africa. It showcased his versatility and creativity as a singer, songwriter and dancer, and it inspired many other artists and fans to appreciate and celebrate their own culture and language, as well as their own achievements and challenges.

Conclusion

Here are some frequently asked questions and answers about the song, 9ice, or the Nigerian music industry.

Question: What does Kasa mean in Yoruba?
Answer: Kasa means "case" or "matter" in Yoruba. It can also mean "to close" or "to end". In the context of the song, it means "case closed" or "end of discussion".

Question: What is the name of 9ice's wife?
Answer: 9ice is married to Olasunkanmi Ajala, an event planner and entrepreneur. They married in 2019 and have a daughter together. 9ice also has three other children from previous relationships.

Question: Who is the richest musician in Nigeria?
Answer: According to Forbes, the richest musician in Nigeria in 2021 is Wizkid, with an estimated net worth of $30 million. He is followed by Davido, with an estimated net worth of $25 million, and Burna Boy, with an estimated net worth of $20 million.

Question: What is the meaning of Gongo Aso?
Answer: Gongo Aso is another hit song by 9ice, released in 2008. The title means "Thunder Fire" or "Thunder Strike" in Yoruba. It is a slang expression that can be used to curse someone or something, or to express surprise or shock.

Question: What are some of the awards 9ice has won?
Answer: Some of the awards 9ice has won include: MOBO Award for Best African Act in 2008; MTV Africa Music Award for Best Hip Hop in 2008; The Headies Award for Artiste of the Year in 2008; The Headies Award for Album of the Year in 2008; The Headies Award for Song of the Year in 2008; The Headies Award for Best Vocal Performance (Male) in 2008; The Headies Award for Best R&B/Pop Album in 2016; City People Entertainment Award for Special Recognition/Hall of Fame in 2016; City People Music Award for Best Collabo of the Year (Song) in 2017; City People Music Award for Rap Album of the Year in 2017; among others.
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Barco Rampa De Salto Apk Mod.md b/spaces/Benson/text-generation/Examples/Barco Rampa De Salto Apk Mod.md deleted file mode 100644 index 6774307a8c7bdb9f3881c9e5c80746a6ae11499e..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Barco Rampa De Salto Apk Mod.md +++ /dev/null @@ -1,58 +0,0 @@ - -

Game Stickman Falling Mod APK: A Fun and Crazy Physics Game

Do you enjoy physics-based games that let you unleash your creativity and imagination? Do you like watching stick figures crash, burn, and explode in hilarious ways? If you answered yes to these questions, you will love Game Stickman Falling, a fun and crazy physics game that will make you laugh out loud.

What is Game Stickman Falling?

Game Stickman Falling is a physics simulation game developed by Skygo. In this game, you control a stick figure that can ride various vehicles and perform stunts, tricks, and crashes. The game has realistic physics and ragdoll effects, which means your stick figure reacts to every impact, collision, and explosion. You can also customize your stick figure with different outfits, accessories, and weapons.

barco rampa de salto apk mod

DOWNLOAD: https://bltlly.com/2v6MIx

The gameplay of Game Stickman Falling

The gameplay of Game Stickman Falling is simple but addictive. You can choose between different modes, such as free mode, challenge mode, or multiplayer mode. In free mode, you can explore the game world and try out different vehicles and scenarios. In challenge mode, you have to complete specific tasks and objectives, such as reaching a certain speed, distance, or score. In multiplayer mode, you can compete with other players online and see who can cause the most damage and chaos.

The features of Game Stickman Falling

Game Stickman Falling has many features that make it a fun and entertaining game. Some of them are:

• A variety of vehicles to choose from, such as cars, bikes, trucks, planes, helicopters, rockets, and more.
• A large map with different terrains, obstacles, ramps, loops, bridges, and traps.
• A realistic physics engine that simulates gravity, friction, inertia, momentum, and force.
• A ragdoll system that makes your stick figure react to every impact and injury.
• A replay option that lets you watch your stunts and crashes from different angles and perspectives.
• A leaderboard and achievement system that tracks your progress and performance.

Why download Game Stickman Falling Mod APK?

Game Stickman Falling is a free game that you can download from the Google Play Store. However, if you want to enjoy the game to the fullest, you may want to download Game Stickman Falling Mod APK instead. This is a modified version of the game that gives you some advantages over the original version. Some of them are:

Unlimited money

With Game Stickman Falling Mod APK, you will have unlimited in-game money. This means you can buy any vehicle or item you want without worrying about the cost. You can also upgrade your vehicles and items to make them more powerful and durable.

No ads

With Game Stickman Falling Mod APK, you will not see any ads in the game. This means you can play without interruptions or distractions. You can also save data and battery life by not loading any ads.

More vehicles and levels

With Game Stickman Falling Mod APK, you will have access to more vehicles and levels than in the original version. This means you can enjoy more variety and diversity in the game. You can also challenge yourself with harder and more exciting scenarios.

How to download and install Game Stickman Falling Mod APK?

If you want to download and install Game Stickman Falling Mod APK, follow these simple steps:

Step 1: Download the APK file

The first step is to download the Game Stickman Falling Mod APK file from a reliable and trustworthy source. You can use the link below to download the file directly to your device.

Download Game Stickman Falling Mod APK

Step 2: Enable unknown sources

Step 3: Install the APK file

The third step is to install the APK file you downloaded in step 1. To do this, locate the file in your device storage and tap on it. Then follow the on-screen instructions and grant the necessary permissions. The installation process takes a few seconds or minutes depending on your device.

Step 4: Enjoy the game

The fourth and final step is to enjoy the game. You can now launch Game Stickman Falling Mod APK from the app drawer or home screen and start playing. You will notice that you have unlimited money, no ads, and more vehicles and levels in the game.

Conclusion

Game Stickman Falling Mod APK is a fun and crazy physics game that will make you laugh out loud. You control a stick figure that rides various vehicles and performs stunts, tricks, and crashes, and you can customize it with different outfits, accessories, and weapons. The game's realistic physics and ragdoll effects make your stick figure react to every impact, collision, and explosion, and you can choose between free mode, challenge mode, and multiplayer mode.

If you want to enjoy the game to the fullest, download Game Stickman Falling Mod APK instead of the original version. This modified version gives you unlimited money, no ads, and more vehicles and levels, and you can install it easily by following the simple steps above.

So what are you waiting for? Download Game Stickman Falling Mod APK now and have fun!

FAQs

• Q: Is it safe to download and install Game Stickman Falling Mod APK?
• Q: Do I need to root my device to use Game Stickman Falling Mod APK?
• A: No, you do not need to root your device to use Game Stickman Falling Mod APK. The game works fine on both rooted and unrooted devices.
• Q: Can I play Game Stickman Falling Mod APK offline?
• A: Yes, you can play Game Stickman Falling Mod APK offline. The game does not require an internet connection to run. However, some features such as multiplayer mode may not work offline.
• Q: Can I update Game Stickman Falling Mod APK?
• A: Yes, you can update Game Stickman Falling Mod APK when a new version is available. However, you may lose the mod features if you update the game from the Google Play Store. To keep the mod features, update the game from the same source you downloaded it from.
• Q: Can I play Game Stickman Falling Mod APK with my friends?
• A: Yes, you can play Game Stickman Falling Mod APK with your friends. The game has a multiplayer mode that lets you compete with other players online. You can also share your replays and achievements with your friends on social media.
    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Base De La Fuerza Area Inactiva Mod Apk Dinero Ilimitado.md b/spaces/Benson/text-generation/Examples/Base De La Fuerza Area Inactiva Mod Apk Dinero Ilimitado.md deleted file mode 100644 index d832ffff95d33f1e4eb702e57d9fdb844127f9de..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Base De La Fuerza Area Inactiva Mod Apk Dinero Ilimitado.md +++ /dev/null @@ -1,48 +0,0 @@ - -

Idle Air Force Base Mod APK: Build Your Own Military Empire

Do you dream of becoming a powerful military leader? Do you want to build and manage your own air base? Do you want to train and command the best pilots and aircraft in the world? If you answered yes to any of these questions, you should try Idle Air Force Base, a fun and addictive idle game that lets you create your own military empire. And if you want to make your gameplay even more exciting and rewarding, you should download Idle Air Force Base Mod APK, which gives you unlimited money and no ads. In this article, we will tell you everything you need to know about this mod apk, including what it is, why you should download it, and how to install it on your device.

What is Idle Air Force Base?

Idle Air Force Base is an idle game that simulates running and managing an air force base. You start with a small, basic base, and your goal is to expand it and make it the most powerful and advanced in the world. You do this by upgrading your facilities, training your pilots, researching new technologies, and launching missions to earn money and prestige. As you progress through the game, you unlock new aircraft, such as fighters, bombers, stealth planes, drones, helicopters, and more. You also face different challenges and scenarios, such as wars, disasters, invasions, and emergencies, and you will have to use your strategy and skills to overcome them and protect your base.

base de la fuerza aérea inactiva mod apk dinero ilimitado

Download File: https://bltlly.com/2v6MId

A fun and addictive idle game

A realistic and immersive simulation

Another great feature of Idle Air Force Base is that it is very realistic and immersive. The game simulates the actual operation and management of an air force base, with all its aspects and details. You have to deal with factors such as weather conditions, fuel consumption, maintenance costs, security risks, enemy threats, and more. You also have to follow military rules and regulations, such as ranks, medals, honors, protocols, and codes. The game features real-life aircraft models, such as the F-16, F-22, B-2, C-130, Apache helicopters, and Predator drones. You will feel as if you were really in charge of a real air force base.

A strategic and rewarding management game

Last but not least, Idle Air Force Base is very strategic and rewarding. The game requires you to use your brain and skills to make smart decisions and optimize your base's performance. You have to balance your budget, allocate your resources, prioritize your upgrades, plan your missions, choose your aircraft, assign your pilots, and more. You also face challenges and scenarios that test your skills and creativity. The game rewards your efforts with money, prestige points, achievements, and trophies, which you can use to upgrade your base, unlock new features, and climb the military hierarchy. You can also compare your progress and achievements with other players around the world through the online leaderboard and chat system. The game offers endless hours of fun and satisfaction.

Why download Idle Air Force Base Mod APK?

Unlimited money to upgrade your base

One of the main features of Idle Air Force Base Mod APK is that it gives you unlimited money to spend on your base. Money is the main currency in the game, and you need it to upgrade your facilities, train your pilots, research new technologies, and launch missions. However, money is not easy to come by, since you have to wait for your earnings to accumulate over time or watch ads to get some extra cash. This can be frustrating and time-consuming, especially if you want to progress faster and unlock more features. With Idle Air Force Base Mod APK, you no longer have to worry about money, because you have an infinite amount at your disposal. You can upgrade your base as much as you want, without limits or restrictions, and you can skip the ads and enjoy smoother, uninterrupted gameplay.

No ads to interrupt your gameplay

Another feature of Idle Air Force Base Mod APK is that it removes all ads from the game. Ads are common in most free games and generate revenue for the developers and publishers, but they can also be annoying and intrusive, popping up at any time and interrupting your gameplay. They can also affect your device's performance and battery life, since they consume data and resources. With Idle Air Force Base Mod APK, you no longer have to deal with ads, because they are completely removed from the game. You can play without distractions or interruptions and enjoy faster, smoother gameplay.

Easy installation and compatibility

How to download and install Idle Air Force Base Mod APK?

If you are interested in downloading and installing Idle Air Force Base Mod APK on your device, you can follow these simple steps:

Step 1: Download the mod apk file from a trusted source

The first step is to download the mod apk file from a trusted source. Many websites offer this mod apk file for free, but you have to be careful and avoid any malicious or fake links that may harm your device or steal your data. We recommend using this link to download the mod apk file safely.

Step 2: Enable unknown sources in your device settings

The second step is to enable unknown sources in your device settings. This is necessary because this mod apk file is not from the official Google Play Store, so your device may block its installation by default. To enable unknown sources, go to your device settings, then to the security or privacy settings, and turn on the option that allows installation from unknown sources.

Step 3: Install the mod apk file and launch the game

The third and final step is to install the mod apk file and launch the game. Locate the mod apk file in your device's storage or downloads folder, tap on it, and follow the on-screen instructions. The installation should take only a few seconds or minutes, depending on your device's speed and memory. Once the installation is complete, launch the game by tapping its icon on the home screen or in the app drawer. You can now enjoy Idle Air Force Base Mod APK with unlimited money and no ads.

Conclusion

Idle Air Force Base Mod APK is a must-try for idle game fans

FAQs

Here are some of the most frequently asked questions about Idle Air Force Base Mod APK:

• Q: Is Idle Air Force Base Mod APK safe to use?
• A: Yes, Idle Air Force Base Mod APK is safe to use as long as you download it from a trusted source. We tested the mod apk file and found no viruses or malware in it. However, you should always be careful when downloading any file from the internet and scan it with antivirus software before installing it on your device.
• Q: Is Idle Air Force Base Mod APK legal to use?
• A: Idle Air Force Base Mod APK is not legal to use, since it violates the terms and conditions of the original game. By using this mod apk, you are modifying the game files and accessing features that are not authorized by the developers and publishers. This can result in your account being banned or suspended, or your device being blacklisted. Therefore, we do not recommend or endorse the use of this mod apk, and we are not responsible for any consequences that may arise from its use.
• Q: Does Idle Air Force Base Mod APK require an internet connection?
• A: No, Idle Air Force Base Mod APK does not require an internet connection to play. You can play the game offline without any problems. However, you may need an internet connection to access some online features, such as the leaderboard and chat system.
• Q: Can I update Idle Air Force Base Mod APK?
• A: No, you cannot update Idle Air Force Base Mod APK, since it is not from the official Google Play Store. If you try to update the game from the original source, you may lose all your progress and mod features. Therefore, you should always check for new versions of the mod apk from the same source where you downloaded it.
• Q: Can I play Idle Air Force Base Mod APK with my friends?
    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Campeonato De Cricket Mundial 2 Juego De Ordenador.md b/spaces/Benson/text-generation/Examples/Campeonato De Cricket Mundial 2 Juego De Ordenador.md deleted file mode 100644 index 5b459b50eddf2fd1a3b64d593ad4495e2bb35bbd..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Campeonato De Cricket Mundial 2 Juego De Ordenador.md +++ /dev/null @@ -1,72 +0,0 @@ - -

World Cricket Championship 2: How to Download and Play on PC

If you are a cricket lover, you have probably heard of World Cricket Championship 2, one of the most popular and realistic cricket games for mobile devices. But did you know that you can also play this amazing game on your PC? In this article, we will show you how to download and play World Cricket Championship 2 on PC using different emulators. But first, let's see what this game is all about and why you should play it on PC.

Introduction

What is World Cricket Championship 2?

World Cricket Championship 2, or WCC2 for short, is a sports game developed by Nextwave Multimedia. It is the sequel to the acclaimed World Cricket Championship game, which was released in 2015. WCC2 is designed to give cricket lovers an immersive and exciting gaming experience. It features advanced graphics, realistic physics, dynamic gameplay, and a variety of modes and options to suit every cricket fan's preferences.

campeonato de cricket mundial 2 juego de ordenador

Download Zip: https://bltlly.com/2v6Mc8

Some of the highlights of WCC2 are:

• More than 150 different batting animations and 28 bowling actions
• 18 international teams, 10 domestic teams, 42 stadiums, and more than 11 tournaments
• Customizable players, jerseys, banners, logos, and accessories
• Professional commentary in English and Hindi
• Night mode with LED stumps and realistic weather conditions
• Challenge mode, Gangs of Cricket mode, Blitz tournament mode, and online multiplayer mode
• Leaderboards, achievements, rewards, and player profiles

With so many features and options, WCC2 is undoubtedly one of the best cricket games available for mobile devices. But what if you want to play it on a bigger screen with better controls? That is where playing WCC2 on PC comes in handy.

Why play World Cricket Championship 2 on PC?

• Bigger screen: You can enjoy WCC2's stunning graphics and animations on a larger display, which improves the visual quality and immersion of the game.
• Better controls: You can use your mouse, keyboard, or gamepad to control your players and execute shots with more accuracy and precision. You can also customize the controls to suit your preferences.
• Faster performance: You can run WCC2 smoothly on your PC without lag or glitches, and adjust the graphics settings to optimize the game's performance.
• More storage space: You can keep more WCC2 data and progress on your PC without worrying about running out of storage space or losing your data.
• No battery drain or overheating issues: You can play WCC2 for hours on your PC without draining your battery or overheating your device.

As you can see, playing WCC2 on PC has many benefits that make for a more enjoyable and satisfying gaming experience. But how do you actually play WCC2 on PC? There are three methods you can use, each based on a different emulator. Let's see what they are and how they work.

How to download World Cricket Championship 2 on PC

An emulator is a piece of software that lets you run Android apps and games on your PC. There are many emulators available for PC, but not all of them are compatible with WCC2. Here are three of the best emulators you can use to download and play WCC2 on PC:

Method 1: Using the BlueStacks emulator

BlueStacks is one of the most popular and widely used emulators for PC. It has a user-friendly interface, a large app store, and high compatibility with most Android apps and games. Here are the steps to download and play WCC2 on PC using BlueStacks:

Step 1: Download and install BlueStacks on your PC

Step 2: Launch BlueStacks and search for World Cricket Championship 2 in the app store

After installing BlueStacks, launch it and sign in with your Google account. Then go to the app store and type World Cricket Championship 2 in the search bar. You will see the game's icon in the results; click on it to go to the game page.

Step 3: Install World Cricket Championship 2 and enjoy playing on PC

On the game page, click the install button to start downloading and installing WCC2 on your PC. The process may take some time depending on your internet speed and PC performance. Once the installation is done, you can launch the game from the BlueStacks home screen or app drawer and enjoy playing WCC2 on PC.

Method 2: Using the LDPlayer emulator

LDPlayer is another great emulator for PC that is designed for gaming. It offers smooth performance, high compatibility with most Android games, and plenty of features and settings to enhance your gaming experience. Here are the steps to download and play WCC2 on PC using LDPlayer:

Step 1: Download and install LDPlayer on your PC

You can download LDPlayer from its official website here. The installation process is similar to BlueStacks: just follow the on-screen instructions and wait for the installation to complete.

Step 2: Launch LDPlayer and search for World Cricket Championship 2 in the game center

After installing LDPlayer, launch it and sign in with your Google account. Then go to the game center and search for World Cricket Championship 2 in the search bar. You will see the game's icon in the results; click on it to go to the game page.

Step 3: Install World Cricket Championship 2 and enjoy playing on PC

Method 3: Using the GameLoop emulator

GameLoop is another excellent emulator for PC that is specially designed for Tencent games. It offers fast performance, high compatibility with most Tencent games, and many features and settings to optimize your gaming experience. Here are the steps to download and play WCC2 on PC using GameLoop:

Step 1: Download and install GameLoop on your PC

You can download GameLoop from its official website here. You can also follow them on their social media platforms, such as Facebook, Twitter, Instagram, YouTube, and Discord, or email them at support@nextwavemultimedia.com.
    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Choque Royale Mod Apk Nuevas Tarjetas.md b/spaces/Benson/text-generation/Examples/Choque Royale Mod Apk Nuevas Tarjetas.md deleted file mode 100644 index 6d5b27524764d202ebc0834c46f40717ea55a27f..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Choque Royale Mod Apk Nuevas Tarjetas.md +++ /dev/null @@ -1,121 +0,0 @@ -
    -

Clash Royale Mod APK New Cards: Everything You Need to Know

Are you a fan of Clash Royale, Supercell's popular real-time strategy game? Do you want to spice up your gameplay with new and exciting cards that are not available in the official version of the game? If so, you might be interested in trying Clash Royale Mod APK, a modified version of the game that gives you access to new cards, unlimited resources, and other features. In this article, we will tell you everything you need to know about Clash Royale Mod APK, including what it is, what the new cards are, how to download and install it, and how to play online with other players. Let's get started!

What is Clash Royale Mod APK?

Before we dive into the details of the new cards, let's first understand what Clash Royale Mod APK is and how it differs from the original game.

choque royale mod apk nuevas tarjetas

DOWNLOAD: https://bltlly.com/2v6LzO

A brief introduction to Clash Royale and its gameplay mechanics

Clash Royale is a tower defense game in which you attack the enemy's towers using characters that you collect and level up (the collectible card mechanic). A player wins a match by destroying all of the enemy's towers, or by destroying more towers than the enemy.

The game features two sets of towers facing each other on a single-screen battlefield. Players spend elixir to deploy troops, buildings, and spells from a deck of eight cards (drawn from a collection of more than 90 cards) anywhere in their territory on the field. More cards are collected by unlocking chests earned in battle or bought in the shop, which in turn unlocks new cards that players can add to their decks and/or levels up cards they already own. Each card requires a certain amount of elixir to deploy, and the players' elixir regenerates over time. The game also has several game modes, such as ladder battles, tournaments, clan wars, special events, and more.

The benefits and risks of using a modified version of the game

• New cards that are not available in the original game
• Unlimited resources such as gold, gems, elixir, and chests
• The ability to play online with other modded or unmodded players
• The ability to customize your deck, arena, and other settings

These features can make the game more fun and exciting for players who want to try new strategies, experiment with different combinations, or simply enjoy more options and freedom. However, there are also some risks involved in using a modified version of the game. Some of these risks include:

• Potential malware or viruses that can harm your device or steal your personal information
• Possible bans or suspensions from the official game server or your Supercell account
• Unfair advantages or cheating that can ruin the game's balance and the fun for other players
• Lack of updates or support from the original developer or the modder

Therefore, if you decide to use a modified version of the game, you should do so at your own risk and responsibility. You should also respect the rules and the rights of the original developer and other players, and not use the modified version for any illegal or unethical purpose.

What are the new cards in Clash Royale Mod APK?

One of the main attractions of Clash Royale Mod APK is the new cards that are not available in the original game. These cards are fan-made, inspired by other games or media, or based on unused concepts from the official game. They can add more variety, creativity, and fun to your gameplay. Here is a table with some of the new cards, their stats, and their abilities:

Name | Type | Rarity | Elixir Cost | Hitpoints | Damage | Ability
Dragon Knight | Troop | Epic | 5 | 1200 | 200 |
Mega Minion Horde | Troop | Rare | 7 | 300 (each) | 150 (each) | A swarm of six mega minions that can deal massive damage to air and ground targets.
Goblin Barrel Launcher | Building | Common | 4 | 800 | N/A | A stationary launcher that fires goblin barrels at the enemy's towers every 5 seconds.
Mirror Image | Spell | | | | |

... connection before downloading it.

The steps to install the modded file on your device

The next step is to install the modded file on your device. To do this, you will need to enable the installation of apps from unknown sources on your device. This is a security feature that prevents the installation of apps that are not from the official app store or verified by the device manufacturer. However, since you are installing a modded file, you will need to bypass this feature temporarily. Here is how you can do it:

• Go to your device settings and look for the security or privacy option.
• Find the option that says "Unknown sources" or "Allow installation of apps from unknown sources" and toggle it on.
• You may see a warning message informing you about the risks of installing apps from unknown sources. Tap "OK" or "Allow" to continue.

Once you have enabled the installation of apps from unknown sources, you can proceed to install the modded file. Here is how you can do it:

• Locate the modded file you downloaded on your device. It should be in your downloads folder or in the notification bar.
• Tap on the file and follow the on-screen instructions to install it.
• You may see a message asking you to grant permissions to the app. Tap "Allow" or "Accept" to grant them.
• Wait for the installation process to finish. It may take a few minutes depending on your device and internet speed.
• Once the installation is done, you will see a message that says "App installed" or "Clash Royale Mod APK installed". Tap "Open" or "Launch" to start the app.

The precautions to take before and after installing the modded file

• Before installing the modded file, make sure you have backed up your data and progress from the original game. You can do this by linking your game account to a Supercell ID, Google Play Games, or a Facebook account. This way, you can restore your data and progress if something goes wrong or if you want to go back to the original game.
• After installing the modded file, make sure you disable the installation of apps from unknown sources on your device. This prevents unwanted or malicious apps from being installed on your device without your knowledge or consent. You can do this by following the same steps as above, but toggling the option off instead of on.
• After installing the modded file, do not update the app from the official app store or any other source. Updating the app may overwrite or delete the modded file and its features, and cause errors or crashes. If you see a notification asking you to update the app, ignore it or cancel it.
• After installing the modded file, do not sign in with your Supercell ID, Google Play Games, or Facebook account. Signing in with these accounts may link your game data and progress to the modded version and cause problems with your original game account. You also risk being banned or suspended by Supercell for using a modified version of their game. Instead, use a guest account or create a new account to play the modded version.

How to play Clash Royale Mod APK online with other players?

The options to play online with other modded or unmodded players

There are two main options for playing online with other players using Clash Royale Mod APK: private servers and public servers.

Private servers are hosted by the modder or a third-party provider, and they are only accessible to players who have the same modded version of the game. These servers are usually free to join, but they may have limited capacity, stability, or features, and they may require a password or an invitation to join. Private servers are ideal for playing with your friends or other players who share your interest in the modded version of the game. You can find private servers by searching online, asking the modder, or joining a community of modded players.

Public servers are hosted by Supercell, the original developer of the game, and they are accessible to all players who have the official version or any modded version of the game. These servers are usually more reliable, secure, and up to date than private servers, but they also come with more restrictions and risks. Public servers are ideal for playing with random players or testing your skills against unmodded players. However, you should be careful not to use any features that are exclusive to the modded version, such as new cards, unlimited resources, or custom settings. These features may not work properly on public servers, and they may also be detected by Supercell's anti-cheat system, which can result in a ban or suspension from the game.

The tips and tricks to win more battles and trophies with the new cards

Whether you play on private or public servers, you will want to win more battles and trophies with the new cards you have access to in Clash Royale Mod APK. Here are some tips and tricks to help you do that:

• Build a balanced and versatile deck that can counter different types of enemies and situations. You can do this by including cards that can attack or defend against air and ground targets, deal single-target or splash damage, support or distract other troops, and so on.
• Use your elixir wisely and efficiently. Deploy your cards at the right time and place, avoid over-committing or under-committing your elixir, cycle your cards fast enough to get the ones you need, and manage your elixir advantage or disadvantage.
• Use your new cards creatively and strategically. Surprise your opponent with unexpected moves, exploit their weaknesses or mistakes, adapt to their strategies or counters, and create combos or pushes that are hard to stop.
• Have fun and enjoy the game. Try different decks and modes, challenge yourself with harder opponents or objectives, join a clan or a community of modded players, and share your feedback or suggestions with the modder.

Conclusion

In conclusion, Clash Royale Mod APK is a modified version of Supercell's popular real-time strategy game that gives you access to new cards, unlimited resources, and other features that are not available in the official version. It can make the game more fun and exciting for players who want to try new strategies, experiment with different combinations, or simply enjoy more options and freedom. However, it also has risks and limitations that you should be aware of before downloading and installing it on your device. You should also respect the rules and the rights of the original developer and other players, and not use the modified version for any illegal or unethical purpose.

FAQs

Here are some frequently asked questions and answers about Clash Royale Mod APK:

Q: Is Clash Royale Mod APK safe to use?

A: Clash Royale Mod APK is not an official Supercell product, so it is not endorsed or supported by them. It is also not verified or tested by any reputable authority or platform. Therefore, there is no guarantee that it is safe to use, and it may contain malware, viruses, or fake files that can harm your device or steal your personal information. You should use it at your own risk and responsibility, and only download it from a trusted source.

Q: Can I play Clash Royale Mod APK offline?

A: No, you cannot play Clash Royale Mod APK offline. The game requires an internet connection to reach the servers, whether private or public. If you try to play offline, you will see an error message that says "No internet connection" or "Connection error". You need a stable and secure Wi-Fi or mobile data connection to play.

Q: Can I use my original game account to play Clash Royale Mod APK?

A: No, you cannot use your original game account to play Clash Royale Mod APK. The modded version has different features and settings than the original version, and the two are not compatible with each other. If you try to sign in with your original game account, you may encounter errors, crashes, or bans. You should use a guest account or create a new account to play the modded version.

Q: How can I update Clash Royale Mod APK?

Q: Where can I find more information or support for Clash Royale Mod APK?

A: You can find more information or support for Clash Royale Mod APK by visiting the website of the modder or the source that provides the modded file. You can also join a community of modded players on social media platforms, forums, or chat groups, where you can ask questions, share feedback, or report issues related to the modded version of the game. However, you should be careful not to trust any information or support that does not come from a reliable source.
    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpchecksum.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpchecksum.py deleted file mode 100644 index b0b84b400bc943dafe44c2f91035bf454f0b671c..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/httpchecksum.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -""" The interfaces in this module are not intended for public use. - -This module defines interfaces for applying checksums to HTTP requests within -the context of botocore. This involves both resolving the checksum to be used -based on client configuration and environment, as well as application of the -checksum to the request. -""" -import base64 -import io -import logging -from binascii import crc32 -from hashlib import sha1, sha256 - -from botocore.compat import HAS_CRT -from botocore.exceptions import ( - AwsChunkedWrapperError, - FlexibleChecksumError, - MissingDependencyException, -) -from botocore.response import StreamingBody -from botocore.utils import ( - conditionally_calculate_md5, - determine_content_length, -) - -if HAS_CRT: - from awscrt import checksums as crt_checksums -else: - crt_checksums = None - -logger = logging.getLogger(__name__) - - -class BaseChecksum: - _CHUNK_SIZE = 1024 * 1024 - - def update(self, chunk): - pass - - def digest(self): - pass - - def b64digest(self): - bs = self.digest() - return base64.b64encode(bs).decode("ascii") - - def _handle_fileobj(self, fileobj): - start_position = fileobj.tell() - for chunk in iter(lambda: fileobj.read(self._CHUNK_SIZE), b""): - self.update(chunk) - fileobj.seek(start_position) - - def handle(self, body): - if isinstance(body, (bytes, bytearray)): - self.update(body) - else: - self._handle_fileobj(body) - return self.b64digest() - - -class Crc32Checksum(BaseChecksum): - def __init__(self): - self._int_crc32 = 0 - - def update(self, chunk): - self._int_crc32 = crc32(chunk, self._int_crc32) & 0xFFFFFFFF - - def digest(self): - return self._int_crc32.to_bytes(4, byteorder="big") - - -class CrtCrc32Checksum(BaseChecksum): - # Note: This class is only used if the CRT is available - def __init__(self): - self._int_crc32 = 0 - - def update(self, chunk): - new_checksum = crt_checksums.crc32(chunk, self._int_crc32) - self._int_crc32 = new_checksum & 0xFFFFFFFF - - def digest(self): - return self._int_crc32.to_bytes(4, byteorder="big") - - -class CrtCrc32cChecksum(BaseChecksum): - # Note: This class is only used if the CRT is available - def __init__(self): - self._int_crc32c = 0 - - def update(self, chunk): - new_checksum = crt_checksums.crc32c(chunk, self._int_crc32c) - self._int_crc32c = new_checksum & 0xFFFFFFFF - - def digest(self): - return self._int_crc32c.to_bytes(4, byteorder="big") - - -class Sha1Checksum(BaseChecksum): - def __init__(self): - self._checksum = sha1() - - def update(self, chunk): - self._checksum.update(chunk) - - def digest(self): - return 
self._checksum.digest() - - -class Sha256Checksum(BaseChecksum): - def __init__(self): - self._checksum = sha256() - - def update(self, chunk): - self._checksum.update(chunk) - - def digest(self): - return self._checksum.digest() - - -class AwsChunkedWrapper: - _DEFAULT_CHUNK_SIZE = 1024 * 1024 - - def __init__( - self, - raw, - checksum_cls=None, - checksum_name="x-amz-checksum", - chunk_size=None, - ): - self._raw = raw - self._checksum_name = checksum_name - self._checksum_cls = checksum_cls - self._reset() - - if chunk_size is None: - chunk_size = self._DEFAULT_CHUNK_SIZE - self._chunk_size = chunk_size - - def _reset(self): - self._remaining = b"" - self._complete = False - self._checksum = None - if self._checksum_cls: - self._checksum = self._checksum_cls() - - def seek(self, offset, whence=0): - if offset != 0 or whence != 0: - raise AwsChunkedWrapperError( - error_msg="Can only seek to start of stream" - ) - self._reset() - self._raw.seek(0) - - def read(self, size=None): - # Normalize "read all" size values to None - if size is not None and size <= 0: - size = None - - # If the underlying body is done and we have nothing left then - # end the stream - if self._complete and not self._remaining: - return b"" - - # While we're not done and want more bytes - want_more_bytes = size is None or size > len(self._remaining) - while not self._complete and want_more_bytes: - self._remaining += self._make_chunk() - want_more_bytes = size is None or size > len(self._remaining) - - # If size was None, we want to return everything - if size is None: - size = len(self._remaining) - - # Return a chunk up to the size asked for - to_return = self._remaining[:size] - self._remaining = self._remaining[size:] - return to_return - - def _make_chunk(self): - # NOTE: Chunk size is not deterministic as read could return less. 
This - # means we cannot know the content length of the encoded aws-chunked - # stream ahead of time without ensuring a consistent chunk size - raw_chunk = self._raw.read(self._chunk_size) - hex_len = hex(len(raw_chunk))[2:].encode("ascii") - self._complete = not raw_chunk - - if self._checksum: - self._checksum.update(raw_chunk) - - if self._checksum and self._complete: - name = self._checksum_name.encode("ascii") - checksum = self._checksum.b64digest().encode("ascii") - return b"0\r\n%s:%s\r\n\r\n" % (name, checksum) - - return b"%s\r\n%s\r\n" % (hex_len, raw_chunk) - - def __iter__(self): - while not self._complete: - yield self._make_chunk() - - -class StreamingChecksumBody(StreamingBody): - def __init__(self, raw_stream, content_length, checksum, expected): - super().__init__(raw_stream, content_length) - self._checksum = checksum - self._expected = expected - - def read(self, amt=None): - chunk = super().read(amt=amt) - self._checksum.update(chunk) - if amt is None or (not chunk and amt > 0): - self._validate_checksum() - return chunk - - def _validate_checksum(self): - if self._checksum.digest() != base64.b64decode(self._expected): - error_msg = ( - f"Expected checksum {self._expected} did not match calculated " - f"checksum: {self._checksum.b64digest()}" - ) - raise FlexibleChecksumError(error_msg=error_msg) - - -def resolve_checksum_context(request, operation_model, params): - resolve_request_checksum_algorithm(request, operation_model, params) - resolve_response_checksum_algorithms(request, operation_model, params) - - -def resolve_request_checksum_algorithm( - request, - operation_model, - params, - supported_algorithms=None, -): - http_checksum = operation_model.http_checksum - algorithm_member = http_checksum.get("requestAlgorithmMember") - if algorithm_member and algorithm_member in params: - # If the client has opted into using flexible checksums and the - # request supports it, use that instead of checksum required - if supported_algorithms is None: - supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS - - algorithm_name = params[algorithm_member].lower() - if algorithm_name not in supported_algorithms: - if not HAS_CRT and algorithm_name in _CRT_CHECKSUM_ALGORITHMS: - raise MissingDependencyException( - msg=( - f"Using {algorithm_name.upper()} requires an " - "additional dependency. You will need to pip install " - "botocore[crt] before proceeding." - ) - ) - raise FlexibleChecksumError( - error_msg="Unsupported checksum algorithm: %s" % algorithm_name - ) - - location_type = "header" - if operation_model.has_streaming_input: - # Operations with streaming input must support trailers. - if request["url"].startswith("https:"): - # We only support unsigned trailer checksums currently. As this - # disables payload signing we'll only use trailers over TLS. 
- location_type = "trailer" - - algorithm = { - "algorithm": algorithm_name, - "in": location_type, - "name": "x-amz-checksum-%s" % algorithm_name, - } - - if algorithm["name"] in request["headers"]: - # If the header is already set by the customer, skip calculation - return - - checksum_context = request["context"].get("checksum", {}) - checksum_context["request_algorithm"] = algorithm - request["context"]["checksum"] = checksum_context - elif operation_model.http_checksum_required or http_checksum.get( - "requestChecksumRequired" - ): - # Otherwise apply the old http checksum behavior via Content-MD5 - checksum_context = request["context"].get("checksum", {}) - checksum_context["request_algorithm"] = "conditional-md5" - request["context"]["checksum"] = checksum_context - - -def apply_request_checksum(request): - checksum_context = request.get("context", {}).get("checksum", {}) - algorithm = checksum_context.get("request_algorithm") - - if not algorithm: - return - - if algorithm == "conditional-md5": - # Special case to handle the http checksum required trait - conditionally_calculate_md5(request) - elif algorithm["in"] == "header": - _apply_request_header_checksum(request) - elif algorithm["in"] == "trailer": - _apply_request_trailer_checksum(request) - else: - raise FlexibleChecksumError( - error_msg="Unknown checksum variant: %s" % algorithm["in"] - ) - - -def _apply_request_header_checksum(request): - checksum_context = request.get("context", {}).get("checksum", {}) - algorithm = checksum_context.get("request_algorithm") - location_name = algorithm["name"] - if location_name in request["headers"]: - # If the header is already set by the customer, skip calculation - return - checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"]) - digest = checksum_cls().handle(request["body"]) - request["headers"][location_name] = digest - - -def _apply_request_trailer_checksum(request): - checksum_context = request.get("context", {}).get("checksum", {}) - algorithm = checksum_context.get("request_algorithm") - location_name = algorithm["name"] - checksum_cls = _CHECKSUM_CLS.get(algorithm["algorithm"]) - - headers = request["headers"] - body = request["body"] - - if location_name in headers: - # If the header is already set by the customer, skip calculation - return - - headers["Transfer-Encoding"] = "chunked" - if "Content-Encoding" in headers: - # We need to preserve the existing content encoding and add - # aws-chunked as a new content encoding. - headers["Content-Encoding"] += ",aws-chunked" - else: - headers["Content-Encoding"] = "aws-chunked" - headers["X-Amz-Trailer"] = location_name - - content_length = determine_content_length(body) - if content_length is not None: - # Send the decoded content length if we can determine it. 
Some - # services such as S3 may require the decoded content length - headers["X-Amz-Decoded-Content-Length"] = str(content_length) - - if isinstance(body, (bytes, bytearray)): - body = io.BytesIO(body) - - request["body"] = AwsChunkedWrapper( - body, - checksum_cls=checksum_cls, - checksum_name=location_name, - ) - - -def resolve_response_checksum_algorithms( - request, operation_model, params, supported_algorithms=None -): - http_checksum = operation_model.http_checksum - mode_member = http_checksum.get("requestValidationModeMember") - if mode_member and mode_member in params: - if supported_algorithms is None: - supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS - response_algorithms = { - a.lower() for a in http_checksum.get("responseAlgorithms", []) - } - - usable_algorithms = [] - for algorithm in _ALGORITHMS_PRIORITY_LIST: - if algorithm not in response_algorithms: - continue - if algorithm in supported_algorithms: - usable_algorithms.append(algorithm) - - checksum_context = request["context"].get("checksum", {}) - checksum_context["response_algorithms"] = usable_algorithms - request["context"]["checksum"] = checksum_context - - -def handle_checksum_body(http_response, response, context, operation_model): - headers = response["headers"] - checksum_context = context.get("checksum", {}) - algorithms = checksum_context.get("response_algorithms") - - if not algorithms: - return - - for algorithm in algorithms: - header_name = "x-amz-checksum-%s" % algorithm - # If the header is not found, check the next algorithm - if header_name not in headers: - continue - - # If a - is in the checksum this is not valid Base64. S3 returns - # checksums that include a -# suffix to indicate a checksum derived - # from the hash of all part checksums. We cannot wrap this response - if "-" in headers[header_name]: - continue - - if operation_model.has_streaming_output: - response["body"] = _handle_streaming_response( - http_response, response, algorithm - ) - else: - response["body"] = _handle_bytes_response( - http_response, response, algorithm - ) - - # Expose metadata that the checksum check actually occured - checksum_context = response["context"].get("checksum", {}) - checksum_context["response_algorithm"] = algorithm - response["context"]["checksum"] = checksum_context - return - - logger.info( - f'Skipping checksum validation. Response did not contain one of the ' - f'following algorithms: {algorithms}.' 
- ) - - -def _handle_streaming_response(http_response, response, algorithm): - checksum_cls = _CHECKSUM_CLS.get(algorithm) - header_name = "x-amz-checksum-%s" % algorithm - return StreamingChecksumBody( - http_response.raw, - response["headers"].get("content-length"), - checksum_cls(), - response["headers"][header_name], - ) - - -def _handle_bytes_response(http_response, response, algorithm): - body = http_response.content - header_name = "x-amz-checksum-%s" % algorithm - checksum_cls = _CHECKSUM_CLS.get(algorithm) - checksum = checksum_cls() - checksum.update(body) - expected = response["headers"][header_name] - if checksum.digest() != base64.b64decode(expected): - error_msg = ( - "Expected checksum %s did not match calculated checksum: %s" - % ( - expected, - checksum.b64digest(), - ) - ) - raise FlexibleChecksumError(error_msg=error_msg) - return body - - -_CHECKSUM_CLS = { - "crc32": Crc32Checksum, - "sha1": Sha1Checksum, - "sha256": Sha256Checksum, -} -_CRT_CHECKSUM_ALGORITHMS = ["crc32", "crc32c"] -if HAS_CRT: - # Use CRT checksum implementations if available - _CRT_CHECKSUM_CLS = { - "crc32": CrtCrc32Checksum, - "crc32c": CrtCrc32cChecksum, - } - _CHECKSUM_CLS.update(_CRT_CHECKSUM_CLS) - # Validate this list isn't out of sync with _CRT_CHECKSUM_CLS keys - assert all( - name in _CRT_CHECKSUM_ALGORITHMS for name in _CRT_CHECKSUM_CLS.keys() - ) -_SUPPORTED_CHECKSUM_ALGORITHMS = list(_CHECKSUM_CLS.keys()) -_ALGORITHMS_PRIORITY_LIST = ['crc32c', 'crc32', 'sha1', 'sha256'] diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/monitoring.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/monitoring.py deleted file mode 100644 index 71d7230246b034f1a66f69b7a050a433b0ab9d13..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/monitoring.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. 
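# A minimal illustrative sketch (assumed usage, not botocore's documented public
# API) of the checksum helpers defined in the httpchecksum module above:
# Crc32Checksum produces a base64-encoded CRC32 digest, and AwsChunkedWrapper
# re-frames a payload as aws-chunked with that digest appended as a trailer.
import io

from botocore.httpchecksum import AwsChunkedWrapper, Crc32Checksum

payload = b"example payload"
print(Crc32Checksum().handle(payload))  # base64-encoded CRC32 of the payload

wrapped = AwsChunkedWrapper(
    io.BytesIO(payload),
    checksum_cls=Crc32Checksum,
    checksum_name="x-amz-checksum-crc32",
)
encoded = wrapped.read()  # aws-chunked body ending with the checksum trailer
print(encoded.splitlines()[-2])  # b'x-amz-checksum-crc32:<digest>'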
-import json -import logging -import re -import time - -from botocore.compat import ensure_bytes, ensure_unicode, urlparse -from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS - -logger = logging.getLogger(__name__) - - -class Monitor: - _EVENTS_TO_REGISTER = [ - 'before-parameter-build', - 'request-created', - 'response-received', - 'after-call', - 'after-call-error', - ] - - def __init__(self, adapter, publisher): - """Abstraction for monitoring clients API calls - - :param adapter: An adapter that takes event emitter events - and produces monitor events - - :param publisher: A publisher for generated monitor events - """ - self._adapter = adapter - self._publisher = publisher - - def register(self, event_emitter): - """Register an event emitter to the monitor""" - for event_to_register in self._EVENTS_TO_REGISTER: - event_emitter.register_last(event_to_register, self.capture) - - def capture(self, event_name, **payload): - """Captures an incoming event from the event emitter - - It will feed an event emitter event to the monitor's adaptor to create - a monitor event and then publish that event to the monitor's publisher. - """ - try: - monitor_event = self._adapter.feed(event_name, payload) - if monitor_event: - self._publisher.publish(monitor_event) - except Exception as e: - logger.debug( - 'Exception %s raised by client monitor in handling event %s', - e, - event_name, - exc_info=True, - ) - - -class MonitorEventAdapter: - def __init__(self, time=time.time): - """Adapts event emitter events to produce monitor events - - :type time: callable - :param time: A callable that produces the current time - """ - self._time = time - - def feed(self, emitter_event_name, emitter_payload): - """Feed an event emitter event to generate a monitor event - - :type emitter_event_name: str - :param emitter_event_name: The name of the event emitted - - :type emitter_payload: dict - :param emitter_payload: The payload to associated to the event - emitted - - :rtype: BaseMonitorEvent - :returns: A monitor event based on the event emitter events - fired - """ - return self._get_handler(emitter_event_name)(**emitter_payload) - - def _get_handler(self, event_name): - return getattr( - self, '_handle_' + event_name.split('.')[0].replace('-', '_') - ) - - def _handle_before_parameter_build(self, model, context, **kwargs): - context['current_api_call_event'] = APICallEvent( - service=model.service_model.service_id, - operation=model.wire_name, - timestamp=self._get_current_time(), - ) - - def _handle_request_created(self, request, **kwargs): - context = request.context - new_attempt_event = context[ - 'current_api_call_event' - ].new_api_call_attempt(timestamp=self._get_current_time()) - new_attempt_event.request_headers = request.headers - new_attempt_event.url = request.url - context['current_api_call_attempt_event'] = new_attempt_event - - def _handle_response_received( - self, parsed_response, context, exception, **kwargs - ): - attempt_event = context.pop('current_api_call_attempt_event') - attempt_event.latency = self._get_latency(attempt_event) - if parsed_response is not None: - attempt_event.http_status_code = parsed_response[ - 'ResponseMetadata' - ]['HTTPStatusCode'] - attempt_event.response_headers = parsed_response[ - 'ResponseMetadata' - ]['HTTPHeaders'] - attempt_event.parsed_error = parsed_response.get('Error') - else: - attempt_event.wire_exception = exception - return attempt_event - - def _handle_after_call(self, context, parsed, **kwargs): - 
context['current_api_call_event'].retries_exceeded = parsed[ - 'ResponseMetadata' - ].get('MaxAttemptsReached', False) - return self._complete_api_call(context) - - def _handle_after_call_error(self, context, exception, **kwargs): - # If the after-call-error was emitted and the error being raised - # was a retryable connection error, then the retries must have exceeded - # for that exception as this event gets emitted **after** retries - # happen. - context[ - 'current_api_call_event' - ].retries_exceeded = self._is_retryable_exception(exception) - return self._complete_api_call(context) - - def _is_retryable_exception(self, exception): - return isinstance( - exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR']) - ) - - def _complete_api_call(self, context): - call_event = context.pop('current_api_call_event') - call_event.latency = self._get_latency(call_event) - return call_event - - def _get_latency(self, event): - return self._get_current_time() - event.timestamp - - def _get_current_time(self): - return int(self._time() * 1000) - - -class BaseMonitorEvent: - def __init__(self, service, operation, timestamp): - """Base monitor event - - :type service: str - :param service: A string identifying the service associated to - the event - - :type operation: str - :param operation: A string identifying the operation of service - associated to the event - - :type timestamp: int - :param timestamp: Epoch time in milliseconds from when the event began - """ - self.service = service - self.operation = operation - self.timestamp = timestamp - - def __repr__(self): - return f'{self.__class__.__name__}({self.__dict__!r})' - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - -class APICallEvent(BaseMonitorEvent): - def __init__( - self, - service, - operation, - timestamp, - latency=None, - attempts=None, - retries_exceeded=False, - ): - """Monitor event for a single API call - - This event corresponds to a single client method call, which includes - every HTTP requests attempt made in order to complete the client call - - :type service: str - :param service: A string identifying the service associated to - the event - - :type operation: str - :param operation: A string identifying the operation of service - associated to the event - - :type timestamp: int - :param timestamp: Epoch time in milliseconds from when the event began - - :type latency: int - :param latency: The time in milliseconds to complete the client call - - :type attempts: list - :param attempts: The list of APICallAttempts associated to the - APICall - - :type retries_exceeded: bool - :param retries_exceeded: True if API call exceeded retries. 
False - otherwise - """ - super().__init__( - service=service, operation=operation, timestamp=timestamp - ) - self.latency = latency - self.attempts = attempts - if attempts is None: - self.attempts = [] - self.retries_exceeded = retries_exceeded - - def new_api_call_attempt(self, timestamp): - """Instantiates APICallAttemptEvent associated to the APICallEvent - - :type timestamp: int - :param timestamp: Epoch time in milliseconds to associate to the - APICallAttemptEvent - """ - attempt_event = APICallAttemptEvent( - service=self.service, operation=self.operation, timestamp=timestamp - ) - self.attempts.append(attempt_event) - return attempt_event - - -class APICallAttemptEvent(BaseMonitorEvent): - def __init__( - self, - service, - operation, - timestamp, - latency=None, - url=None, - http_status_code=None, - request_headers=None, - response_headers=None, - parsed_error=None, - wire_exception=None, - ): - """Monitor event for a single API call attempt - - This event corresponds to a single HTTP request attempt in completing - the entire client method call. - - :type service: str - :param service: A string identifying the service associated to - the event - - :type operation: str - :param operation: A string identifying the operation of service - associated to the event - - :type timestamp: int - :param timestamp: Epoch time in milliseconds from when the HTTP request - started - - :type latency: int - :param latency: The time in milliseconds to complete the HTTP request - whether it succeeded or failed - - :type url: str - :param url: The URL the attempt was sent to - - :type http_status_code: int - :param http_status_code: The HTTP status code of the HTTP response - if there was a response - - :type request_headers: dict - :param request_headers: The HTTP headers sent in making the HTTP - request - - :type response_headers: dict - :param response_headers: The HTTP headers returned in the HTTP response - if there was a response - - :type parsed_error: dict - :param parsed_error: The error parsed if the service returned an - error back - - :type wire_exception: Exception - :param wire_exception: The exception raised in sending the HTTP - request (i.e. 
ConnectionError) - """ - super().__init__( - service=service, operation=operation, timestamp=timestamp - ) - self.latency = latency - self.url = url - self.http_status_code = http_status_code - self.request_headers = request_headers - self.response_headers = response_headers - self.parsed_error = parsed_error - self.wire_exception = wire_exception - - -class CSMSerializer: - _MAX_CLIENT_ID_LENGTH = 255 - _MAX_EXCEPTION_CLASS_LENGTH = 128 - _MAX_ERROR_CODE_LENGTH = 128 - _MAX_USER_AGENT_LENGTH = 256 - _MAX_MESSAGE_LENGTH = 512 - _RESPONSE_HEADERS_TO_EVENT_ENTRIES = { - 'x-amzn-requestid': 'XAmznRequestId', - 'x-amz-request-id': 'XAmzRequestId', - 'x-amz-id-2': 'XAmzId2', - } - _AUTH_REGEXS = { - 'v4': re.compile( - r'AWS4-HMAC-SHA256 ' - r'Credential=(?P<access_key>\w+)/\d+/' - r'(?P<signing_region>[a-z0-9-]+)/' - ), - 's3': re.compile(r'AWS (?P<access_key>\w+):'), - } - _SERIALIZEABLE_EVENT_PROPERTIES = [ - 'service', - 'operation', - 'timestamp', - 'attempts', - 'latency', - 'retries_exceeded', - 'url', - 'request_headers', - 'http_status_code', - 'response_headers', - 'parsed_error', - 'wire_exception', - ] - - def __init__(self, csm_client_id): - """Serializes monitor events to CSM (Client Side Monitoring) format - - :type csm_client_id: str - :param csm_client_id: The application identifier to associate - to the serialized events - """ - self._validate_client_id(csm_client_id) - self.csm_client_id = csm_client_id - - def _validate_client_id(self, csm_client_id): - if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH: - raise ValueError( - f'The value provided for csm_client_id: {csm_client_id} exceeds ' - f'the maximum length of {self._MAX_CLIENT_ID_LENGTH} characters' - ) - - def serialize(self, event): - """Serializes a monitor event to the CSM format - - :type event: BaseMonitorEvent - :param event: The event to serialize to bytes - - :rtype: bytes - :returns: The CSM serialized form of the event - """ - event_dict = self._get_base_event_dict(event) - event_type = self._get_event_type(event) - event_dict['Type'] = event_type - for attr in self._SERIALIZEABLE_EVENT_PROPERTIES: - value = getattr(event, attr, None) - if value is not None: - getattr(self, '_serialize_' + attr)( - value, event_dict, event_type=event_type - ) - return ensure_bytes(json.dumps(event_dict, separators=(',', ':'))) - - def _get_base_event_dict(self, event): - return { - 'Version': 1, - 'ClientId': self.csm_client_id, - } - - def _serialize_service(self, service, event_dict, **kwargs): - event_dict['Service'] = service - - def _serialize_operation(self, operation, event_dict, **kwargs): - event_dict['Api'] = operation - - def _serialize_timestamp(self, timestamp, event_dict, **kwargs): - event_dict['Timestamp'] = timestamp - - def _serialize_attempts(self, attempts, event_dict, **kwargs): - event_dict['AttemptCount'] = len(attempts) - if attempts: - self._add_fields_from_last_attempt(event_dict, attempts[-1]) - - def _add_fields_from_last_attempt(self, event_dict, last_attempt): - if last_attempt.request_headers: - # It does not matter which attempt to use to grab the region - # for the ApiCall event, but SDKs typically do the last one. 
- region = self._get_region(last_attempt.request_headers) - if region is not None: - event_dict['Region'] = region - event_dict['UserAgent'] = self._get_user_agent( - last_attempt.request_headers - ) - if last_attempt.http_status_code is not None: - event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code - if last_attempt.parsed_error is not None: - self._serialize_parsed_error( - last_attempt.parsed_error, event_dict, 'ApiCall' - ) - if last_attempt.wire_exception is not None: - self._serialize_wire_exception( - last_attempt.wire_exception, event_dict, 'ApiCall' - ) - - def _serialize_latency(self, latency, event_dict, event_type): - if event_type == 'ApiCall': - event_dict['Latency'] = latency - elif event_type == 'ApiCallAttempt': - event_dict['AttemptLatency'] = latency - - def _serialize_retries_exceeded( - self, retries_exceeded, event_dict, **kwargs - ): - event_dict['MaxRetriesExceeded'] = 1 if retries_exceeded else 0 - - def _serialize_url(self, url, event_dict, **kwargs): - event_dict['Fqdn'] = urlparse(url).netloc - - def _serialize_request_headers( - self, request_headers, event_dict, **kwargs - ): - event_dict['UserAgent'] = self._get_user_agent(request_headers) - if self._is_signed(request_headers): - event_dict['AccessKey'] = self._get_access_key(request_headers) - region = self._get_region(request_headers) - if region is not None: - event_dict['Region'] = region - if 'X-Amz-Security-Token' in request_headers: - event_dict['SessionToken'] = request_headers[ - 'X-Amz-Security-Token' - ] - - def _serialize_http_status_code( - self, http_status_code, event_dict, **kwargs - ): - event_dict['HttpStatusCode'] = http_status_code - - def _serialize_response_headers( - self, response_headers, event_dict, **kwargs - ): - for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items(): - if header in response_headers: - event_dict[entry] = response_headers[header] - - def _serialize_parsed_error( - self, parsed_error, event_dict, event_type, **kwargs - ): - field_prefix = 'Final' if event_type == 'ApiCall' else '' - event_dict[field_prefix + 'AwsException'] = self._truncate( - parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH - ) - event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate( - parsed_error['Message'], self._MAX_MESSAGE_LENGTH - ) - - def _serialize_wire_exception( - self, wire_exception, event_dict, event_type, **kwargs - ): - field_prefix = 'Final' if event_type == 'ApiCall' else '' - event_dict[field_prefix + 'SdkException'] = self._truncate( - wire_exception.__class__.__name__, self._MAX_EXCEPTION_CLASS_LENGTH - ) - event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate( - str(wire_exception), self._MAX_MESSAGE_LENGTH - ) - - def _get_event_type(self, event): - if isinstance(event, APICallEvent): - return 'ApiCall' - elif isinstance(event, APICallAttemptEvent): - return 'ApiCallAttempt' - - def _get_access_key(self, request_headers): - auth_val = self._get_auth_value(request_headers) - _, auth_match = self._get_auth_match(auth_val) - return auth_match.group('access_key') - - def _get_region(self, request_headers): - if not self._is_signed(request_headers): - return None - auth_val = self._get_auth_value(request_headers) - signature_version, auth_match = self._get_auth_match(auth_val) - if signature_version != 'v4': - return None - return auth_match.group('signing_region') - - def _get_user_agent(self, request_headers): - return self._truncate( - ensure_unicode(request_headers.get('User-Agent', '')), - self._MAX_USER_AGENT_LENGTH, - ) 
- - def _is_signed(self, request_headers): - return 'Authorization' in request_headers - - def _get_auth_value(self, request_headers): - return ensure_unicode(request_headers['Authorization']) - - def _get_auth_match(self, auth_val): - for signature_version, regex in self._AUTH_REGEXS.items(): - match = regex.match(auth_val) - if match: - return signature_version, match - return None, None - - def _truncate(self, text, max_length): - if len(text) > max_length: - logger.debug( - 'Truncating following value to maximum length of ' '%s: %s', - text, - max_length, - ) - return text[:max_length] - return text - - -class SocketPublisher: - _MAX_MONITOR_EVENT_LENGTH = 8 * 1024 - - def __init__(self, socket, host, port, serializer): - """Publishes monitor events to a socket - - :type socket: socket.socket - :param socket: The socket object to use to publish events - - :type host: string - :param host: The host to send events to - - :type port: integer - :param port: The port on the host to send events to - - :param serializer: The serializer to use to serialize the event - to a form that can be published to the socket. This must - have a `serialize()` method that accepts a monitor event - and return bytes - """ - self._socket = socket - self._address = (host, port) - self._serializer = serializer - - def publish(self, event): - """Publishes a specified monitor event - - :type event: BaseMonitorEvent - :param event: The monitor event to be sent - over the publisher's socket to the desired address. - """ - serialized_event = self._serializer.serialize(event) - if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH: - logger.debug( - 'Serialized event of size %s exceeds the maximum length ' - 'allowed: %s. Not sending event to socket.', - len(serialized_event), - self._MAX_MONITOR_EVENT_LENGTH, - ) - return - self._socket.sendto(serialized_event, self._address) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__init__.py deleted file mode 100644 index 3c50c5dcfeeda2efed282200a5c5cc8c5f7542f7..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] diff --git a/spaces/Boilin/URetinex-Net/test.py b/spaces/Boilin/URetinex-Net/test.py deleted file mode 100644 index f4c4aa52fed3a2435a9ba63adbc1131c49bd5def..0000000000000000000000000000000000000000 --- a/spaces/Boilin/URetinex-Net/test.py +++ /dev/null @@ -1,200 +0,0 @@ -import argparse -import torch -import torch.nn as nn -from network.Math_Module import P, Q -from network.decom import Decom -import os -#import torchvision -import torchvision.transforms as transforms -from PIL import Image -import time -from utils import * -import cv2 - -def one2three(x): - return torch.cat([x, x, x], dim=1).to(x) - - -class Inference(nn.Module): - def __init__(self, opts): - super().__init__() - self.opts = opts - # loading decomposition model - self.model_Decom_low = Decom() - self.model_Decom_low = load_initialize(self.model_Decom_low, - self.opts.Decom_model_low_path) - # loading R; old_model_opts; and L model - self.unfolding_opts, self.model_R, self.model_L = load_unfolding( - self.opts.unfolding_model_path) - # loading adjustment model - self.adjust_model = load_adjustment(self.opts.adjust_model_path) - self.P = P() - self.Q = Q() - transform = [ - transforms.ToTensor(), - ] - self.transform = transforms.Compose(transform) - print(self.model_Decom_low) - print(self.model_R) - print(self.model_L) - print(self.adjust_model) - #time.sleep(8) - - def unfolding(self, input_low_img): - for t in range(self.unfolding_opts.round): - if t == 0: # initialize R0, L0 - P, Q = self.model_Decom_low(input_low_img) - else: # update P and Q - w_p = (self.unfolding_opts.gamma + - self.unfolding_opts.Roffset * t) - w_q = (self.unfolding_opts.lamda + - self.unfolding_opts.Loffset * t) - P = self.P(I=input_low_img, Q=Q, R=R, gamma=w_p) - Q = self.Q(I=input_low_img, P=P, L=L, lamda=w_q) - R = self.model_R(r=P, l=Q) - L = self.model_L(l=Q) - return R, L - - def lllumination_adjust(self, L, ratio): - ratio = torch.ones(L.shape) * self.opts.ratio - return self.adjust_model(l=L, alpha=ratio) - - def forward(self, input_low_img): - # if not torch.cuda.is_available(): - # input_low_img = input_low_img.cuda() - with torch.no_grad(): - start = time.time() - R, L = self.unfolding(input_low_img) - High_L = self.lllumination_adjust(L, self.opts.ratio) - I_enhance = High_L * R - p_time = (time.time() - start) - return I_enhance, p_time - - def run(self, low_img_path): - file_name = os.path.basename(self.opts.img_path) - name = file_name.split('.')[0] - low_img = self.transform(Image.open(low_img_path)).unsqueeze(0) - -# print('**************************************************************************') -# print(low_img) -# print(type(low_img)) -# print(type(Image.open(low_img_path))) -# print(Image.open(low_img_path)) - - enhance, p_time = self.forward(input_low_img=low_img) - if not os.path.exists(self.opts.output): - os.makedirs(self.opts.output) - save_path = os.path.join( - self.opts.output, - file_name.replace(name, - "%s_%d_URetinexNet" % (name, self.opts.ratio))) - np_save_TensorImg(enhance, save_path) - print( - "================================= time for %s: %f============================" - % (file_name, p_time)) - - - - # This is my own modified version of the run function - # It avoids saving the image to disk - # It could later be changed to also save the image to disk - def runForWeb(self, image): - # First, downsample the input image until it fits within the pixel-count limit for running 
- max_pixel_limit=600*600 - pyr_down_times=0 - while True: - a=len(image) - b=len(image[0]) - c=a*b - if(c<=max_pixel_limit): - break - pyr_down_times+=1 - image=cv2.pyrDown(image) - - print(image.shape) - # input - low_img = self.transform(Image.fromarray(np.uint8(image))).unsqueeze(0) - - - # low_img=Image.fromarray(image.astype('uint8')).convert('RGB') - # print('#############################################') - # print(type(low_img)) - # print(low_img) - - - # run the model - enhance, p_time = self.forward(input_low_img=low_img) - - # print('UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU') - - # output - # The result-returning helper in utils.py needs a small change here; see where np_save_TensorImg is used in the run function above for the place to modify - # Upsample the result to restore the original image size - result_image=result_for_gradio(enhance) - for i in range(pyr_down_times): - result_image=cv2.pyrUp(result_image) - # return result_for_gradio(enhance) - print(result_image.shape) - return result_image - - -# This is the interface exposed for the gradio framework to call -# The gradio framework provides the backend control and the frontend page display -def functionForGradio(image): - parser = argparse.ArgumentParser(description='Configure') - # specify your data path here! - parser.add_argument('--img_path', type=str, default="./demo/input/3.png") - parser.add_argument('--output', type=str, default="./demo/output") - # ratio are recommended to be 3-5, bigger ratio will lead to over-exposure - parser.add_argument('--ratio', type=int, default=5) - # model path - parser.add_argument('--Decom_model_low_path', - type=str, - default="./ckpt/init_low.pth") - parser.add_argument('--unfolding_model_path', - type=str, - default="./ckpt/unfolding.pth") - parser.add_argument('--adjust_model_path', - type=str, - default="./ckpt/L_adjust.pth") - parser.add_argument('--gpu_id', type=int, default=0) - - opts = parser.parse_args() - for k, v in vars(opts).items(): - print(k, v) - - os.environ['CUDA_VISIBLE_DEVICES'] = str(opts.gpu_id) - model = Inference(opts) - - # pass the numpy array in here and run the model - return model.runForWeb(image) - - -# This is the algorithm's original main entry point; the gradio interface above was adapted from it - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Configure') - # specify your data path here! - parser.add_argument('--img_path', type=str, default="./demo/input/test3.jpg") - parser.add_argument('--output', type=str, default="./demo/output") - # ratio are recommended to be 3-5, bigger ratio will lead to over-exposure - parser.add_argument('--ratio', type=int, default=5) - # model path - parser.add_argument('--Decom_model_low_path', - type=str, - default="./ckpt/init_low.pth") - parser.add_argument('--unfolding_model_path', - type=str, - default="./ckpt/unfolding.pth") - parser.add_argument('--adjust_model_path', - type=str, - default="./ckpt/L_adjust.pth") - parser.add_argument('--gpu_id', type=int, default=0) - - opts = parser.parse_args() - for k, v in vars(opts).items(): - print(k, v) - - os.environ['CUDA_VISIBLE_DEVICES'] = str(opts.gpu_id) - model = Inference(opts) - model.run(opts.img_path) diff --git a/spaces/CVPR/BigDL-Nano_inference/app.py b/spaces/CVPR/BigDL-Nano_inference/app.py deleted file mode 100644 index 468066d5d078715f3cf3dfdda952a39ac233ec50..0000000000000000000000000000000000000000 --- a/spaces/CVPR/BigDL-Nano_inference/app.py +++ /dev/null @@ -1,193 +0,0 @@ -# -# Copyright 2016 The BigDL Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Part of the code in this file is adapted from -# https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/eval.py and -# https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/train.py - -# MIT License - -# Copyright (c) 2022 Lorenzo Breschi - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import gradio as gr -import numpy as np -import time -from data import write_image_tensor, PatchDataModule, prepare_data, image2tensor, tensor2image -import torch -from tqdm import tqdm -from bigdl.nano.pytorch import InferenceOptimizer -from torch.utils.data import DataLoader -from pathlib import Path -from torch.utils.data import Dataset -import datetime -import huggingface_hub - - -device = 'cpu' -dtype = torch.float32 -MODEL_REPO = 'CVPR/FSPBT' -ckpt_path = huggingface_hub.hf_hub_download( - MODEL_REPO, 'generator.pt') -generator = torch.load(ckpt_path) -generator.eval() -generator.to(device, dtype) -params = {'batch_size': 1, - 'num_workers': 0} - - -class ImageDataset(Dataset): - def __init__(self, img): - self.imgs = [image2tensor(img)] - def __getitem__(self, idx: int) -> dict: - return self.imgs[idx] - - def __len__(self) -> int: - return len(self.imgs) - - -data_path = Path('data') -train_image_dd = prepare_data(data_path) -dm = PatchDataModule(train_image_dd, patch_size=2**6, - batch_size=2**3, patch_num=2**6) - -# quantize model -train_loader = dm.train_dataloader() -train_loader_iter = iter(train_loader) -quantized_model = InferenceOptimizer.quantize(generator, - accelerator=None, - calib_dataloader=train_loader) - - -def original_transfer(input_img): - w, h, _ = input_img.shape - print(datetime.datetime.now()) - print("input size: ", w, h) - # resize too large image - if w > 3000 or h > 3000: - ratio = min(3000 / w, 3000 / h) - w = int(w * ratio) - h = int(h * ratio) - if w % 4 != 0 or h % 4 != 0: - NW = int((w // 4) * 4) - NH = int((h // 4) * 4) - input_img = np.resize(input_img,(NW,NH,3)) - st = time.perf_counter() - dataset = ImageDataset(input_img) - loader = DataLoader(dataset, **params) - with torch.no_grad(): - for inputs in tqdm(loader): - inputs = 
inputs.to(device, dtype) - st = time.perf_counter() - outputs = generator(inputs) - ori_time = time.perf_counter() - st - ori_time = "{:.3f}s".format(ori_time) - ori_image = np.array(tensor2image(outputs[0])) - del inputs - del outputs - return ori_image, ori_time - -def nano_transfer(input_img): - w, h, _ = input_img.shape - print(datetime.datetime.now()) - print("input size: ", w, h) - # resize too large image - if w > 3000 or h > 3000: - ratio = min(3000 / w, 3000 / h) - w = int(w * ratio) - h = int(h * ratio) - if w % 4 != 0 or h % 4 != 0: - NW = int((w // 4) * 4) - NH = int((h // 4) * 4) - input_img = np.resize(input_img,(NW,NH,3)) - st = time.perf_counter() - dataset = ImageDataset(input_img) - loader = DataLoader(dataset, **params) - with torch.no_grad(): - for inputs in tqdm(loader): - inputs = inputs.to(device, dtype) - st = time.perf_counter() - outputs = quantized_model(inputs) - nano_time = time.perf_counter() - st - nano_time = "{:.3f}s".format(nano_time) - nano_image = np.array(tensor2image(outputs[0])) - del inputs - del outputs - return nano_image, nano_time - - -def clear(): - return None, None, None, None - - -demo = gr.Blocks() - -with demo: - gr.Markdown("

BigDL-Nano inference demo") - with gr.Row().style(equal_height=False): - with gr.Column(): - gr.Markdown(''' - Overview - - BigDL-Nano is a library in [BigDL 2.0](https://github.com/intel-analytics/BigDL) that allows the users to transparently accelerate their deep learning pipelines (including data processing, training and inference) by automatically integrating optimized libraries, best-known configurations, and software optimizations. - - The video on the right shows how the user can easily enable quantization using BigDL-Nano (with just a couple of lines of code); you may refer to our [CVPR 2022 demo paper](https://arxiv.org/abs/2204.01715) for more details. - ''') - with gr.Column(): - gr.Video(value="data/nano_quantize_api.mp4") - gr.Markdown(''' - Demo - - This section uses an image stylization example to demonstrate the speedup of the above code when using quantization in BigDL-Nano (about 2~3x inference time speedup). - The demo is adapted from the original [FSPBT-Image-Translation code](https://github.com/rnwzd/FSPBT-Image-Translation), - and the default image is from [the COCO dataset](https://cocodataset.org/#home). - ''') - with gr.Row().style(equal_height=False): - input_img = gr.Image(label="input image", value="data/COCO_image.jpg", source="upload") - with gr.Column(): - ori_but = gr.Button("Standard PyTorch") - nano_but = gr.Button("BigDL-Nano") - clear_but = gr.Button("Clear Output") - with gr.Row().style(equal_height=False): - with gr.Column(): - ori_time = gr.Text(label="Standard PyTorch latency") - ori_image = gr.Image(label="Standard PyTorch output image") - with gr.Column(): - nano_time = gr.Text(label="BigDL-Nano latency") - nano_image = gr.Image(label="BigDL-Nano output image") - - ori_but.click(original_transfer, inputs=input_img, outputs=[ori_image, ori_time]) - nano_but.click(nano_transfer, inputs=input_img, outputs=[nano_image, nano_time]) - clear_but.click(clear, inputs=None, outputs=[ori_image, ori_time, nano_image, nano_time]) - - -demo.launch(share=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/fpn.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/fpn.py deleted file mode 100644 index 7b967318f63421e71613154565bd5f8f7d9b8312..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/backbone/fpn.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import math -import fvcore.nn.weight_init as weight_init -import torch.nn.functional as F -from torch import nn - -from detectron2.layers import Conv2d, ShapeSpec, get_norm - -from .backbone import Backbone -from .build import BACKBONE_REGISTRY -from .resnet import build_resnet_backbone - -__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] - - -class FPN(Backbone): - """ - This module implements Feature Pyramid Network. - It creates pyramid features built on top of some input feature maps. - """ - - def __init__( - self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" - ): - """ - Args: - bottom_up (Backbone): module representing the bottom up subnetwork. - Must be a subclass of :class:`Backbone`. The multi-scale feature - maps generated by the bottom up network, and listed in `in_features`, - are used to generate FPN levels. - in_features (list[str]): names of the input feature maps coming - from the backbone to which FPN is attached. For example, if the - backbone produces ["res2", "res3", "res4"], any *contiguous* sublist - of these may be used; order must be from high to low resolution. - out_channels (int): number of channels in the output feature maps. - norm (str): the normalization to use. - top_block (nn.Module or None): if provided, an extra operation will - be performed on the output of the last (smallest resolution) - FPN output, and the result will extend the result list. The top_block - further downsamples the feature map. It must have an attribute - "num_levels", meaning the number of extra FPN levels added by - this block, and "in_feature", which is a string representing - its input feature (e.g., p5). 
- fuse_type (str): types for fusing the top down features and the lateral - ones. It can be "sum" (default), which sums up element-wise; or "avg", - which takes the element-wise mean of the two. - """ - super(FPN, self).__init__() - assert isinstance(bottom_up, Backbone) - - # Feature map strides and channels from the bottom up network (e.g. ResNet) - input_shapes = bottom_up.output_shape() - in_strides = [input_shapes[f].stride for f in in_features] - in_channels = [input_shapes[f].channels for f in in_features] - - _assert_strides_are_log2_contiguous(in_strides) - lateral_convs = [] - output_convs = [] - - use_bias = norm == "" - for idx, in_channels in enumerate(in_channels): - lateral_norm = get_norm(norm, out_channels) - output_norm = get_norm(norm, out_channels) - - lateral_conv = Conv2d( - in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm - ) - output_conv = Conv2d( - out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - bias=use_bias, - norm=output_norm, - ) - weight_init.c2_xavier_fill(lateral_conv) - weight_init.c2_xavier_fill(output_conv) - stage = int(math.log2(in_strides[idx])) - self.add_module("fpn_lateral{}".format(stage), lateral_conv) - self.add_module("fpn_output{}".format(stage), output_conv) - - lateral_convs.append(lateral_conv) - output_convs.append(output_conv) - # Place convs into top-down order (from low to high resolution) - # to make the top-down computation in forward clearer. - self.lateral_convs = lateral_convs[::-1] - self.output_convs = output_convs[::-1] - self.top_block = top_block - self.in_features = in_features - self.bottom_up = bottom_up - # Return feature names are "p", like ["p2", "p3", ..., "p6"] - self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in in_strides} - # top block output feature maps. - if self.top_block is not None: - for s in range(stage, stage + self.top_block.num_levels): - self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) - - self._out_features = list(self._out_feature_strides.keys()) - self._out_feature_channels = {k: out_channels for k in self._out_features} - self._size_divisibility = in_strides[-1] - assert fuse_type in {"avg", "sum"} - self._fuse_type = fuse_type - - @property - def size_divisibility(self): - return self._size_divisibility - - def forward(self, x): - """ - Args: - input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to - feature map tensor for each feature level in high to low resolution order. - - Returns: - dict[str->Tensor]: - mapping from feature map name to FPN feature map tensor - in high to low resolution order. Returned feature names follow the FPN - paper convention: "p", where stage has stride = 2 ** stage e.g., - ["p2", "p3", ..., "p6"]. 
- """ - # Reverse feature maps into top-down order (from low to high resolution) - bottom_up_features = self.bottom_up(x) - x = [bottom_up_features[f] for f in self.in_features[::-1]] - results = [] - prev_features = self.lateral_convs[0](x[0]) - results.append(self.output_convs[0](prev_features)) - for features, lateral_conv, output_conv in zip( - x[1:], self.lateral_convs[1:], self.output_convs[1:] - ): - top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest") - lateral_features = lateral_conv(features) - prev_features = lateral_features + top_down_features - if self._fuse_type == "avg": - prev_features /= 2 - results.insert(0, output_conv(prev_features)) - - if self.top_block is not None: - top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None) - if top_block_in_feature is None: - top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] - results.extend(self.top_block(top_block_in_feature)) - assert len(self._out_features) == len(results) - return dict(zip(self._out_features, results)) - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - -def _assert_strides_are_log2_contiguous(strides): - """ - Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". - """ - for i, stride in enumerate(strides[1:], 1): - assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format( - stride, strides[i - 1] - ) - - -class LastLevelMaxPool(nn.Module): - """ - This module is used in the original FPN to generate a downsampled - P6 feature from P5. - """ - - def __init__(self): - super().__init__() - self.num_levels = 1 - self.in_feature = "p5" - - def forward(self, x): - return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7 from - C5 feature. - """ - - def __init__(self, in_channels, out_channels, in_feature="res5"): - super().__init__() - self.num_levels = 2 - self.in_feature = in_feature - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - for module in [self.p6, self.p7]: - weight_init.c2_xavier_fill(module) - - def forward(self, c5): - p6 = self.p6(c5) - p7 = self.p7(F.relu(p6)) - return [p6, p7] - - -@BACKBONE_REGISTRY.register() -def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = build_resnet_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelMaxPool(), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone - - -@BACKBONE_REGISTRY.register() -def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
- """ - bottom_up = build_resnet_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - in_channels_p6p7 = bottom_up.output_shape()["res5"].channels - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelP6P7(in_channels_p6p7, out_channels), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/adjacent_difference.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/adjacent_difference.h deleted file mode 100644 index 648ddba3e9bea6bf2f7b4c7a8b1b8fc330ac1818..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/adjacent_difference.h +++ /dev/null @@ -1,540 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ -#pragma once - - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -template -__host__ __device__ OutputIterator -adjacent_difference( - const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - BinaryFunction binary_op); - -namespace cuda_cub { - -namespace __adjacent_difference { - - namespace mpl = thrust::detail::mpl::math; - - template - struct PtxPolicy - { - enum - { - BLOCK_THREADS = _BLOCK_THREADS, - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, - ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD - }; - - static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; - static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; - static const cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; - }; - - template - struct items_per_thread - { - enum - { - value = (INPUT_SIZE <= 8) - ? NOMINAL_4B_ITEMS_PER_THREAD - : mpl::min< - int, - NOMINAL_4B_ITEMS_PER_THREAD, - mpl::max::value>::value - }; - }; - - template - struct Tuning; - - template - struct Tuning - { - enum - { - INPUT_SIZE = sizeof(T), - NOMINAL_4B_ITEMS_PER_THREAD = 7, - ITEMS_PER_THREAD = items_per_thread::value - }; - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_STORE_WARP_TRANSPOSE> - type; - }; - template - struct Tuning : Tuning - { - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 7, - ITEMS_PER_THREAD = items_per_thread::value - }; - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_LDG, - cub::BLOCK_STORE_WARP_TRANSPOSE> - type; - }; - - template - struct AdjacentDifferenceAgent - { - typedef typename iterator_traits::value_type input_type; - - // XXX output type must be result of BinaryOp(input_type,input_type); - typedef input_type output_type; - - template - struct PtxPlan : Tuning::type - { - typedef Tuning tuning; - - typedef typename core::LoadIterator::type LoadIt; - typedef typename core::BlockLoad::type BlockLoad; - - typedef typename core::BlockStore::type - BlockStore; - - typedef cub::BlockAdjacentDifference - BlockAdjacentDifference; - - union TempStorage - { - typename BlockAdjacentDifference::TempStorage discontinuity; - typename BlockLoad::TempStorage load; - typename BlockStore::TempStorage store; - }; // union TempStorage - }; // struct PtxPlan - - typedef typename core::specialize_plan_msvc10_war::type::type ptx_plan; - - typedef typename ptx_plan::LoadIt LoadIt; - typedef typename ptx_plan::BlockLoad BlockLoad; - typedef typename ptx_plan::BlockStore BlockStore; - typedef typename ptx_plan::BlockAdjacentDifference BlockAdjacentDifference; - typedef typename ptx_plan::TempStorage TempStorage; - - - enum - { - ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD, - BLOCK_THREADS = ptx_plan::BLOCK_THREADS, - ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE, - }; - - struct impl - { - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - TempStorage &temp_storage; - LoadIt load_it; // iterator to the first element - input_type * first_tile_previous; // iterator to the first element of previous tile value - OutputIt output_it; - BinaryOp binary_op; - - template - void 
THRUST_DEVICE_FUNCTION - consume_tile_impl(int num_remaining, - int tile_idx, - Size tile_base) - { - input_type input[ITEMS_PER_THREAD]; - input_type input_prev[ITEMS_PER_THREAD]; - output_type output[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - { - // Fill last elements with the first element - // because collectives are not suffix guarded - BlockLoad(temp_storage.load) - .Load(load_it + tile_base, - input, - num_remaining, - *(load_it + tile_base)); - } - else - { - BlockLoad(temp_storage.load).Load(load_it + tile_base, input); - } - - - core::sync_threadblock(); - - if (IS_FIRST_TILE) - { - BlockAdjacentDifference(temp_storage.discontinuity) - .FlagHeads(output, input, input_prev, binary_op); - if (threadIdx.x == 0) - output[0] = input[0]; - } - else - { - input_type tile_prev_input = first_tile_previous[tile_idx]; - BlockAdjacentDifference(temp_storage.discontinuity) - .FlagHeads(output, input, input_prev, binary_op, tile_prev_input); - } - - core::sync_threadblock(); - - if (IS_LAST_TILE) - { - BlockStore(temp_storage.store) - .Store(output_it + tile_base, output, num_remaining); - } - else - { - BlockStore(temp_storage.store).Store(output_it + tile_base, output); - } - } - - - template - void THRUST_DEVICE_FUNCTION - consume_tile(int num_remaining, - int tile_idx, - Size tile_base) - { - if (tile_idx == 0) - { - consume_tile_impl(num_remaining, - tile_idx, - tile_base); - } - else - { - consume_tile_impl(num_remaining, - tile_idx, - tile_base); - } - } - - void THRUST_DEVICE_FUNCTION - consume_range(Size num_items) - { - int tile_idx = blockIdx.x; - Size tile_base = static_cast(tile_idx) * ITEMS_PER_TILE; - Size num_remaining = num_items - tile_base; - - if (num_remaining > ITEMS_PER_TILE) // not a last tile - { - consume_tile(num_remaining, tile_idx, tile_base); - } - else if (num_remaining > 0) - { - consume_tile(num_remaining, tile_idx, tile_base); - } - } - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - THRUST_DEVICE_FUNCTION - impl(TempStorage &temp_storage_, - InputIt input_it_, - input_type * first_tile_previous_, - OutputIt result_, - BinaryOp binary_op_, - Size num_items) - : temp_storage(temp_storage_), - load_it(core::make_load_iterator(ptx_plan(), input_it_)), - first_tile_previous(first_tile_previous_), - output_it(result_), - binary_op(binary_op_) - { - consume_range(num_items); - } - }; // struct impl - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(InputIt first, - input_type *first_element, - OutputIt result, - BinaryOp binary_op, - Size num_items, - char * shmem) - { - TempStorage &storage = *reinterpret_cast(shmem); - impl(storage, first, first_element, result, binary_op, num_items); - } - }; // struct AdjacentDifferenceAgent - - template - struct InitAgent - { - template - struct PtxPlan : PtxPolicy<128> {}; - typedef core::specialize_plan ptx_plan; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(InputIt first, - OutputIt result, - Size num_tiles, - int items_per_tile, - char * /*shmem*/) - { - int tile_idx = blockIdx.x * blockDim.x + threadIdx.x; - Size tile_base = static_cast(tile_idx) * items_per_tile; - if (tile_base > 0 && tile_idx < num_tiles) - 
result[tile_idx] = first[tile_base - 1]; - } - }; // struct InitAgent - - template - cudaError_t THRUST_RUNTIME_FUNCTION - doit_step(void * d_temp_storage, - size_t & temp_storage_bytes, - InputIt first, - OutputIt result, - BinaryOp binary_op, - Size num_items, - cudaStream_t stream, - bool debug_sync) - { - if (num_items == 0) - return cudaSuccess; - - using core::AgentPlan; - using core::AgentLauncher; - - cudaError_t status = cudaSuccess; - - typedef AgentLauncher< - AdjacentDifferenceAgent > - difference_agent; - - typedef typename iterator_traits::value_type input_type; - typedef AgentLauncher > init_agent; - - AgentPlan difference_plan = difference_agent::get_plan(stream); - AgentPlan init_plan = init_agent::get_plan(); - - - Size tile_size = difference_plan.items_per_tile; - Size num_tiles = (num_items + tile_size - 1) / tile_size; - - size_t tmp1 = num_tiles * sizeof(input_type); - size_t vshmem_size = core::vshmem_size(difference_plan.shared_memory_size, - num_tiles); - - size_t allocation_sizes[2] = {tmp1, vshmem_size}; - void * allocations[2] = {NULL, NULL}; - - status = core::alias_storage(d_temp_storage, - temp_storage_bytes, - allocations, - allocation_sizes); - CUDA_CUB_RET_IF_FAIL(status); - - if (d_temp_storage == NULL) - { - return status; - } - - input_type *first_tile_previous = (input_type *)allocations[0]; - char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL; - - init_agent ia(init_plan, num_tiles, stream, "adjacent_difference::init_agent", debug_sync); - ia.launch(first, first_tile_previous, num_tiles, tile_size); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - difference_agent da(difference_plan, num_items, stream, vshmem_ptr, "adjacent_difference::difference_agent", debug_sync); - da.launch(first, - first_tile_previous, - result, - binary_op, - num_items); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - return status; - } - - template - OutputIt THRUST_RUNTIME_FUNCTION - adjacent_difference(execution_policy& policy, - InputIt first, - InputIt last, - OutputIt result, - BinaryOp binary_op) - { - typedef typename iterator_traits::difference_type size_type; - - size_type num_items = thrust::distance(first, last); - size_t storage_size = 0; - cudaStream_t stream = cuda_cub::stream(policy); - bool debug_sync = THRUST_DEBUG_SYNC_FLAG; - - cudaError_t status; - THRUST_INDEX_TYPE_DISPATCH(status, doit_step, num_items, - (NULL, storage_size, first, result, binary_op, - num_items_fixed, stream, debug_sync)); - cuda_cub::throw_on_error(status, "adjacent_difference failed on 1st step"); - - // Allocate temporary storage. 
- thrust::detail::temporary_array - tmp(policy, storage_size); - void *ptr = static_cast(tmp.data().get()); - - THRUST_INDEX_TYPE_DISPATCH(status, doit_step, num_items, - (ptr, storage_size, first, result, binary_op, - num_items_fixed, stream, debug_sync)); - cuda_cub::throw_on_error(status, "adjacent_difference failed on 2nd step"); - - status = cuda_cub::synchronize(policy); - cuda_cub::throw_on_error(status, "adjacent_difference failed to synchronize"); - - return result + num_items; - } - -} // namespace __adjacent_difference - -//------------------------- -// Thrust API entry points -//------------------------- - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -adjacent_difference(execution_policy &policy, - InputIt first, - InputIt last, - OutputIt result, - BinaryOp binary_op) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - ret = __adjacent_difference::adjacent_difference(policy, - first, - last, - result, - binary_op); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::adjacent_difference(cvt_to_seq(derived_cast(policy)), - first, - last, - result, - binary_op); -#endif - } - - return ret; -} - -template -OutputIt __host__ __device__ -adjacent_difference(execution_policy &policy, - InputIt first, - InputIt last, - OutputIt result) -{ - typedef typename iterator_traits::value_type input_type; - return cuda_cub::adjacent_difference(policy, - first, - last, - result, - minus()); -} - - -} // namespace cuda_cub -} // end namespace thrust - -// -#include -#include -#endif - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/set_operations.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/set_operations.h deleted file mode 100644 index 421fa8a4bd955706497d0c9b30614035ccbbc46f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/set_operations.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system inherits set_operations -#include - diff --git a/spaces/CVPR/WALT/mmdet/models/necks/bfp.py b/spaces/CVPR/WALT/mmdet/models/necks/bfp.py deleted file mode 100644 index 123f5515ab6b51867d5781aa1572a0810670235f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/necks/bfp.py +++ /dev/null @@ -1,104 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, xavier_init -from mmcv.cnn.bricks import NonLocal2d - -from ..builder import NECKS - - -@NECKS.register_module() -class BFP(nn.Module): - """BFP (Balanced Feature Pyramids) - - BFP takes multi-level features as inputs and gather them into a single one, - then refine the gathered feature and scatter the refined results to - multi-level features. This module is used in Libra R-CNN (CVPR 2019), see - the paper `Libra R-CNN: Towards Balanced Learning for Object Detection - `_ for details. 
- - Args: - in_channels (int): Number of input channels (feature maps of all levels - should have the same channels). - num_levels (int): Number of input feature levels. - conv_cfg (dict): The config dict for convolution layers. - norm_cfg (dict): The config dict for normalization layers. - refine_level (int): Index of integration and refine level of BSF in - multi-level features from bottom to top. - refine_type (str): Type of the refine op, currently support - [None, 'conv', 'non_local']. - """ - - def __init__(self, - in_channels, - num_levels, - refine_level=2, - refine_type=None, - conv_cfg=None, - norm_cfg=None): - super(BFP, self).__init__() - assert refine_type in [None, 'conv', 'non_local'] - - self.in_channels = in_channels - self.num_levels = num_levels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.refine_level = refine_level - self.refine_type = refine_type - assert 0 <= self.refine_level < self.num_levels - - if self.refine_type == 'conv': - self.refine = ConvModule( - self.in_channels, - self.in_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - elif self.refine_type == 'non_local': - self.refine = NonLocal2d( - self.in_channels, - reduction=1, - use_scale=False, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - def init_weights(self): - """Initialize the weights of FPN module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == self.num_levels - - # step 1: gather multi-level features by resize and average - feats = [] - gather_size = inputs[self.refine_level].size()[2:] - for i in range(self.num_levels): - if i < self.refine_level: - gathered = F.adaptive_max_pool2d( - inputs[i], output_size=gather_size) - else: - gathered = F.interpolate( - inputs[i], size=gather_size, mode='nearest') - feats.append(gathered) - - bsf = sum(feats) / len(feats) - - # step 2: refine gathered features - if self.refine_type is not None: - bsf = self.refine(bsf) - - # step 3: scatter refined features to multi-levels by a residual path - outs = [] - for i in range(self.num_levels): - out_size = inputs[i].size()[2:] - if i < self.refine_level: - residual = F.interpolate(bsf, size=out_size, mode='nearest') - else: - residual = F.adaptive_max_pool2d(bsf, output_size=out_size) - outs.append(residual + inputs[i]) - - return tuple(outs) diff --git a/spaces/CVPR/winoground-explorer/app.py b/spaces/CVPR/winoground-explorer/app.py deleted file mode 100644 index 5bf7865ed324378302d611a36457ef81ddb32ace..0000000000000000000000000000000000000000 --- a/spaces/CVPR/winoground-explorer/app.py +++ /dev/null @@ -1,31 +0,0 @@ -from datasets import load_dataset -import gradio as gr -import os -import random - -auth_token = os.environ.get("token") -winoground = load_dataset("facebook/winoground", use_auth_token=auth_token)["test"] - -def func(index): - example = winoground[index] - return example["image_0"], example["caption_0"], example["image_1"], example["caption_1"] - -demo = gr.Blocks() - -with demo: - gr.Markdown("# Slide across the slider to see various examples from WinoGround") - - with gr.Column(): - slider = gr.Slider(minimum=0, maximum=400) - with gr.Row(): - index = random.choice(range(0, 400)) - with gr.Column(): - image_input_1 = gr.Image(value=winoground[index]["image_0"]) - text_input_1 = gr.Textbox(value=winoground[index]["caption_0"]) - with gr.Column(): - image_input_2 = gr.Image(value=winoground[index]["image_1"]) 
- text_input_2 = gr.Textbox(value=winoground[index]["caption_1"]) - - slider.change(func, inputs=[slider], outputs=[image_input_1, text_input_1, image_input_2, text_input_2]) - -demo.launch() \ No newline at end of file diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/web_selenium.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/web_selenium.py deleted file mode 100644 index 11bdfeb1f1630fc6ff6f55d68e8d7233281c5098..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/web_selenium.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Selenium web scraping module.""" -from __future__ import annotations - -import logging -from pathlib import Path -from sys import platform - -from bs4 import BeautifulSoup -from selenium import webdriver -from selenium.webdriver.chrome.options import Options as ChromeOptions -from selenium.webdriver.common.by import By -from selenium.webdriver.firefox.options import Options as FirefoxOptions -from selenium.webdriver.remote.webdriver import WebDriver -from selenium.webdriver.safari.options import Options as SafariOptions -from selenium.webdriver.support import expected_conditions as EC -from selenium.webdriver.support.wait import WebDriverWait -from webdriver_manager.chrome import ChromeDriverManager -from webdriver_manager.firefox import GeckoDriverManager - -import autogpt.processing.text as summary -from autogpt.config import Config -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - -FILE_DIR = Path(__file__).parent.parent -CFG = Config() - - -def browse_website(url: str, question: str) -> tuple[str, WebDriver]: - """Browse a website and return the answer and links to the user - - Args: - url (str): The url of the website to browse - question (str): The question asked by the user - - Returns: - Tuple[str, WebDriver]: The answer and links to the user and the webdriver - """ - driver, text = scrape_text_with_selenium(url) - add_header(driver) - summary_text = summary.summarize_text(url, text, question, driver) - links = scrape_links_with_selenium(driver, url) - - # Limit links to 5 - if len(links) > 5: - links = links[:5] - close_browser(driver) - return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver - - -def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: - """Scrape text from a website using selenium - - Args: - url (str): The url of the website to scrape - - Returns: - Tuple[WebDriver, str]: The webdriver and the text scraped from the website - """ - logging.getLogger("selenium").setLevel(logging.CRITICAL) - - options_available = { - "chrome": ChromeOptions, - "safari": SafariOptions, - "firefox": FirefoxOptions, - } - - options = options_available[CFG.selenium_web_browser]() - options.add_argument( - "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" - ) - - if CFG.selenium_web_browser == "firefox": - driver = webdriver.Firefox( - executable_path=GeckoDriverManager().install(), options=options - ) - elif CFG.selenium_web_browser == "safari": - # Requires a bit more setup on the users end - # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari - driver = webdriver.Safari(options=options) - else: - if platform == "linux" or platform == "linux2": - options.add_argument("--disable-dev-shm-usage") - options.add_argument("--remote-debugging-port=9222") - - options.add_argument("--no-sandbox") - if CFG.selenium_headless: - 
options.add_argument("--headless") - options.add_argument("--disable-gpu") - - driver = webdriver.Chrome( - executable_path=ChromeDriverManager().install(), options=options - ) - driver.get(url) - - WebDriverWait(driver, 10).until( - EC.presence_of_element_located((By.TAG_NAME, "body")) - ) - - # Get the HTML content directly from the browser's DOM - page_source = driver.execute_script("return document.body.outerHTML;") - soup = BeautifulSoup(page_source, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return driver, text - - -def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]: - """Scrape links from a website using selenium - - Args: - driver (WebDriver): The webdriver to use to scrape the links - - Returns: - List[str]: The links scraped from the website - """ - page_source = driver.page_source - soup = BeautifulSoup(page_source, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - - return format_hyperlinks(hyperlinks) - - -def close_browser(driver: WebDriver) -> None: - """Close the browser - - Args: - driver (WebDriver): The webdriver to close - - Returns: - None - """ - driver.quit() - - -def add_header(driver: WebDriver) -> None: - """Add a header to the website - - Args: - driver (WebDriver): The webdriver to use to add the header - - Returns: - None - """ - driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read()) diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/app_openpose.py b/spaces/ChrisCaviar/ControlNet-v1-1/app_openpose.py deleted file mode 100644 index a4dd2aa4a97d9526e239633e95fdd0d6162ffe9d..0000000000000000000000000000000000000000 --- a/spaces/ChrisCaviar/ControlNet-v1-1/app_openpose.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr - -from utils import randomize_seed_fn - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image() - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button(label='Run') - with gr.Accordion('Advanced options', open=False): - preprocessor_name = gr.Radio(label='Preprocessor', - choices=['Openpose', 'None'], - type='value', - value='Openpose') - num_samples = gr.Slider(label='Number of images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image resolution', - minimum=256, - maximum=512, - value=512, - step=256) - preprocess_resolution = gr.Slider( - label='Preprocess resolution', - minimum=128, - maximum=512, - value=512, - step=1) - num_steps = gr.Slider(label='Number of steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=1000000, - step=1, - value=0, - randomize=True) - randomize_seed = gr.Checkbox(label='Randomize seed', - value=True) - a_prompt = gr.Textbox( - label='Additional prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with 
gr.Column(): - result = gr.Gallery(label='Output', show_label=False).style( - columns=2, object_fit='scale-down') - inputs = [ - image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - preprocess_resolution, - num_steps, - guidance_scale, - seed, - preprocessor_name, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name='openpose', - ) - return demo - - -if __name__ == '__main__': - from model import Model - model = Model(task_name='Openpose') - demo = create_demo(model.process_openpose) - demo.queue().launch() diff --git a/spaces/Clebersla/RVC_V2_Huggingface_Version/vc_infer_pipeline.py b/spaces/Clebersla/RVC_V2_Huggingface_Version/vc_infer_pipeline.py deleted file mode 100644 index a0b50d4c703b7638d7c951c9d820a1e59c275fc3..0000000000000000000000000000000000000000 --- a/spaces/Clebersla/RVC_V2_Huggingface_Version/vc_infer_pipeline.py +++ /dev/null @@ -1,646 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import torchcrepe # Fork feature. Use the crepe f0 algorithm. New dependency (pip install torchcrepe) -from torch import Tensor -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # data1 is the input audio, data2 is the output audio, rate is the mix proportion of data2 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # one point every half second - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert input sampling rate - self.window = 160 # samples per frame - self.t_pad = self.sr * self.x_pad # padding time before and after each piece - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # query time before and after a cut point - self.t_center = self.sr * self.x_center # cut point search position - self.t_max = self.sr * self.x_max # duration threshold below which no cut-point query is done - self.device = config.device - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. 
Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - # Get cuda device - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - # Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library - # Else wise return the "cpu" as a torch device, - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - model="full", # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. - x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - model="full", - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ): - # Get various f0 methods from input to use in the computation stack - s = methods_str - s = s.split("hybrid")[1] - s = s.replace("[", "").replace("]", "") - methods = s.split("+") - f0_computation_stack = [] - - print("Calculating f0 pitch estimations for methods: %s" % str(methods)) - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - for method in methods: - f0 = None - if method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 
1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - f0 = f0[1:] # Get rid of extra first frame - elif method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - f0 = f0[1:] # Get rid of extra first frame - elif method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif method == "harvest": - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of first frame. - elif method == "dio": # Potentially buggy? - f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] - # elif method == "pyin": Not Working just yet - # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max) - # Push method to the stack - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print("Calculating hybrid median f0 from the stack of: %s" % str(methods)) - f0_median_hybrid = None - if len(f0_computation_stack) == 1: - f0_median_hybrid = f0_computation_stack[0] - else: - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "dio": # Potentially Buggy? 
- f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max) - elif f0_method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny") - elif f0_method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length - ) - elif f0_method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length, "tiny" - ) - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - elif "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # number of f0 points per second - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int64) - - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - 
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in 
opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/Cletrason/Cletrason-toad-in-the-mario-movie/optimization _tf.py b/spaces/Cletrason/Cletrason-toad-in-the-mario-movie/optimization _tf.py deleted file mode 100644 index 451e3eb9179d4bddf275e7f217fa648a37d2ac61..0000000000000000000000000000000000000000 --- a/spaces/Cletrason/Cletrason-toad-in-the-mario-movie/optimization _tf.py +++ /dev/null @@ -1,371 +0,0 @@ -# Copyright 2019 The TensorFlow Authors, The Hugging Face Team. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Functions and classes related to optimization (weight updates).""" - - -import re -from typing import Callable, List, Optional, Union - -import tensorflow as tf - - -try: - from tensorflow.keras.optimizers.legacy import Adam -except ImportError: - from tensorflow.keras.optimizers import Adam - - -class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule): - """ - Applies a warmup schedule on a given learning rate decay schedule. - - Args: - initial_learning_rate (`float`): - The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end - of the warmup). - decay_schedule_fn (`Callable`): - The schedule function to apply after the warmup for the rest of training. 
- warmup_steps (`int`): - The number of steps for the warmup part of training. - power (`float`, *optional*, defaults to 1): - The power to use for the polynomial warmup (defaults is a linear warmup). - name (`str`, *optional*): - Optional name prefix for the returned tensors during the schedule. - """ - - def __init__( - self, - initial_learning_rate: float, - decay_schedule_fn: Callable, - warmup_steps: int, - power: float = 1.0, - name: str = None, - ): - super().__init__() - self.initial_learning_rate = initial_learning_rate - self.warmup_steps = warmup_steps - self.power = power - self.decay_schedule_fn = decay_schedule_fn - self.name = name - - def __call__(self, step): - with tf.name_scope(self.name or "WarmUp") as name: - # Implements polynomial warmup. i.e., if global_step < warmup_steps, the - # learning rate will be `global_step/num_warmup_steps * init_lr`. - global_step_float = tf.cast(step, tf.float32) - warmup_steps_float = tf.cast(self.warmup_steps, tf.float32) - warmup_percent_done = global_step_float / warmup_steps_float - warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power) - return tf.cond( - global_step_float < warmup_steps_float, - lambda: warmup_learning_rate, - lambda: self.decay_schedule_fn(step - self.warmup_steps), - name=name, - ) - - def get_config(self): - return { - "initial_learning_rate": self.initial_learning_rate, - "decay_schedule_fn": self.decay_schedule_fn, - "warmup_steps": self.warmup_steps, - "power": self.power, - "name": self.name, - } - - -def create_optimizer( - init_lr: float, - num_train_steps: int, - num_warmup_steps: int, - min_lr_ratio: float = 0.0, - adam_beta1: float = 0.9, - adam_beta2: float = 0.999, - adam_epsilon: float = 1e-8, - adam_clipnorm: Optional[float] = None, - adam_global_clipnorm: Optional[float] = None, - weight_decay_rate: float = 0.0, - power: float = 1.0, - include_in_weight_decay: Optional[List[str]] = None, -): - """ - Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay. - - Args: - init_lr (`float`): - The desired learning rate at the end of the warmup phase. - num_train_steps (`int`): - The total number of training steps. - num_warmup_steps (`int`): - The number of warmup steps. - min_lr_ratio (`float`, *optional*, defaults to 0): - The final learning rate at the end of the linear decay will be `init_lr * min_lr_ratio`. - adam_beta1 (`float`, *optional*, defaults to 0.9): - The beta1 to use in Adam. - adam_beta2 (`float`, *optional*, defaults to 0.999): - The beta2 to use in Adam. - adam_epsilon (`float`, *optional*, defaults to 1e-8): - The epsilon to use in Adam. - adam_clipnorm: (`float`, *optional*, defaults to `None`): - If not `None`, clip the gradient norm for each weight tensor to this value. - adam_global_clipnorm: (`float`, *optional*, defaults to `None`) - If not `None`, clip gradient norm to this value. When using this argument, the norm is computed over all - weight tensors, as if they were concatenated into a single vector. - weight_decay_rate (`float`, *optional*, defaults to 0): - The weight decay to use. - power (`float`, *optional*, defaults to 1.0): - The power to use for PolynomialDecay. - include_in_weight_decay (`List[str]`, *optional*): - List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is - applied to all parameters except bias and layer norm parameters. - """ - # Implements linear decay of the learning rate. 
- lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay( - initial_learning_rate=init_lr, - decay_steps=num_train_steps - num_warmup_steps, - end_learning_rate=init_lr * min_lr_ratio, - power=power, - ) - if num_warmup_steps: - lr_schedule = WarmUp( - initial_learning_rate=init_lr, - decay_schedule_fn=lr_schedule, - warmup_steps=num_warmup_steps, - ) - if weight_decay_rate > 0.0: - optimizer = AdamWeightDecay( - learning_rate=lr_schedule, - weight_decay_rate=weight_decay_rate, - beta_1=adam_beta1, - beta_2=adam_beta2, - epsilon=adam_epsilon, - clipnorm=adam_clipnorm, - global_clipnorm=adam_global_clipnorm, - exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], - include_in_weight_decay=include_in_weight_decay, - ) - else: - optimizer = tf.keras.optimizers.Adam( - learning_rate=lr_schedule, - beta_1=adam_beta1, - beta_2=adam_beta2, - epsilon=adam_epsilon, - clipnorm=adam_clipnorm, - global_clipnorm=adam_global_clipnorm, - ) - # We return the optimizer and the LR scheduler in order to better track the - # evolution of the LR independently of the optimizer. - return optimizer, lr_schedule - - -class AdamWeightDecay(Adam): - """ - Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the - loss function is *not* the correct way of using L2 regularization/weight decay with Adam, since that will interact - with the m and v parameters in strange ways as shown in [Decoupled Weight Decay - Regularization](https://arxiv.org/abs/1711.05101). - - Instead we want to decay the weights in a manner that doesn't interact with the m/v parameters. This is equivalent - to adding the square of the weights to the loss with plain (non-momentum) SGD. - - Args: - learning_rate (`Union[float, tf.keras.optimizers.schedules.LearningRateSchedule]`, *optional*, defaults to 1e-3): - The learning rate to use or a schedule. - beta_1 (`float`, *optional*, defaults to 0.9): - The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates. - beta_2 (`float`, *optional*, defaults to 0.999): - The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates. - epsilon (`float`, *optional*, defaults to 1e-7): - The epsilon parameter in Adam, which is a small constant for numerical stability. - amsgrad (`bool`, *optional*, default to `False`): - Whether to apply AMSGrad variant of this algorithm or not, see [On the Convergence of Adam and - Beyond](https://arxiv.org/abs/1904.09237). - weight_decay_rate (`float`, *optional*, defaults to 0): - The weight decay to apply. - include_in_weight_decay (`List[str]`, *optional*): - List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is - applied to all parameters by default (unless they are in `exclude_from_weight_decay`). - exclude_from_weight_decay (`List[str]`, *optional*): - List of the parameter names (or re patterns) to exclude from applying weight decay to. If a - `include_in_weight_decay` is passed, the names in it will supersede this list. - name (`str`, *optional*, defaults to 'AdamWeightDecay'): - Optional name for the operations created when applying gradients. - kwargs: - Keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by - norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time - inverse decay of learning rate. 
`lr` is included for backward compatibility, recommended to use - `learning_rate` instead. - """ - - def __init__( - self, - learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, - beta_1: float = 0.9, - beta_2: float = 0.999, - epsilon: float = 1e-7, - amsgrad: bool = False, - weight_decay_rate: float = 0.0, - include_in_weight_decay: Optional[List[str]] = None, - exclude_from_weight_decay: Optional[List[str]] = None, - name: str = "AdamWeightDecay", - **kwargs, - ): - super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs) - self.weight_decay_rate = weight_decay_rate - self._include_in_weight_decay = include_in_weight_decay - self._exclude_from_weight_decay = exclude_from_weight_decay - - @classmethod - def from_config(cls, config): - """Creates an optimizer from its config with WarmUp custom object.""" - custom_objects = {"WarmUp": WarmUp} - return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects) - - def _prepare_local(self, var_device, var_dtype, apply_state): - super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state) - apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant( - self.weight_decay_rate, name="adam_weight_decay_rate" - ) - - def _decay_weights_op(self, var, learning_rate, apply_state): - do_decay = self._do_use_weight_decay(var.name) - if do_decay: - return var.assign_sub( - learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], - use_locking=self._use_locking, - ) - return tf.no_op() - - def apply_gradients(self, grads_and_vars, name=None, **kwargs): - grads, tvars = list(zip(*grads_and_vars)) - return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs) - - def _get_lr(self, var_device, var_dtype, apply_state): - """Retrieves the learning rate with the given state.""" - if apply_state is None: - return self._decayed_lr_t[var_dtype], {} - - apply_state = apply_state or {} - coefficients = apply_state.get((var_device, var_dtype)) - if coefficients is None: - coefficients = self._fallback_apply_state(var_device, var_dtype) - apply_state[(var_device, var_dtype)] = coefficients - - return coefficients["lr_t"], {"apply_state": apply_state} - - def _resource_apply_dense(self, grad, var, apply_state=None): - lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) - decay = self._decay_weights_op(var, lr_t, apply_state) - with tf.control_dependencies([decay]): - return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs) - - def _resource_apply_sparse(self, grad, var, indices, apply_state=None): - lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) - decay = self._decay_weights_op(var, lr_t, apply_state) - with tf.control_dependencies([decay]): - return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs) - - def get_config(self): - config = super().get_config() - config.update({"weight_decay_rate": self.weight_decay_rate}) - return config - - def _do_use_weight_decay(self, param_name): - """Whether to use L2 weight decay for `param_name`.""" - if self.weight_decay_rate == 0: - return False - - if self._include_in_weight_decay: - for r in self._include_in_weight_decay: - if re.search(r, param_name) is not None: - return True - - if self._exclude_from_weight_decay: - for r in self._exclude_from_weight_decay: - if re.search(r, param_name) is not None: - return False - return True - - -# Extracted 
from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py -class GradientAccumulator(object): - """ - Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a - replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should - then call `.gradients`, scale the gradients if required, and pass the result to `apply_gradients`. - """ - - # We use the ON_READ synchronization policy so that no synchronization is - # performed on assignment. To get the value, we call .value() which returns the - # value on the current replica without synchronization. - - def __init__(self): - """Initializes the accumulator.""" - self._gradients = [] - self._accum_steps = None - - @property - def step(self): - """Number of accumulated steps.""" - if self._accum_steps is None: - self._accum_steps = tf.Variable( - tf.constant(0, dtype=tf.int64), - trainable=False, - synchronization=tf.VariableSynchronization.ON_READ, - aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, - ) - - return self._accum_steps.value() - - @property - def gradients(self): - """The accumulated gradients on the current replica.""" - if not self._gradients: - raise ValueError("The accumulator should be called first to initialize the gradients") - return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] - - def __call__(self, gradients): - """Accumulates `gradients` on the current replica.""" - if not self._gradients: - _ = self.step # Create the step variable. - self._gradients.extend( - [ - tf.Variable( - tf.zeros_like(gradient), - trainable=False, - synchronization=tf.VariableSynchronization.ON_READ, - aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, - ) - if gradient is not None - else gradient - for gradient in gradients - ] - ) - if len(gradients) != len(self._gradients): - raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}") - - for accum_gradient, gradient in zip(self._gradients, gradients): - if accum_gradient is not None and gradient is not None: - accum_gradient.assign_add(gradient) - - self._accum_steps.assign_add(1) - - def reset(self): - """Resets the accumulated gradients on the current replica.""" - if not self._gradients: - return - self._accum_steps.assign(0) - for gradient in self._gradients: - if gradient is not None: - gradient.assign(tf.zeros_like(gradient)) \ No newline at end of file diff --git a/spaces/CoffeeBrewer/CompVis-stable-diffusion-v1-4/README.md b/spaces/CoffeeBrewer/CompVis-stable-diffusion-v1-4/README.md deleted file mode 100644 index 6db6a1856c9f9cee3d13601b0516189e43520888..0000000000000000000000000000000000000000 --- a/spaces/CoffeeBrewer/CompVis-stable-diffusion-v1-4/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CompVis Stable Diffusion V1 4 -emoji: 🏃 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CognitiveLabs/Research-Assistant/actions/duck_search.py b/spaces/CognitiveLabs/Research-Assistant/actions/duck_search.py deleted file mode 100644 index d324475f5c82105dd76a603a01fae72e1a352f2b..0000000000000000000000000000000000000000 --- a/spaces/CognitiveLabs/Research-Assistant/actions/duck_search.py +++ /dev/null @@ -1,11 +0,0 @@ -from duckduckgo_search import DDGS - - -def duckduckgo_search(query, 
max_search_result=3): - with DDGS() as ddgs: - responses = list() - for i, r in enumerate(ddgs.text(query, region='wt-wt', safesearch='off', timelimit='y')): - if i == max_search_result: - break - responses.append(r) - return responses \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dropdown.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dropdown.py deleted file mode 100644 index 473e105268e80bbec2b76eb6eda463410c0114d3..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dropdown.py +++ /dev/null @@ -1,243 +0,0 @@ -"""gr.Dropdown() component.""" - -from __future__ import annotations - -import warnings -from typing import Any, Callable, Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import SimpleSerializable - -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Blurrable, - Changeable, - EventListenerMethod, - Inputable, - Selectable, -) - -set_documentation_group("component") - - -@document() -class Dropdown( - FormComponent, - Changeable, - Inputable, - Selectable, - Blurrable, - IOComponent, - SimpleSerializable, -): - """ - Creates a dropdown of choices from which entries can be selected. - Preprocessing: passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`. - Postprocessing: expects a {str} corresponding to the value of the dropdown entry to be selected. - Examples-format: a {str} representing the drop down value to select. - Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - choices: list[str] | None = None, - *, - value: str | list[str] | Callable | None = None, - type: Literal["value", "index"] = "value", - multiselect: bool | None = None, - max_choices: int | None = None, - label: str | None = None, - info: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - allow_custom_value: bool = False, - **kwargs, - ): - """ - Parameters: - choices: list of options to select from. - value: default value(s) selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected. - multiselect: if True, multiple choices can be selected. - max_choices: maximum number of choices that can be selected. If None, no limit is enforced. - label: component name in interface. - info: additional component description. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. 
The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - allow_custom_value: If True, allows user to enter a custom value that is not in the list of choices. - """ - self.choices = [str(choice) for choice in choices] if choices else [] - valid_types = ["value", "index"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.multiselect = multiselect - if multiselect and isinstance(value, str): - value = [value] - if not multiselect and max_choices is not None: - warnings.warn( - "The `max_choices` parameter is ignored when `multiselect` is False." - ) - self.max_choices = max_choices - self.allow_custom_value = allow_custom_value - if multiselect and allow_custom_value: - raise ValueError( - "Custom values are not supported when `multiselect` is True." - ) - self.interpret_by_tokens = False - self.select: EventListenerMethod - """ - Event listener for when the user selects Dropdown option. - Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, - label=label, - info=info, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - def api_info(self) -> dict[str, dict | bool]: - if self.multiselect: - type = { - "type": "array", - "items": {"type": "string"}, - "description": f"List of options from: {self.choices}", - } - else: - type = {"type": "string", "description": f"Option from: {self.choices}"} - return {"info": type, "serialized_info": False} - - def example_inputs(self) -> dict[str, Any]: - if self.multiselect: - return { - "raw": [self.choices[0]] if self.choices else [], - "serialized": [self.choices[0]] if self.choices else [], - } - else: - return { - "raw": self.choices[0] if self.choices else None, - "serialized": self.choices[0] if self.choices else None, - } - - def get_config(self): - return { - "choices": self.choices, - "value": self.value, - "multiselect": self.multiselect, - "max_choices": self.max_choices, - "allow_custom_value": self.allow_custom_value, - "container": self.container, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - choices: str | list[str] | None = None, - label: str | None = None, - info: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - placeholder: str | None = None, - visible: bool | None = None, - ): - return { - "choices": choices, - "label": label, - "info": info, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "visible": visible, - "value": value, - "interactive": interactive, - "placeholder": placeholder, - "__type__": "update", - } - - def preprocess( - self, x: str | list[str] - ) -> str | int | list[str] | list[int] | None: - """ - Parameters: - x: selected choice(s) - Returns: - selected choice(s) as string or index within choice list or list of string or indices - """ - if self.type == "value": - return x - elif self.type == "index": - if x is None: - return None - elif self.multiselect: - return [self.choices.index(c) for c in x] - else: - if isinstance(x, str): - return self.choices.index(x) if x in self.choices else None - else: - raise ValueError( - f"Unknown type: {self.type}. Please choose from: 'value', 'index'." - ) - - def set_interpret_parameters(self): - """ - Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected. - """ - return self - - def get_interpretation_neighbors(self, x): - choices = list(self.choices) - choices.remove(x) - return choices, {} - - def get_interpretation_scores( - self, x, neighbors, scores: list[float | None], **kwargs - ) -> list: - """ - Returns: - Each value represents the interpretation score corresponding to each choice. - """ - scores.insert(self.choices.index(x), None) - return scores - - def style(self, *, container: bool | None = None, **kwargs): - """ - This method is deprecated. Please set these arguments in the constructor instead. 
- """ - warn_style_method_deprecation() - if container is not None: - self.container = container - return self diff --git a/spaces/Demosthene-OR/avr23-cds-translation/tabs/exploration_tab.py b/spaces/Demosthene-OR/avr23-cds-translation/tabs/exploration_tab.py deleted file mode 100644 index 109819ecc3a5e437e672ba6472d6c540daf191bd..0000000000000000000000000000000000000000 --- a/spaces/Demosthene-OR/avr23-cds-translation/tabs/exploration_tab.py +++ /dev/null @@ -1,426 +0,0 @@ -import streamlit as st -import os -import numpy as np -import pandas as pd -import collections -from nltk.tokenize import word_tokenize -from nltk import download -from ast import literal_eval -# import contextlib -# import re -# import nltk -# from nltk.corpus import stopwords - -title = "Exploration et Preprocessing" -sidebar_name = "Exploration et Preprocessing" - -# Indiquer si l'on veut enlever les stop words. C'est un processus long -stopwords_to_do = True -# Indiquer si l'on veut lemmatiser les phrases, un fois les stop words enlevés. C'est un processus long (approximativement 8 minutes) -lemmatize_to_do = True -# Indiquer si l'on veut calculer le score Bleu pour tout le corpus. C'est un processus très long long (approximativement 10 minutes pour les 10 dictionnaires) -bleu_score_to_do = True -# Première ligne à charger -first_line = 0 -# Nombre maximum de lignes à charger -max_lines = 140000 -if ((first_line+max_lines)>137860): - max_lines = max(137860-first_line ,0) -# Nombre maximum de ligne à afficher pour les DataFrame -max_lines_to_display = 50 - - -download('punkt') -# nltk.download('averaged_perceptron_tagger') -# nltk.download('stopwords') - -@st.cache_data -def load_data(path): - - input_file = os.path.join(path) - with open(input_file, "r", encoding="utf-8") as f: - data = f.read() - - # On convertit les majuscules en minulcule - data = data.lower() - data = data.split('\n') - return data[first_line:min(len(data),first_line+max_lines)] - -@st.cache_data -def load_preprocessed_data(path,data_type): - - input_file = os.path.join(path) - if data_type == 1: - return pd.read_csv(input_file, encoding="utf-8", index_col=0) - else: - with open(input_file, "r", encoding="utf-8") as f: - data = f.read() - data = data.split('\n') - if data_type==0: - data=data[:-1] - elif data_type == 2: - data=[eval(i) for i in data[:-1]] - elif data_type ==3: - data2 = [] - for d in data[:-1]: - data2.append(literal_eval(d)) - data=data2 - return data - -# @st.cache_data(ttl='1h00s') -def load_all_preprocessed_data(lang): - txt =load_preprocessed_data('data/preprocess_txt_'+lang,0) - txt_split = load_preprocessed_data('data/preprocess_txt_split_'+lang,3) - txt_lem = load_preprocessed_data('data/preprocess_txt_lem_'+lang,0) - txt_wo_stopword = load_preprocessed_data('data/preprocess_txt_wo_stopword_'+lang,0) - df_count_word = pd.concat([load_preprocessed_data('data/preprocess_df_count_word1_'+lang,1), load_preprocessed_data('data/preprocess_df_count_word2_'+lang,1)]) - return txt, txt_split, txt_lem, txt_wo_stopword, df_count_word - -#Chargement des textes complet dans les 2 langues -full_txt_en = load_data('data/small_vocab_en') -full_txt_fr = load_data('data/small_vocab_fr') - -# Chargement du résultat du préprocessing -_ , full_txt_split_en, full_txt_lem_en, full_txt_wo_stopword_en, full_df_count_word_en = load_all_preprocessed_data('en') -_ , full_txt_split_fr, full_txt_lem_fr, full_txt_wo_stopword_fr, full_df_count_word_fr = load_all_preprocessed_data('fr') -""" -def remove_stopwords(text, lang): - stop_words = 
set(stopwords.words(lang)) - # stop_words will contain set all english stopwords - filtered_sentence = [] - for word in text.split(): - if word not in stop_words: - filtered_sentence.append(word) - return " ".join(filtered_sentence) - -def clean_undesirable_from_text(sentence, lang): - - # Removing URLs - sentence = re.sub(r"https?://\S+|www\.\S+", "", sentence ) - - # Removing Punctuations (we keep the . character) - REPLACEMENTS = [("..", "."), - (",", ""), - (";", ""), - (":", ""), - ("?", ""), - ('"', ""), - ("-", " "), - ("it's", "it is"), - ("isn't","is not"), - ("'", " ") - ] - for old, new in REPLACEMENTS: - sentence = sentence.replace(old, new) - - # Removing Digits - sentence= re.sub(r'[0-9]','',sentence) - - # Removing Additional Spaces - sentence = re.sub(' +', ' ', sentence) - - return sentence - -def clean_untranslated_sentence(data1, data2): - i=0 - while i137860): - max_lines = max(137860-first_line,0) - # if ((max_lines-first_line)>1000): - # lemmatize_to_do = True - # else: - # lemmatize_to_do = False - - last_line = first_line+max_lines - if (Langue=='Anglais'): - st.dataframe(pd.DataFrame(data=full_txt_en,columns=['Texte']).loc[first_line:last_line-1].head(max_lines_to_display), width=800) - else: - st.dataframe(pd.DataFrame(data=full_txt_fr,columns=['Texte']).loc[first_line:last_line-1].head(max_lines_to_display), width=800) - st.write("") - - # Chargement du résultat du préprocessing (max lignes = max_lines) - txt_en = full_txt_en[first_line:last_line] - txt_split_en = full_txt_split_en[first_line:last_line] - txt_lem_en = full_txt_lem_en[first_line:last_line] - txt_wo_stopword_en = full_txt_wo_stopword_en[first_line:last_line] - df_count_word_en = full_df_count_word_en.loc[first_line:last_line-1] - txt_fr = full_txt_fr[first_line:last_line] - txt_split_fr = full_txt_split_fr[first_line:last_line] - txt_lem_fr = full_txt_lem_fr[first_line:last_line] - txt_wo_stopword_fr = full_txt_wo_stopword_fr[first_line:last_line] - df_count_word_fr = full_df_count_word_fr.loc[first_line:last_line-1] - - # Lancement du préprocessing du texte qui va spliter nettoyer les phrases et les spliter en mots - # et calculer nombre d'occurences des mots dans chaque phrase - if (Langue == 'Anglais'): - st.write("## **Préprocessing de small_vocab_en :**\n") - if max_lines>10000: - with st.status(":sunglasses:", expanded=True): - # txt_en, corpus_en, txt_split_en, txt_lem_en, txt_wo_stopword_en, df_count_word_en,sent_len_en, sent_wo_sw_len_en, sent_lem_len_en = preprocess_txt (txt_en,'en') - display_preprocess_results('en',txt_en, txt_split_en, txt_lem_en, txt_wo_stopword_en, df_count_word_en) - else: - # txt_en, corpus_en, txt_split_en, txt_lem_en, txt_wo_stopword_en, df_count_word_en,sent_len_en, sent_wo_sw_len_en, sent_lem_len_en = preprocess_txt (txt_en,'en') - display_preprocess_results('en',txt_en, txt_split_en, txt_lem_en, txt_wo_stopword_en, df_count_word_en) - else: - st.write("## **Préprocessing de small_vocab_fr :**\n") - if max_lines>10000: - with st.status(":sunglasses:", expanded=True): - # txt_fr, corpus_fr, txt_split_fr, txt_lem_fr, txt_wo_stopword_fr, df_count_word_fr,sent_len_fr, sent_wo_sw_len_fr, sent_lem_len_fr = preprocess_txt (txt_fr,'fr') - display_preprocess_results('fr', txt_fr, txt_split_fr, txt_lem_fr, txt_wo_stopword_fr, df_count_word_fr) - else: - # txt_fr, corpus_fr, txt_split_fr, txt_lem_fr, txt_wo_stopword_fr, df_count_word_fr,sent_len_fr, sent_wo_sw_len_fr, sent_lem_len_fr = preprocess_txt (txt_fr,'fr') - display_preprocess_results('fr', txt_fr, txt_split_fr, 
txt_lem_fr, txt_wo_stopword_fr, df_count_word_fr) - - - - # Might be used later.... - # DEFAULT_TEXT = """Google was founded in September 1998 by Larry Page and Sergey Brin while they were Ph.D. students at Stanford University in California. Together they own about 14 percent of its shares and control 56 percent of the stockholder voting power through supervoting stock. They incorporated Google as a California privately held company on September 4, 1998, in California. Google was then reincorporated in Delaware on October 22, 2002.""" - """ - spacy_model = "en_core_web_sm" - - text = st.text_area("Text to analyze", DEFAULT_TEXT, height=200) - doc = spacy_streamlit.process_text(spacy_model, text) - - spacy_streamlit.visualize_ner( - doc, - labels=["PERSON", "DATE", "GPE"], - show_table=False, - title="Persons, dates and locations", - ) - st.text(f"Analyzed using spaCy model {spacy_model}") - """ - - # models = ["en_core_web_sm"] - # default_text = "Google was founded in September 1998 by Larry Page and Sergey Brin while they were Ph.D. students at Stanford University in California. Together they own about 14 percent of its shares and control 56 percent of the stockholder voting power through supervoting stock. They incorporated Google as a California privately held company on September 4, 1998, in California. Google was then reincorporated in Delaware on October 22, 2002." - # spacy_streamlit.visualize(models, default_text) - - - - - - - - - diff --git a/spaces/Dinoking/Guccio-AI-Designer/visualize.py b/spaces/Dinoking/Guccio-AI-Designer/visualize.py deleted file mode 100644 index 433ae2ea8963c56a37e5e91932ad6d359495ed47..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/visualize.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2020 Erik Härkönen. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. 
- -# Patch for broken CTRL+C handler -# https://github.com/ContinuumIO/anaconda-issues/issues/905 -import os -os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1' - -import torch, json, numpy as np -from types import SimpleNamespace -import matplotlib.pyplot as plt -from pathlib import Path -from os import makedirs -from PIL import Image -from netdissect import proggan, nethook, easydict, zdataset -from netdissect.modelconfig import create_instrumented_model -from estimators import get_estimator -from models import get_instrumented_model -from scipy.cluster.vq import kmeans -import re -import sys -import datetime -import argparse -from tqdm import trange -from config import Config -from decomposition import get_random_dirs, get_or_compute, get_max_batch_size, SEED_VISUALIZATION -from utils import pad_frames - -def x_closest(p): - distances = np.sqrt(np.sum((X - p)**2, axis=-1)) - idx = np.argmin(distances) - return distances[idx], X[idx] - -def make_gif(imgs, duration_secs, outname): - head, *tail = [Image.fromarray((x * 255).astype(np.uint8)) for x in imgs] - ms_per_frame = 1000 * duration_secs / instances - head.save(outname, format='GIF', append_images=tail, save_all=True, duration=ms_per_frame, loop=0) - -def make_mp4(imgs, duration_secs, outname): - import shutil - import subprocess as sp - - FFMPEG_BIN = shutil.which("ffmpeg") - assert FFMPEG_BIN is not None, 'ffmpeg not found, install with "conda install -c conda-forge ffmpeg"' - assert len(imgs[0].shape) == 3, 'Invalid shape of frame data' - - resolution = imgs[0].shape[0:2] - fps = int(len(imgs) / duration_secs) - - command = [ FFMPEG_BIN, - '-y', # overwrite output file - '-f', 'rawvideo', - '-vcodec','rawvideo', - '-s', f'{resolution[0]}x{resolution[1]}', # size of one frame - '-pix_fmt', 'rgb24', - '-r', f'{fps}', - '-i', '-', # imput from pipe - '-an', # no audio - '-c:v', 'libx264', - '-preset', 'slow', - '-crf', '17', - str(Path(outname).with_suffix('.mp4')) ] - - frame_data = np.concatenate([(x * 255).astype(np.uint8).reshape(-1) for x in imgs]) - with sp.Popen(command, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE) as p: - ret = p.communicate(frame_data.tobytes()) - if p.returncode != 0: - print(ret[1].decode("utf-8")) - raise sp.CalledProcessError(p.returncode, command) - - -def make_grid(latent, lat_mean, lat_comp, lat_stdev, act_mean, act_comp, act_stdev, scale=1, n_rows=10, n_cols=5, make_plots=True, edit_type='latent'): - from notebooks.notebook_utils import create_strip_centered - - inst.remove_edits() - x_range = np.linspace(-scale, scale, n_cols, dtype=np.float32) # scale in sigmas - - rows = [] - for r in range(n_rows): - curr_row = [] - out_batch = create_strip_centered(inst, edit_type, layer_key, [latent], - act_comp[r], lat_comp[r], act_stdev[r], lat_stdev[r], act_mean, lat_mean, scale, 0, -1, n_cols)[0] - for i, img in enumerate(out_batch): - curr_row.append(('c{}_{:.2f}'.format(r, x_range[i]), img)) - - rows.append(curr_row[:n_cols]) - - inst.remove_edits() - - if make_plots: - # If more rows than columns, make several blocks side by side - n_blocks = 2 if n_rows > n_cols else 1 - - for r, data in enumerate(rows): - # Add white borders - imgs = pad_frames([img for _, img in data]) - - coord = ((r * n_blocks) % n_rows) + ((r * n_blocks) // n_rows) - plt.subplot(n_rows//n_blocks, n_blocks, 1 + coord) - plt.imshow(np.hstack(imgs)) - - # Custom x-axis labels - W = imgs[0].shape[1] # image width - P = imgs[1].shape[1] # padding width - locs = [(0.5*W + i*(W+P)) for i in range(n_cols)] - plt.xticks(locs, 
["{:.2f}".format(v) for v in x_range]) - plt.yticks([]) - plt.ylabel(f'C{r}') - - plt.tight_layout() - plt.subplots_adjust(top=0.96) # make room for suptitle - - return [img for row in rows for img in row] - - -###################### -### Visualize results -###################### - -if __name__ == '__main__': - global max_batch, sample_shape, feature_shape, inst, args, layer_key, model - - args = Config().from_args() - t_start = datetime.datetime.now() - timestamp = lambda : datetime.datetime.now().strftime("%d.%m %H:%M") - print(f'[{timestamp()}] {args.model}, {args.layer}, {args.estimator}') - - # Ensure reproducibility - torch.manual_seed(0) # also sets cuda seeds - np.random.seed(0) - - # Speed up backend - torch.backends.cudnn.benchmark = True - torch.autograd.set_grad_enabled(False) - - has_gpu = torch.cuda.is_available() - device = torch.device('cuda' if has_gpu else 'cpu') - layer_key = args.layer - layer_name = layer_key #layer_key.lower().split('.')[-1] - - basedir = Path(__file__).parent.resolve() - outdir = basedir / 'out' - - # Load model - inst = get_instrumented_model(args.model, args.output_class, layer_key, device, use_w=args.use_w) - model = inst.model - feature_shape = inst.feature_shape[layer_key] - latent_shape = model.get_latent_shape() - print('Feature shape:', feature_shape) - - # Layout of activations - if len(feature_shape) != 4: # non-spatial - axis_mask = np.ones(len(feature_shape), dtype=np.int32) - else: - axis_mask = np.array([0, 1, 1, 1]) # only batch fixed => whole activation volume used - - # Shape of sample passed to PCA - sample_shape = feature_shape*axis_mask - sample_shape[sample_shape == 0] = 1 - - # Load or compute components - dump_name = get_or_compute(args, inst) - data = np.load(dump_name, allow_pickle=False) # does not contain object arrays - X_comp = data['act_comp'] - X_global_mean = data['act_mean'] - X_stdev = data['act_stdev'] - X_var_ratio = data['var_ratio'] - X_stdev_random = data['random_stdevs'] - Z_global_mean = data['lat_mean'] - Z_comp = data['lat_comp'] - Z_stdev = data['lat_stdev'] - n_comp = X_comp.shape[0] - data.close() - - # Transfer components to device - tensors = SimpleNamespace( - X_comp = torch.from_numpy(X_comp).to(device).float(), #-1, 1, C, H, W - X_global_mean = torch.from_numpy(X_global_mean).to(device).float(), # 1, C, H, W - X_stdev = torch.from_numpy(X_stdev).to(device).float(), - Z_comp = torch.from_numpy(Z_comp).to(device).float(), - Z_stdev = torch.from_numpy(Z_stdev).to(device).float(), - Z_global_mean = torch.from_numpy(Z_global_mean).to(device).float(), - ) - - transformer = get_estimator(args.estimator, n_comp, args.sparsity) - tr_param_str = transformer.get_param_str() - - # Compute max batch size given VRAM usage - max_batch = args.batch_size or (get_max_batch_size(inst, device) if has_gpu else 1) - print('Batch size:', max_batch) - - def show(): - if args.batch_mode: - plt.close('all') - else: - plt.show() - - print(f'[{timestamp()}] Creating visualizations') - - # Ensure visualization gets new samples - torch.manual_seed(SEED_VISUALIZATION) - np.random.seed(SEED_VISUALIZATION) - - # Make output directories - est_id = f'spca_{args.sparsity}' if args.estimator == 'spca' else args.estimator - outdir_comp = outdir/model.name/layer_key.lower()/est_id/'comp' - outdir_inst = outdir/model.name/layer_key.lower()/est_id/'inst' - outdir_summ = outdir/model.name/layer_key.lower()/est_id/'summ' - makedirs(outdir_comp, exist_ok=True) - makedirs(outdir_inst, exist_ok=True) - makedirs(outdir_summ, exist_ok=True) - - # 
Measure component sparsity (!= activation sparsity) - sparsity = np.mean(X_comp == 0) # percentage of zero values in components - print(f'Sparsity: {sparsity:.2f}') - - def get_edit_name(mode): - if mode == 'activation': - is_stylegan = 'StyleGAN' in args.model - is_w = layer_key in ['style', 'g_mapping'] - return 'W' if (is_stylegan and is_w) else 'ACT' - elif mode == 'latent': - return model.latent_space_name() - elif mode == 'both': - return 'BOTH' - else: - raise RuntimeError(f'Unknown edit mode {mode}') - - # Only visualize applicable edit modes - if args.use_w and layer_key in ['style', 'g_mapping']: - edit_modes = ['latent'] # activation edit is the same - else: - edit_modes = ['activation', 'latent'] - - # Summary grid, real components - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{args.estimator.upper()}: {model.name} - {layer_name}, {get_edit_name(edit_mode)} edit", size=16) - make_grid(tensors.Z_global_mean, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, tensors.X_global_mean, - tensors.X_comp, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'components_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - if args.make_video: - components = 15 - instances = 150 - - # One reasonable, one over the top - for sigma in [args.sigma, 3*args.sigma]: - for c in range(components): - for edit_mode in edit_modes: - frames = make_grid(tensors.Z_global_mean, tensors.Z_global_mean, tensors.Z_comp[c:c+1, :, :], tensors.Z_stdev[c:c+1], tensors.X_global_mean, - tensors.X_comp[c:c+1, :, :], tensors.X_stdev[c:c+1], n_rows=1, n_cols=instances, scale=sigma, make_plots=False, edit_type=edit_mode) - plt.close('all') - - frames = [x for _, x in frames] - frames = frames + frames[::-1] - make_mp4(frames, 5, outdir_comp / f'{get_edit_name(edit_mode)}_sigma{sigma}_comp{c}.mp4') - - - # Summary grid, random directions - # Using the stdevs of the principal components for same norm - random_dirs_act = torch.from_numpy(get_random_dirs(n_comp, np.prod(sample_shape)).reshape(-1, *sample_shape)).to(device) - random_dirs_z = torch.from_numpy(get_random_dirs(n_comp, np.prod(inst.input_shape)).reshape(-1, *latent_shape)).to(device) - - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{model.name} - {layer_name}, random directions w/ PC stdevs, {get_edit_name(edit_mode)} edit", size=16) - make_grid(tensors.Z_global_mean, tensors.Z_global_mean, random_dirs_z, tensors.Z_stdev, - tensors.X_global_mean, random_dirs_act, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'random_dirs_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - # Random instances w/ components added - n_random_imgs = 10 - latents = model.sample_latent(n_samples=n_random_imgs) - - for img_idx in trange(n_random_imgs, desc='Random images', ascii=True): - #print(f'Creating visualizations for random image {img_idx+1}/{n_random_imgs}') - z = latents[img_idx][None, ...] 
- - # Summary grid, real components - for edit_mode in edit_modes: - plt.figure(figsize = (14,12)) - plt.suptitle(f"{args.estimator.upper()}: {model.name} - {layer_name}, {get_edit_name(edit_mode)} edit", size=16) - make_grid(z, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, - tensors.X_global_mean, tensors.X_comp, tensors.X_stdev, scale=args.sigma, edit_type=edit_mode, n_rows=14) - plt.savefig(outdir_summ / f'samp{img_idx}_real_{get_edit_name(edit_mode)}.jpg', dpi=300) - show() - - if args.make_video: - components = 5 - instances = 150 - - # One reasonable, one over the top - for sigma in [args.sigma, 3*args.sigma]: #[2, 5]: - for edit_mode in edit_modes: - imgs = make_grid(z, tensors.Z_global_mean, tensors.Z_comp, tensors.Z_stdev, tensors.X_global_mean, tensors.X_comp, tensors.X_stdev, - n_rows=components, n_cols=instances, scale=sigma, make_plots=False, edit_type=edit_mode) - plt.close('all') - - for c in range(components): - frames = [x for _, x in imgs[c*instances:(c+1)*instances]] - frames = frames + frames[::-1] - make_mp4(frames, 5, outdir_inst / f'{get_edit_name(edit_mode)}_sigma{sigma}_img{img_idx}_comp{c}.mp4') - - print('Done in', datetime.datetime.now() - t_start) \ No newline at end of file diff --git a/spaces/DrGabrielLopez/BERTopic/app.py b/spaces/DrGabrielLopez/BERTopic/app.py deleted file mode 100644 index 5d506b8003334b35a74e423e0439b1b05ab919dd..0000000000000000000000000000000000000000 --- a/spaces/DrGabrielLopez/BERTopic/app.py +++ /dev/null @@ -1,152 +0,0 @@ -import pandas as pd -import numpy as np -import spacy -import os -import gradio as gr -import umap -from sklearn.cluster import OPTICS -from transformers import BertTokenizer, TFBertModel -import plotly.io as pio - -# configuration params -pio.templates.default = "plotly_dark" - -# setting up the text in the page -TITLE = "

    BERTopic - For topic detection on text

    " -DESCRIPTION = r"""
    Apply BERTopic to a given dataset and extract the most relevant topics.
    - """ -EXAMPLES = [ - ["data/ecomm500.csv"], -] -ARTICLE = r"""
    - Done by Dr. Gabriel Lopez
    - This program follows the BERTopic philosophy, but actually has its own implementation.
    - For more, please visit: My Page
    - Info about the BERTopic model can be found here
    -
    """ - - -def load_data(fileobj): - """Load dataset (keep only 500 rows for efficiency)""" - data = pd.read_csv(fileobj.name, on_bad_lines='skip', nrows=500) - assert "text" in data.columns, "The data must have a column named 'text'" - return data[['text']] - - -def run_nlp_processing(data): - """As reference for standard NLP processing""" - # NLP processing - docs = [] - nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"]) - for doc in nlp.pipe(data["text"].values, n_process=os.cpu_count() - 1): - lemmas = [] - for token in doc: - if token.is_punct or token.is_stop: - continue - lemmas.append(token.lemma_.lower()) - docs.append(" ".join(lemmas)) - # Make new column - data = data.assign(text=docs) - return data - - -def run_bert_tokenization(data): - """Show the action of the WordPiece alogorithm""" - # load BERT model (for embeddings) - checkpoint = "bert-base-uncased" - tokenizer = BertTokenizer.from_pretrained(checkpoint) - model = TFBertModel.from_pretrained(checkpoint) - # Run BERT tokenizing + encoding - descr_processed_tokenized = tokenizer( - list(data["text"]), - return_tensors="tf", - truncation=True, - padding=True, - max_length=128, - ) - data = data.assign(text_tokenized=descr_processed_tokenized) - return data - - -def run_bertopic(data): - """ " End-to-end BERTopic model""" - # load BERT model (for embeddings) - checkpoint = "bert-base-uncased" - tokenizer = BertTokenizer.from_pretrained(checkpoint) - model = TFBertModel.from_pretrained(checkpoint) - # Run BERT tokenizing + encoding - descr_processed_tokenized = tokenizer( - list(data["text"]), - return_tensors="tf", - truncation=True, - padding=True, - max_length=128, - ) - output_bert = model(descr_processed_tokenized) - # Get sentence embeddings from BERTs word embeddings - mean_vect = [] - for vect in output_bert.last_hidden_state: - mean_vect.append(np.mean(vect, axis=0)) - data = data.assign(descr_vect=mean_vect) - # Use UMAP to lower the dimensionality of the embedding to 3D - [stack makes array(array()) --> array2d] - descr_vect_3d = umap.UMAP(n_components=3).fit_transform( - np.stack(data["descr_vect"].values) - ) - data["descr_vect_2d"] = list(descr_vect_3d) - # Use BERT's + UMAP vector embeddings for clustering using OPTICS - clustering = OPTICS(min_samples=50).fit(np.stack(data["descr_vect_2d"].values)) - data["cluster_label"] = clustering.labels_ - # Plot the 3D embedding - fig_bertopic = plot_bertopic(descr_vect_3d, data) - # Extract topic wordclouds - return fig_bertopic - - -def plot_bertopic(descr_vect_3d, data): - """ " Show the topic clusters over an 3d embedding space""" - import plotly.express as px - - fig = px.scatter_3d( - x=descr_vect_3d[:, 0], - y=descr_vect_3d[:, 1], - z=descr_vect_3d[:, 2], - color=data["cluster_label"], - ) - return fig - - -# gradio interface -blocks = gr.Blocks() -with blocks: - # physical elements - session_state = gr.State([]) - gr.Markdown(TITLE) - gr.Markdown(DESCRIPTION) - with gr.Row(): - with gr.Column(): - gr.Markdown( - "## Load the data (must be a csv file with a column named 'text')" - ) - in_file = gr.File() - gr.Markdown("## Inspect the data") - in_data = gr.Dataframe(max_rows=5) - submit_button = gr.Button("Run BERTopic!") - gr.Examples(inputs=in_file, examples=EXAMPLES) - with gr.Column(): - gr.Markdown("## BERTopic Flow") - gr.Markdown( - "Text -> Word-Piece Tokenization -> BERT-embedding -> UMAP -> HDBSCAN -> Topic" - ) - gr.Markdown("## Processed Text") - out_dataset = gr.Dataframe(max_rows=5) - gr.Markdown("## Embedding + Projection + 
Clustering") - embedding_plot = gr.Plot(label="BERTopic projections") - gr.Markdown("## Extracted Topics") - topics_text = gr.Textbox(label="Topics", lines=50) - gr.Markdown(ARTICLE) - # event listeners - in_file = in_file.upload(inputs=in_file, outputs=in_data, fn=load_data) - submit_button.click(inputs=in_data, outputs=out_dataset, fn=run_bert_tokenization) - # out_dataset.change(inputs=out_dataset, outputs=embedding_plot, fn=run_bertopic) - -blocks.launch() diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py deleted file mode 100644 index a828023e115243e48918538d31b91d662cd12d0f..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -from torch import nn - -from models.facial_recognition.model_irse import Backbone - - -class IDLoss(nn.Module): - def __init__(self, opts): - super(IDLoss, self).__init__() - print('Loading ResNet ArcFace') - self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') - self.facenet.load_state_dict(torch.load(opts.ir_se50_weights)) - self.pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) - self.facenet.eval() - self.opts = opts - - def extract_feats(self, x): - if x.shape[2] != 256: - x = self.pool(x) - x = x[:, :, 35:223, 32:220] # Crop interesting region - x = self.face_pool(x) - x_feats = self.facenet(x) - return x_feats - - def forward(self, y_hat, y): - n_samples = y.shape[0] - y_feats = self.extract_feats(y) # Otherwise use the feature from there - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - sim_improvement = 0 - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - loss += 1 - diff_target - count += 1 - - return loss / count, sim_improvement / count diff --git a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/__init__.py b/spaces/DragGan/DragGan-Inversion/torch_utils/ops/__init__.py deleted file mode 100644 index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -# empty diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/hyperparameters.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/hyperparameters.py deleted file mode 100644 index c1014875cc3d46871056cf17fdc8c778ac6139de..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/hyperparameters.py +++ /dev/null @@ -1,28 +0,0 @@ -## Architechture -lpips_type = 'alex' -first_inv_type = 'w+'#'w+' -optim_type = 'adam' - -## Locality regularization -latent_ball_num_of_samples = 1 -locality_regularization_interval = 1 -use_locality_regularization = False -regulizer_l2_lambda = 0.1 -regulizer_lpips_lambda = 0.1 -regulizer_alpha = 30 - -## Loss -pt_l2_lambda = 1 -pt_lpips_lambda = 1 - -## Steps -LPIPS_value_threshold = 0.04 -max_pti_steps = 350 -first_inv_steps = 450 -max_images_to_invert = 30 - -## Optimization -pti_learning_rate = 5e-4 -first_inv_lr = 8e-3 -train_batch_size = 1 -use_last_w_pivots = False diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/localitly_regulizer.py b/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/localitly_regulizer.py deleted file mode 100644 index 4a4edc3694dd4134d9caa6af0184909451032cc6..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/localitly_regulizer.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -import numpy as np -import wandb -from pti.pti_configs import hyperparameters, global_config -l2_criterion = torch.nn.MSELoss(reduction='mean') - - -def l2_loss(real_images, generated_images): - loss = l2_criterion(real_images, generated_images) - return loss - - -class Space_Regulizer: - def __init__(self, original_G, lpips_net): - self.original_G = original_G - self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha - self.lpips_loss = lpips_net - - def get_morphed_w_code(self, new_w_code, fixed_w): - interpolation_direction = new_w_code - fixed_w - interpolation_direction_norm = torch.norm(interpolation_direction, p=2) - direction_to_move = hyperparameters.regulizer_alpha * interpolation_direction / interpolation_direction_norm - result_w = fixed_w + direction_to_move - self.morphing_regulizer_alpha * fixed_w + (1 - self.morphing_regulizer_alpha) * new_w_code - - return result_w - - def get_image_from_ws(self, w_codes, G): - return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes]) - - def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False): - loss = 0.0 - - z_samples = np.random.randn(num_of_sampled_latents, self.original_G.z_dim) - w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None, - truncation_psi=0.5) - territory_indicator_ws = [self.get_morphed_w_code(w_code.unsqueeze(0), w_batch) for w_code in w_samples] - - for w_code in territory_indicator_ws: - new_img = new_G.synthesis(w_code, noise_mode='none', force_fp32=True) - with torch.no_grad(): - old_img = self.original_G.synthesis(w_code, noise_mode='none', force_fp32=True) - - if hyperparameters.regulizer_l2_lambda > 0: - l2_loss_val = l2_loss.l2_loss(old_img, new_img) - if use_wandb: - wandb.log({f'space_regulizer_l2_loss_val': l2_loss_val.detach().cpu()}, - step=global_config.training_step) - loss += l2_loss_val * hyperparameters.regulizer_l2_lambda - - if hyperparameters.regulizer_lpips_lambda > 0: - loss_lpips = self.lpips_loss(old_img, new_img) - loss_lpips = torch.mean(torch.squeeze(loss_lpips)) - if use_wandb: - 
wandb.log({f'space_regulizer_lpips_loss_val': loss_lpips.detach().cpu()}, - step=global_config.training_step) - loss += loss_lpips * hyperparameters.regulizer_lpips_lambda - - return loss / len(territory_indicator_ws) - - def space_regulizer_loss(self, new_G, w_batch, use_wandb): - ret_val = self.ball_holder_loss_lazy(new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb) - return ret_val diff --git a/spaces/EDGAhab/VITS-Aatrox-AI/text/__init__.py b/spaces/EDGAhab/VITS-Aatrox-AI/text/__init__.py deleted file mode 100644 index 227cdc4a81d7bcdd3dbae299947278998d12276b..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/VITS-Aatrox-AI/text/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols,symbols_zh - - -# Mappings from symbol to numeric ID and vice versa: -# _symbol_to_id = {s: i for i, s in enumerate(symbols)} -# _id_to_symbol = {i: s for i, s in enumerate(symbols)} - -chinese_mode = True -if chinese_mode: - _symbol_to_id = {s: i for i, s in enumerate(symbols_zh)} - _id_to_symbol = {i: s for i, s in enumerate(symbols_zh)} -else: - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - _id_to_symbol = {i: s for i, s in enumerate(symbols)} - -def text_to_sequence(text, cleaner_names, ): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - coutinue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text, chinese_mode=True): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - # if chinese_mode: - # sequence = [_symbol_to_id_zh[symbol] for symbol in cleaned_text] - # else: - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/msdeformattn.py b/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/msdeformattn.py deleted file mode 100644 index 0ff1a81a3ed0c05464dad2143830bacac5951dfe..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/msdeformattn.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import logging -import numpy as np -from typing import Callable, Dict, List, Optional, Tuple, Union - -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F -from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_ -from torch.cuda.amp import autocast - -from detectron2.config import configurable -from detectron2.layers import Conv2d, ShapeSpec, get_norm -from detectron2.modeling import SEM_SEG_HEADS_REGISTRY - -from ..transformer_decoder.position_encoding import PositionEmbeddingSine -from ..transformer_decoder.transformer import _get_clones, _get_activation_fn -from .ops.modules import MSDeformAttn - - -# MSDeformAttn Transformer encoder in deformable detr -class MSDeformAttnTransformerEncoderOnly(nn.Module): - def __init__(self, d_model=256, nhead=8, - num_encoder_layers=6, dim_feedforward=1024, dropout=0.1, - activation="relu", - num_feature_levels=4, enc_n_points=4, - ): - super().__init__() - - self.d_model = d_model - self.nhead = nhead - - encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward, - dropout, activation, - num_feature_levels, nhead, enc_n_points) - self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers) - - self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) - - self._reset_parameters() - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - for m in self.modules(): - if isinstance(m, MSDeformAttn): - m._reset_parameters() - normal_(self.level_embed) - - def get_valid_ratio(self, mask): - _, H, W = mask.shape - valid_H = torch.sum(~mask[:, :, 0], 1) - valid_W = torch.sum(~mask[:, 0, :], 1) - valid_ratio_h = valid_H.float() / H - valid_ratio_w = valid_W.float() / W - valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) - return valid_ratio - - def forward(self, srcs, pos_embeds): - masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs] - # prepare input for encoder - src_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)): - bs, c, h, w = src.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - src = src.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - pos_embed = pos_embed.flatten(2).transpose(1, 2) - lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) - lvl_pos_embed_flatten.append(lvl_pos_embed) - src_flatten.append(src) - mask_flatten.append(mask) - src_flatten = torch.cat(src_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) - spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) - - # encoder - memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten) - - return memory, spatial_shapes, level_start_index - - -class MSDeformAttnTransformerEncoderLayer(nn.Module): - def __init__(self, - d_model=256, d_ffn=1024, - dropout=0.1, activation="relu", - n_levels=4, n_heads=8, n_points=4): - super().__init__() - - # self attention - self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points) - self.dropout1 = 
nn.Dropout(dropout) - self.norm1 = nn.LayerNorm(d_model) - - # ffn - self.linear1 = nn.Linear(d_model, d_ffn) - self.activation = _get_activation_fn(activation) - self.dropout2 = nn.Dropout(dropout) - self.linear2 = nn.Linear(d_ffn, d_model) - self.dropout3 = nn.Dropout(dropout) - self.norm2 = nn.LayerNorm(d_model) - - @staticmethod - def with_pos_embed(tensor, pos): - return tensor if pos is None else tensor + pos - - def forward_ffn(self, src): - src2 = self.linear2(self.dropout2(self.activation(self.linear1(src)))) - src = src + self.dropout3(src2) - src = self.norm2(src) - return src - - def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None): - # self attention - src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask) - src = src + self.dropout1(src2) - src = self.norm1(src) - - # ffn - src = self.forward_ffn(src) - - return src - - -class MSDeformAttnTransformerEncoder(nn.Module): - def __init__(self, encoder_layer, num_layers): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - - @staticmethod - def get_reference_points(spatial_shapes, valid_ratios, device): - reference_points_list = [] - for lvl, (H_, W_) in enumerate(spatial_shapes): - - ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), - torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device)) - ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_) - ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_) - ref = torch.stack((ref_x, ref_y), -1) - reference_points_list.append(ref) - reference_points = torch.cat(reference_points_list, 1) - reference_points = reference_points[:, :, None] * valid_ratios[:, None] - return reference_points - - def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None): - output = src - reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device) - for _, layer in enumerate(self.layers): - output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask) - - return output - - -@SEM_SEG_HEADS_REGISTRY.register() -class MSDeformAttnPixelDecoder(nn.Module): - @configurable - def __init__( - self, - input_shape: Dict[str, ShapeSpec], - *, - transformer_dropout: float, - transformer_nheads: int, - transformer_dim_feedforward: int, - transformer_enc_layers: int, - conv_dim: int, - mask_dim: int, - norm: Optional[Union[str, Callable]] = None, - # deformable transformer encoder args - transformer_in_features: List[str], - common_stride: int, - ): - """ - NOTE: this interface is experimental. - Args: - input_shape: shapes (channels and stride) of the input features - transformer_dropout: dropout probability in transformer - transformer_nheads: number of heads in transformer - transformer_dim_feedforward: dimension of feedforward network - transformer_enc_layers: number of transformer encoder layers - conv_dims: number of output channels for the intermediate conv layers. - mask_dim: number of output channels for the final conv layer. 
- norm (str or callable): normalization for all conv layers - """ - super().__init__() - transformer_input_shape = { - k: v for k, v in input_shape.items() if k in transformer_in_features - } - - # this is the input shape of pixel decoder - input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) - self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" - self.feature_strides = [v.stride for k, v in input_shape] - self.feature_channels = [v.channels for k, v in input_shape] - - # this is the input shape of transformer encoder (could use less features than pixel decoder - transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride) - self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5" - transformer_in_channels = [v.channels for k, v in transformer_input_shape] - self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers - - self.transformer_num_feature_levels = len(self.transformer_in_features) - if self.transformer_num_feature_levels > 1: - input_proj_list = [] - # from low resolution to high resolution (res5 -> res2) - for in_channels in transformer_in_channels[::-1]: - input_proj_list.append(nn.Sequential( - nn.Conv2d(in_channels, conv_dim, kernel_size=1), - nn.GroupNorm(32, conv_dim), - )) - self.input_proj = nn.ModuleList(input_proj_list) - else: - self.input_proj = nn.ModuleList([ - nn.Sequential( - nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1), - nn.GroupNorm(32, conv_dim), - )]) - - for proj in self.input_proj: - nn.init.xavier_uniform_(proj[0].weight, gain=1) - nn.init.constant_(proj[0].bias, 0) - - self.transformer = MSDeformAttnTransformerEncoderOnly( - d_model=conv_dim, - dropout=transformer_dropout, - nhead=transformer_nheads, - dim_feedforward=transformer_dim_feedforward, - num_encoder_layers=transformer_enc_layers, - num_feature_levels=self.transformer_num_feature_levels, - ) - N_steps = conv_dim // 2 - self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) - - self.mask_dim = mask_dim - # use 1x1 conv instead - self.mask_features = Conv2d( - conv_dim, - mask_dim, - kernel_size=1, - stride=1, - padding=0, - ) - weight_init.c2_xavier_fill(self.mask_features) - - self.maskformer_num_feature_levels = 3 # always use 3 scales - self.common_stride = common_stride - - # extra fpn levels - stride = min(self.transformer_feature_strides) - self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride)) - - lateral_convs = [] - output_convs = [] - - use_bias = norm == "" - for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]): - lateral_norm = get_norm(norm, conv_dim) - output_norm = get_norm(norm, conv_dim) - - lateral_conv = Conv2d( - in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm - ) - output_conv = Conv2d( - conv_dim, - conv_dim, - kernel_size=3, - stride=1, - padding=1, - bias=use_bias, - norm=output_norm, - activation=F.relu, - ) - weight_init.c2_xavier_fill(lateral_conv) - weight_init.c2_xavier_fill(output_conv) - self.add_module("adapter_{}".format(idx + 1), lateral_conv) - self.add_module("layer_{}".format(idx + 1), output_conv) - - lateral_convs.append(lateral_conv) - output_convs.append(output_conv) - # Place convs into top-down order (from low to high resolution) - # to make the top-down computation in forward clearer. 
- self.lateral_convs = lateral_convs[::-1] - self.output_convs = output_convs[::-1] - - @classmethod - def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): - ret = {} - ret["input_shape"] = { - k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES - } - ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM - ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM - ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM - ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT - ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS - # ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD - ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder - ret[ - "transformer_enc_layers" - ] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config - ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES - ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE - return ret - - @autocast(enabled=False) - def forward_features(self, features): - srcs = [] - pos = [] - # Reverse feature maps into top-down order (from low to high resolution) - for idx, f in enumerate(self.transformer_in_features[::-1]): - x = features[f].float() # deformable detr does not support half precision - srcs.append(self.input_proj[idx](x)) - pos.append(self.pe_layer(x)) - - y, spatial_shapes, level_start_index = self.transformer(srcs, pos) - bs = y.shape[0] - - split_size_or_sections = [None] * self.transformer_num_feature_levels - for i in range(self.transformer_num_feature_levels): - if i < self.transformer_num_feature_levels - 1: - split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i] - else: - split_size_or_sections[i] = y.shape[1] - level_start_index[i] - y = torch.split(y, split_size_or_sections, dim=1) - - out = [] - multi_scale_features = [] - num_cur_levels = 0 - for i, z in enumerate(y): - out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1])) - - # append `out` with extra FPN levels - # Reverse feature maps into top-down order (from low to high resolution) - for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]): - x = features[f].float() - lateral_conv = self.lateral_convs[idx] - output_conv = self.output_convs[idx] - cur_fpn = lateral_conv(x) - # Following FPN implementation, we use nearest upsampling here - y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False) - y = output_conv(y) - out.append(y) - - for o in out: - if num_cur_levels < self.maskformer_num_feature_levels: - multi_scale_features.append(o) - num_cur_levels += 1 - - return self.mask_features(out[-1]), out[0], multi_scale_features diff --git a/spaces/ESG-TFM-UV/ESG_API_BATCH/app.py b/spaces/ESG-TFM-UV/ESG_API_BATCH/app.py deleted file mode 100644 index 241e2aada96ea6d49dfba56e47d642731f8834af..0000000000000000000000000000000000000000 --- a/spaces/ESG-TFM-UV/ESG_API_BATCH/app.py +++ /dev/null @@ -1,428 +0,0 @@ - -import os -import re -import math -import requests -import json -import itertools - -import numpy as np -import pandas as pd - -import onnxruntime -import onnx -import gradio as gr - -from huggingface_hub import hf_hub_url, cached_download -from transformers import AutoTokenizer -from transformers import pipeline - -try: - from extractnet import Extractor - EXTRACTOR_NET = 'extractnet' -except ImportError: - try: - from dragnet import extract_content - EXTRACTOR_NET = 'dragnet' - except ImportError: - 
try: - import trafilatura - from trafilatura.settings import use_config - EXTRACTOR_NET = 'trafilatura' - trafilatura_config = use_config() - trafilatura_config.set("DEFAULT", "EXTRACTION_TIMEOUT", "0") #To avoid it runnig signals to avoid clashing with gradio threads - except ImportError: - raise ImportError - -print('[i] Using',EXTRACTOR_NET) - -import spacy - -from bertopic import BERTopic - -import nltk -nltk.download('stopwords') -nltk.download('wordnet') -nltk.download('omw-1.4') -from nltk.corpus import stopwords -from nltk.stem import WordNetLemmatizer -from nltk.stem import PorterStemmer - -from unicodedata import normalize - - - -OUT_HEADERS = ['E','S','G'] -DF_SP500 = pd.read_csv('SP500_constituents.zip',compression=dict(method='zip')) - -MODEL_TRANSFORMER_BASED = "distilbert-base-uncased" -MODEL_ONNX_FNAME = "ESG_classifier_batch.onnx" -MODEL_SENTIMENT_ANALYSIS = "ProsusAI/finbert" -#MODEL3 -#BERTOPIC_REPO_ID = "oMateos2020/BERTopic-paraphrase-MiniLM-L3-v2-51topics-guided-model3" -#BERTOPIC_FILENAME = "BERTopic-paraphrase-MiniLM-L3-v2-51topics-guided-model3" -#bertopic_model = BERTopic.load(cached_download(hf_hub_url(BERTOPIC_REPO_ID , BERTOPIC_FILENAME )), embedding_model="paraphrase-MiniLM-L3-v2") - -BERTOPIC_REPO_ID = "oMateos2020/BERTopic-distilbert-base-nli-mean-tokens" -BERTOPIC_FILENAME = "BERTopic-distilbert-base-nli-mean-tokens" -bertopic_model = BERTopic.load(cached_download(hf_hub_url(BERTOPIC_REPO_ID , BERTOPIC_FILENAME ))) - -#SECTOR_LIST = list(DF_SP500.Sector.unique()) -SECTOR_LIST = ['Industry', - 'Health', - 'Technology', - 'Communication', - 'Consumer Staples', - 'Consumer Discretionary', - 'Utilities', - 'Financials', - 'Materials', - 'Real Estate', - 'Energy'] - - -def _topic_sanitize_word(text): - """Función realiza una primera limpieza-normalización del texto a traves de expresiones regex""" - text = re.sub(r'@[\w_]+|#[\w_]+|https?://[\w_./]+', '', text) # Elimina menciones y URL, esto sería más para Tweets pero por si hay alguna mención o URL al ser criticas web - text = re.sub('\S*@\S*\s?', '', text) # Elimina correos electronicos - text = re.sub(r'\((\d+)\)', '', text) #Elimina numeros entre parentesis - text = re.sub(r'^\d+', '', text) #Elimina numeros sueltos - text = re.sub(r'\n', '', text) #Elimina saltos de linea - text = re.sub('\s+', ' ', text) # Elimina espacios en blanco adicionales - text = re.sub(r'[“”]', '', text) # Elimina caracter citas - text = re.sub(r'[()]', '', text) # Elimina parentesis - text = re.sub('\.', '', text) # Elimina punto - text = re.sub('\,', '', text) # Elimina coma - text = re.sub('’s', '', text) # Elimina posesivos - #text = re.sub(r'-+', '', text) # Quita guiones para unir palabras compuestas (normalizaría algunos casos, exmujer y ex-mujer, todos a exmujer) - text = re.sub(r'\.{3}', ' ', text) # Reemplaza puntos suspensivos - # Esta exp regular se ha incluido "a mano" tras ver que era necesaria para algunos ejemplos - text = re.sub(r"([\.\?])", r"\1 ", text) # Introduce espacio despues de punto e interrogacion - # -> NFD (Normalization Form Canonical Decomposition) y eliminar diacríticos - text = re.sub(r"([^n\u0300-\u036f]|n(?!\u0303(?![\u0300-\u036f])))[\u0300-\u036f]+", r"\1", - normalize( "NFD", text), 0, re.I) # Eliminación de diacriticos (acentos y variantes puntuadas de caracteres por su forma simple excepto la 'ñ') - # -> NFC (Normalization Form Canonical Composition) - text = normalize( 'NFC', text) - - return text.lower().strip() - -def _topic_clean_text(text, lemmatize=True, stem=True): - words = 
text.split() - non_stopwords = [word for word in words if word not in stopwords.words('english')] - clean_text = [_topic_sanitize_word(word) for word in non_stopwords] - if lemmatize: - lemmatizer = WordNetLemmatizer() - clean_text = [lemmatizer.lemmatize(word) for word in clean_text] - if stem: - ps =PorterStemmer() - clean_text = [ps.stem(word) for word in clean_text] - - return ' '.join(clean_text).strip() - -SECTOR_TOPICS = [] -for sector in SECTOR_LIST: - topics, _ = bertopic_model.find_topics(_topic_clean_text(sector), top_n=5) - SECTOR_TOPICS.append(topics) - -def _topic2sector(pred_topics): - out = [] - for pred_topic in pred_topics: - relevant_sectors = [] - for i in range(len(SECTOR_LIST)): - if pred_topic in SECTOR_TOPICS[i]: - relevant_sectors.append(list(DF_SP500.Sector.unique())[i]) - out.append(relevant_sectors) - return out - -def _inference_topic_match(text): - out, _ = bertopic_model.transform([_topic_clean_text(t) for t in text]) - return out - -def get_company_sectors(extracted_names, threshold=0.95): - ''' - ''' - from thefuzz import process, fuzz - output = [] - standard_names_tuples = [] - for extracted_name in extracted_names: - name_match = process.extractOne(extracted_name, - DF_SP500.Name, - scorer=fuzz.token_set_ratio) - similarity = name_match[1]/100 - if similarity >= threshold: - standard_names_tuples.append(name_match[:2]) - - for extracted_name in extracted_names: - name_match = process.extractOne(extracted_name, - DF_SP500.Symbol, - scorer=fuzz.token_set_ratio) - similarity = name_match[1]/100 - if similarity >= threshold: - standard_names_tuples.append(name_match[:2]) - - for std_comp_name, _ in standard_names_tuples: - sectors = list(DF_SP500[['Name','Sector','Symbol']].where( (DF_SP500.Name == std_comp_name) | (DF_SP500.Symbol == std_comp_name)).dropna().itertuples(index=False, name=None)) - output += sectors - return output - -def filter_spans(spans, keep_longest=True): - """Filter a sequence of spans and remove duplicates or overlaps. Useful for - creating named entities (where one token can only be part of one entity) or - when merging spans with `Retokenizer.merge`. When spans overlap, the (first) - longest span is preferred over shorter spans. - spans (Iterable[Span]): The spans to filter. - keep_longest (bool): Specify whether to keep longer or shorter spans. - RETURNS (List[Span]): The filtered spans. 
- """ - get_sort_key = lambda span: (span.end - span.start, -span.start) - sorted_spans = sorted(spans, key=get_sort_key, reverse=keep_longest) - #print(f'sorted_spans: {sorted_spans}') - result = [] - seen_tokens = set() - for span in sorted_spans: - # Check for end - 1 here because boundaries are inclusive - if span.start not in seen_tokens and span.end - 1 not in seen_tokens: - result.append(span) - seen_tokens.update(range(span.start, span.end)) - result = sorted(result, key=lambda span: span.start) - return result - - -def _inference_ner_spancat(text, limit_outputs=10): - nlp = spacy.load("en_pipeline") - out = [] - for doc in nlp.pipe(text): - spans = doc.spans["sc"] - #comp_raw_text = dict( sorted( dict(zip([str(x) for x in spans],[float(x)*penalty for x in spans.attrs['scores']])).items(), key=lambda x: x[1], reverse=True) ) - company_list = list(set([str(span).replace('\'s', '').replace('\u2019s','') for span in filter_spans(spans, keep_longest=True)]))[:limit_outputs] - out.append(get_company_sectors(company_list)) - return out - -#def _inference_summary_model_pipeline(text): -# pipe = pipeline("text2text-generation", model=MODEL_SUMMARY_PEGASUS) -# return pipe(text,truncation='longest_first') - -def _inference_sentiment_model_pipeline(text): - tokenizer_kwargs = {'padding':True,'truncation':True,'max_length':512}#,'return_tensors':'pt'} - pipe = pipeline("sentiment-analysis", model=MODEL_SENTIMENT_ANALYSIS ) - return pipe(text,**tokenizer_kwargs) - -#def _inference_sentiment_model_via_api_query(payload): -# response = requests.post(API_HF_SENTIMENT_URL , headers={"Authorization": os.environ['hf_api_token']}, json=payload) -# return response.json() - -def _lematise_text(text): - nlp = spacy.load("en_core_web_sm", disable=['ner']) - text_out = [] - for doc in nlp.pipe(text): #see https://spacy.io/models#design - new_text = "" - for token in doc: - if (not token.is_punct - and not token.is_stop - and not token.like_url - and not token.is_space - and not token.like_email - #and not token.like_num - and not token.pos_ == "CONJ"): - - new_text = new_text + " " + token.lemma_ - - text_out.append( new_text ) - return text_out - -def sigmoid(x): - return 1 / (1 + np.exp(-x)) - -def to_numpy(tensor): - return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() - -def is_in_archive(url): - try: - r = requests.get('http://archive.org/wayback/available?url='+url) - archive = json.loads(r.text) - - if archive['archived_snapshots'] : - archive['archived_snapshots']['closest'] - return {'archived':archive['archived_snapshots']['closest']['available'], 'url':archive['archived_snapshots']['closest']['url'],'error':0} - else: - return {'archived':False, 'url':"", 'error':0} - except: - print(f"[E] Quering URL ({url}) from archive.org") - return {'archived':False, 'url':"", 'error':-1} - -#def _inference_ner(text): -# return labels - -def _inference_classifier(text): - tokenizer = AutoTokenizer.from_pretrained(MODEL_TRANSFORMER_BASED) - inputs = tokenizer(_lematise_text(text), return_tensors="np", padding="max_length", truncation=True) #this assumes head-only! 
- ort_session = onnxruntime.InferenceSession(MODEL_ONNX_FNAME) - onnx_model = onnx.load(MODEL_ONNX_FNAME) - onnx.checker.check_model(onnx_model) - - # compute ONNX Runtime output prediction - ort_outs = ort_session.run(None, input_feed=dict(inputs)) - - return sigmoid(ort_outs[0]) - -def inference(input_batch,isurl,use_archive,filt_companies_topic,limit_companies=10): - url_list = [] #Only used if isurl - input_batch_content = [] -# if file_in.name is not "": -# print("[i] Input is file:",file_in.name) -# dft = pd.read_csv( -# file_in.name, -# compression=dict(method='zip') -# ) -# assert file_col_name in dft.columns, "Indicated col_name not found in file" -# input_batch_r = dft[file_col_name].values.tolist() -# else: - print("[i] Input is list") - assert len(input_batch) > 0, "input_batch array is empty" - input_batch_r = input_batch - - print("[i] Input size:",len(input_batch_r)) - - if isurl: - print("[i] Data is URL") - if use_archive: - print("[i] Use chached URL from archive.org") - print("[i] Extracting contents using",EXTRACTOR_NET) - for row_in in input_batch_r: - if isinstance(row_in , list): - url = row_in[0] - else: - url = row_in - url_list.append(url) - if use_archive: - archive = is_in_archive(url) - if archive['archived']: - url = archive['url'] - #Extract the data from url - if(EXTRACTOR_NET == 'extractnet'): - extracted = Extractor().extract(requests.get(url).text) - input_batch_content.append(extracted['content']) - elif(EXTRACTOR_NET == 'dragnet'): - extracted = extract_content(requests.get(url).content) - input_batch_content.append(extracted) - elif(EXTRACTOR_NET == 'trafilatura'): - try: - extracted = trafilatura.extract(trafilatura.fetch_url(url), include_comments=False, config=trafilatura_config, include_tables=False) - assert len(extracted)>100, "[W] Failed extracting "+url+" retrying with archived version" - except: - archive = is_in_archive(url) - if archive['archived']: - print("[W] Using archive.org version of",url) - url = archive['url'] - extracted = trafilatura.extract(trafilatura.fetch_url(url), include_comments=False, config=trafilatura_config, include_tables=False) - else: - print("[E] URL=",url,"not found") - extracted = "" - url_list.pop() #Remove last from list - - if len(extracted)>100: - input_batch_content.append(extracted) - else: - print("[i] Data is news contents") - if isinstance(input_batch_r[0], list): - print("[i] Data is list of lists format") - for row_in in input_batch_r: - input_batch_content.append(row_in[0]) - else: - print("[i] Data is single list format") - input_batch_content = input_batch_r - - print("[i] Batch size:",len(input_batch_content)) - print("[i] Running ESG classifier inference...") - prob_outs = _inference_classifier(input_batch_content) - print("[i] Classifier output shape:",prob_outs.shape) - print("[i] Running sentiment using",MODEL_SENTIMENT_ANALYSIS ,"inference...") - sentiment = _inference_sentiment_model_pipeline(input_batch_content ) - print("[i] Running NER using custom spancat inference...") - ner_labels = _inference_ner_spancat(input_batch_content ,limit_outputs=limit_companies) - print("[i] Extracting topic using custom BERTopic...") - topics = _inference_topic_match(input_batch_content) - news_sectors = _topic2sector(topics) - - df = pd.DataFrame(prob_outs,columns =['E','S','G']) - if isurl: - df['URL'] = url_list - else: - df['content_id'] = range(1, len(input_batch_r)+1) - df['sent_lbl'] = [d['label'] for d in sentiment ] - df['sent_score'] = [d['score'] for d in sentiment ] - df['topic'] = 
pd.DataFrame(news_sectors).iloc[:, 0] - #df['sector_pred'] = pd.DataFrame(_topic2sector(topics)).iloc[:, 0] - print("[i] Pandas output shape:",df.shape) - #[[], [('Nvidia', 'Information Technology')], [('Twitter', 'Communication Services'), ('Apple', 'Information Technology')], [], [], [], [], [], []] - df["company"] = np.nan - df["sector"] = np.nan - df["symbol"] = np.nan - dfo = pd.DataFrame(columns=['E','S','G','URL','sent_lbl','sent_score','topic','company','sector','symbol']) - for idx in range(len(df.index)): - if ner_labels[idx]: #not empty - for ner in ner_labels[idx]: - if filt_companies_topic: - if news_sectors[idx]: #not empty - if news_sectors[idx][0] not in ner[1]: - continue - dfo = pd.concat( [dfo, df.loc[[idx]].assign(company=ner[0], sector=ner[1], symbol=ner[2])], join='outer', ignore_index=True) #axis=0 - print("[i] Pandas output shape:",dfo.shape) - return dfo.drop_duplicates() - -title = "ESG API Demo" -description = """This is a demonstration of the full ESG pipeline backend where given a list of URL (english, news) the news contents are extracted, using extractnet, and fed to three models: - -- A custom scheme for company extraction -- A custom ESG classifier for the ESG labeling of the news -- An off-the-shelf sentiment classification model (ProsusAI/finbert) - -API input parameters: -- List: list of text. Either list of Url of the news (english) or list of extracted news contents -- 'Data type': int. 0=list is of extracted news contents, 1=list is of urls. -- `use_archive`: boolean. The model will extract the archived version in archive.org of the url indicated. This is useful with old news and to bypass news behind paywall -- `filter_companies`: boolean. Filter companies by news' topic -- `limit_companies`: integer. Number of found relevant companies to report. 
- -""" -examples = [[ [['https://www.bbc.com/news/uk-62732447'], - ["https://www.science.org/content/article/suspicions-grow-nanoparticles-pfizer-s-covid-19-vaccine-trigger-rare-allergic-reactions"], - ["https://www.cnbc.com/2022/09/14/omicron-specific-covid-booster-shot-side-effects-what-to-expect.html"], - ["https://www.reuters.com/business/healthcare-pharmaceuticals/brazil-approves-pfizer-vaccine-children-young-six-months-2022-09-17/"], - ["https://www.statnews.com/2022/09/06/pfizer-covid-vaccines-researchers-next-gen-studies/"], - ["https://www.cms.gov/newsroom/news-alert/updated-covid-19-vaccines-providing-protection-against-omicron-variant-available-no-cost"], - ["https://www.bbc.com/news/health-62691102"], - ["https://news.bloomberglaw.com/esg/abbvie-board-faces-new-investor-suit-over-humira-kickback-claims"], - ["https://esgnews.com/amazon-backed-infinium-to-provide-ultra-low-carbon-electrofuels-for-use-in-trucking-fleet-in-2023/"], - ["https://esgnews.com/comcast-announces-plan-to-double-energy-efficiency-by-2030-to-power-a-greener-internet/"], - ["https://esgnews.com/ges-facts-technology-helps-the-city-of-los-angeles-move-closer-to-its-renewable-energy-goals/"], - ['https://www.bbc.com/news/science-environment-62758811'], - ['https://www.bbc.com/news/business-62524031'], - ["https://www.knowesg.com/investors/blackstone-and-sphera-work-together-for-portfolio-decarbonization-program-17022022"], - ["https://www.esgtoday.com/amazon-partners-with-matt-damons-water-org-to-provide-water-access-to-100-million-people/"], - ["https://www.esgtoday.com/walmart-allocates-over-1-billion-to-renewable-energy-sustainable-buildings-circular-economy/"], - ["https://www.esgtoday.com/anglo-american-ties-interest-on-745-million-bond-to-climate-water-job-creation-goals/"], - ["https://www.esgtoday.com/blackrock-acquires-new-zealand-solar-as-a-service-provider-solarzero/"], - ["https://www.esgtoday.com/blackrock-strikes-back-against-climate-activism-claims/"], - ["https://www.esgtoday.com/hm-to-remove-sustainability-labels-from-products-following-investigation-by-regulator/"], - ["https://www.knowesg.com/sustainable-finance/exxonmobil-fails-the-energy-transition-due-to-failed-governance-structure-04122021"], - ["https://www.knowesg.com/companies/tesla-is-investigated-by-the-securities-and-exchange-commission-sec-on-solar-07122021"], - ["https://www.knowesg.com/tech/pcg-and-exxonmobil-will-collaborate-on-plastic-recycling-in-malaysia-20092022"], - ["https://esgnews.com/nike-launches-community-climate-resilience-program-with-2-million-grant-to-trust-for-public-land/"], - ["https://esgnews.com/walmart-and-unitedhealth-group-collaborate-to-deliver-access-to-high-quality-affordable-health-care/"], - ['https://www.bbc.com/news/science-environment-62680423']],'url',False,False,5]] -demo = gr.Interface(fn=inference, - inputs=[gr.Dataframe(label='input batch', col_count=1, datatype='str', type='array', wrap=True), - gr.Dropdown(label='data type', choices=['text','url'], type='index', value='url'), - gr.Checkbox(label='Parse cached in archive.org'), - gr.Checkbox(label='Filter out companies by topic'), - gr.Slider(minimum=1, maximum=10, step=1, label='Limit NER output', value=5)], - outputs=[gr.Dataframe(label='output raw', col_count=1, type='pandas', wrap=True, header=OUT_HEADERS)], - #gr.Label(label='Company'), - #gr.Label(label='ESG'), - #gr.Label(label='Sentiment'), - #gr.Markdown()], - title=title, - description=description, - examples=examples) -demo.launch() diff --git 
a/spaces/Eddycrack864/Applio-Inference/utils/i18n.py b/spaces/Eddycrack864/Applio-Inference/utils/i18n.py deleted file mode 100644 index 8e75d2bc26ff86ab1716b8d7f239ad9f5cc1e32d..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/utils/i18n.py +++ /dev/null @@ -1,28 +0,0 @@ -import locale -import json -import os - - -def load_language_list(language): - with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f: - language_list = json.load(f) - return language_list - - -class I18nAuto: - def __init__(self, language=None): - if language in ["Auto", None]: - language = "es_ES" - if not os.path.exists(f"./i18n/{language}.json"): - language = "es_ES" - language = "es_ES" - self.language = language - # print("Use Language:", language) - self.language_map = load_language_list(language) - - def __call__(self, key): - return self.language_map.get(key, key) - - def print(self): - # print("Use Language:", self.language) - print("") diff --git a/spaces/EuroPython2022/Paddy_Disease_Classification/app.py b/spaces/EuroPython2022/Paddy_Disease_Classification/app.py deleted file mode 100644 index 6c4c180a0bb8fd6d97908e2b6906421db6a65349..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/Paddy_Disease_Classification/app.py +++ /dev/null @@ -1,63 +0,0 @@ -import albumentations -import cv2 -import torch -import timm -import gradio as gr -import numpy as np -import os -import random - -device = torch.device('cpu') - -labels = { - 0: 'bacterial_leaf_blight', - 1: 'bacterial_leaf_streak', - 2: 'bacterial_panicle_blight', - 3: 'blast', - 4: 'brown_spot', - 5: 'dead_heart', - 6: 'downy_mildew', - 7: 'hispa', - 8: 'normal', - 9: 'tungro' - } - -def inference_fn(model, image=None): - model.eval() - image = image.to(device) - with torch.no_grad(): - output = model(image.unsqueeze(0)) - out = output.sigmoid().detach().cpu().numpy().flatten() - return out - - -def predict(image=None) -> dict: - mean = (0.485, 0.456, 0.406) - std = (0.229, 0.224, 0.225) - - augmentations = albumentations.Compose( - [ - albumentations.Resize(256, 256), - albumentations.HorizontalFlip(p=0.5), - albumentations.VerticalFlip(p=0.5), - albumentations.Normalize(mean, std, max_pixel_value=255.0, always_apply=True), - ] - ) - - augmented = augmentations(image=image) - image = augmented["image"] - image = np.transpose(image, (2, 0, 1)) - image = torch.tensor(image, dtype=torch.float32) - model = timm.create_model('efficientnet_b0', pretrained=False, num_classes=10) - model.load_state_dict(torch.load("paddy_model.pth", map_location=torch.device(device))) - model.to(device) - - predicted = inference_fn(model, image) - - return {labels[i]: float(predicted[i]) for i in range(10)} - - -gr.Interface(fn=predict, - inputs=gr.inputs.Image(), - outputs=gr.outputs.Label(num_top_classes=10), - examples=["200005.jpg", "200006.jpg"], interpretation='default').launch() \ No newline at end of file diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_datasets/synthtext.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_datasets/synthtext.py deleted file mode 100644 index fb9a44b3422dae5a9788d39b0901335dfc6076a9..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_datasets/synthtext.py +++ /dev/null @@ -1,18 +0,0 @@ -dataset_type = 'TextDetDataset' -data_root = 'data/synthtext' - -train = dict( - type=dataset_type, - ann_file=f'{data_root}/instances_training.lmdb', - loader=dict( - type='AnnFileLoader', - repeat=1, - file_format='lmdb', - 
parser=dict( - type='LineJsonParser', - keys=['file_name', 'height', 'width', 'annotations'])), - img_prefix=f'{data_root}/imgs', - pipeline=None) - -train_list = [train] -test_list = [train] diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_models/textsnake_r50_fpn_unet.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_models/textsnake_r50_fpn_unet.py deleted file mode 100644 index 7d74f376b8c635451a3036e780ffc88e7640bf2c..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_models/textsnake_r50_fpn_unet.py +++ /dev/null @@ -1,22 +0,0 @@ -model = dict( - type='TextSnake', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPN_UNet', in_channels=[256, 512, 1024, 2048], out_channels=32), - bbox_head=dict( - type='TextSnakeHead', - in_channels=32, - loss=dict(type='TextSnakeLoss'), - postprocessor=dict( - type='TextSnakePostprocessor', text_repr_type='poly')), - train_cfg=None, - test_cfg=None) diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/losses/loss_util.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/losses/loss_util.py deleted file mode 100644 index 744eeb46d1f3b5a7b4553ca23237ddd9c899a698..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/losses/loss_util.py +++ /dev/null @@ -1,95 +0,0 @@ -import functools -from torch.nn import functional as F - - -def reduce_loss(loss, reduction): - """Reduce loss as specified. - - Args: - loss (Tensor): Elementwise loss tensor. - reduction (str): Options are 'none', 'mean' and 'sum'. - - Returns: - Tensor: Reduced loss tensor. - """ - reduction_enum = F._Reduction.get_enum(reduction) - # none: 0, elementwise_mean:1, sum: 2 - if reduction_enum == 0: - return loss - elif reduction_enum == 1: - return loss.mean() - else: - return loss.sum() - - -def weight_reduce_loss(loss, weight=None, reduction='mean'): - """Apply element-wise weight and reduce loss. - - Args: - loss (Tensor): Element-wise loss. - weight (Tensor): Element-wise weights. Default: None. - reduction (str): Same as built-in losses of PyTorch. Options are - 'none', 'mean' and 'sum'. Default: 'mean'. - - Returns: - Tensor: Loss values. - """ - # if weight is specified, apply element-wise weight - if weight is not None: - assert weight.dim() == loss.dim() - assert weight.size(1) == 1 or weight.size(1) == loss.size(1) - loss = loss * weight - - # if weight is not specified or reduction is sum, just reduce the loss - if weight is None or reduction == 'sum': - loss = reduce_loss(loss, reduction) - # if reduction is mean, then compute mean over weight region - elif reduction == 'mean': - if weight.size(1) > 1: - weight = weight.sum() - else: - weight = weight.sum() * loss.size(1) - loss = loss.sum() / weight - - return loss - - -def weighted_loss(loss_func): - """Create a weighted version of a given loss function. - - To use this decorator, the loss function must have the signature like - `loss_func(pred, target, **kwargs)`. The function only needs to compute - element-wise loss without any reduction. This decorator will add weight - and reduction arguments to the function. The decorated function will have - the signature like `loss_func(pred, target, weight=None, reduction='mean', - **kwargs)`. 
- - :Example: - - >>> import torch - >>> @weighted_loss - >>> def l1_loss(pred, target): - >>> return (pred - target).abs() - - >>> pred = torch.Tensor([0, 2, 3]) - >>> target = torch.Tensor([1, 1, 1]) - >>> weight = torch.Tensor([1, 0, 1]) - - >>> l1_loss(pred, target) - tensor(1.3333) - >>> l1_loss(pred, target, weight) - tensor(1.5000) - >>> l1_loss(pred, target, reduction='none') - tensor([1., 1., 2.]) - >>> l1_loss(pred, target, weight, reduction='sum') - tensor(3.) - """ - - @functools.wraps(loss_func) - def wrapper(pred, target, weight=None, reduction='mean', **kwargs): - # get element-wise loss - loss = loss_func(pred, target, **kwargs) - loss = weight_reduce_loss(loss, weight, reduction) - return loss - - return wrapper diff --git a/spaces/Feraxin/chatGPT/baidu_translate/module.py b/spaces/Feraxin/chatGPT/baidu_translate/module.py deleted file mode 100644 index b9be1ed0230456ff6b53fe62fa6e550056f917d8..0000000000000000000000000000000000000000 --- a/spaces/Feraxin/chatGPT/baidu_translate/module.py +++ /dev/null @@ -1,106 +0,0 @@ -import argparse -import random, os -from hashlib import md5 -from typing import Optional - -import requests - -import paddlehub as hub -from paddlehub.module.module import moduleinfo -from paddlehub.module.module import runnable -from paddlehub.module.module import serving - - -def make_md5(s, encoding='utf-8'): - return md5(s.encode(encoding)).hexdigest() - - -@moduleinfo(name="baidu_translate", - version="1.0.0", - type="text/machine_translation", - summary="", - author="baidu-nlp", - author_email="paddle-dev@baidu.com") -class BaiduTranslate: - - def __init__(self, appid=None, appkey=None): - """ - :param appid: appid for requesting Baidu translation service. - :param appkey: appkey for requesting Baidu translation service. - """ - appid = os.environ.get('baidu_translate_appid') - appkey = os.environ.get('baidu_translate_appkey') - # Set your own appid/appkey. - if appid is None: - self.appid = '' - else: - self.appid = appid - if appkey is None: - self.appkey = '' - else: - self.appkey = appkey - self.url = 'http://api.fanyi.baidu.com/api/trans/vip/translate' - - def translate(self, query: str, from_lang: Optional[str] = "en", to_lang: Optional[int] = "zh"): - """ - Create image by text prompts using ErnieVilG model. - - :param query: Text to be translated. - :param from_lang: Source language. - :param to_lang: Dst language. - - Return translated string. - """ - # Generate salt and sign - salt = random.randint(32768, 65536) - sign = make_md5(self.appid + query + str(salt) + self.appkey) - - # Build request - headers = {'Content-Type': 'application/x-www-form-urlencoded'} - payload = {'appid': self.appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign} - - # Send request - try: - r = requests.post(self.url, params=payload, headers=headers) - result = r.json() - except Exception as e: - error_msg = str(e) - raise RuntimeError(error_msg) - if 'error_code' in result: - raise RuntimeError(result['error_msg']) - return result['trans_result'][0]['dst'] - - @runnable - def run_cmd(self, argvs): - """ - Run as a command. - """ - self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name), - prog='hub run {}'.format(self.name), - usage='%(prog)s', - add_help=True) - self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. 
Required") - self.add_module_input_arg() - args = self.parser.parse_args(argvs) - if args.appid is not None and args.appkey is not None: - self.appid = args.appid - self.appkey = args.appkey - result = self.translate(args.query, args.from_lang, args.to_lang) - return result - - @serving - def serving_method(self, query, from_lang, to_lang): - """ - Run as a service. - """ - return self.translate(query, from_lang, to_lang) - - def add_module_input_arg(self): - """ - Add the command input options. - """ - self.arg_input_group.add_argument('--query', type=str) - self.arg_input_group.add_argument('--from_lang', type=str, default='en', help="源语言") - self.arg_input_group.add_argument('--to_lang', type=str, default='zh', help="目标语言") - self.arg_input_group.add_argument('--appid', type=str, default=None, help="注册得到的个人appid") - self.arg_input_group.add_argument('--appkey', type=str, default=None, help="注册得到的个人appkey") diff --git a/spaces/GXSA/bingo/src/components/tone-selector.tsx b/spaces/GXSA/bingo/src/components/tone-selector.tsx deleted file mode 100644 index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/tone-selector.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React from 'react' -import { BingConversationStyle } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' - -type ToneItem = { - type: BingConversationStyle, - name: string -} - -const ToneList: ToneItem[] = [ - { name: '有创造力', type: BingConversationStyle.Creative }, - { name: '更平衡', type: BingConversationStyle.Balanced }, - { name: '更精确', type: BingConversationStyle.Precise } -] - -interface ToneSelectorProps { - type: BingConversationStyle | '' - onChange?: (type: BingConversationStyle) => void -} - -export function ToneSelector({ type, onChange }: ToneSelectorProps) { - return ( -
-    <div className="fieldset">
-      <div className="legend">
-        选择对话样式
-      </div>
-      <div className="menu-wrapper">
-        <div className="menu-container">
-          {
-            ToneList.map(tone => (
-              <li className="menu-item" key={tone.type} onClick={() => onChange?.(tone.type)}>
-                <button className={cn({ selected: tone.type === type })}>{tone.name}</button>
-              </li>
-            ))
-          }
-        </div>
-      </div>
-    </div>
    - ) -} diff --git a/spaces/GeorgeOrville/bingo/src/pages/api/kblob.ts b/spaces/GeorgeOrville/bingo/src/pages/api/kblob.ts deleted file mode 100644 index 0ce7e6063cdc06838e76f1cff1d5982d34ef52de..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/src/pages/api/kblob.ts +++ /dev/null @@ -1,56 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import FormData from 'form-data' -import { fetch } from '@/lib/isomorphic' -import { KBlobRequest } from '@/lib/bots/bing/types' - -const API_DOMAIN = 'https://bing.vcanbb.top' - -export const config = { - api: { - bodyParser: { - sizeLimit: '10mb' // Set desired value here - } - } -} - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { knowledgeRequest, imageBase64 } = req.body as KBlobRequest - - const formData = new FormData() - formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - if (imageBase64) { - formData.append('imageBase64', imageBase64) - } - - const response = await fetch(`${API_DOMAIN}/images/kblob`, - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": `${API_DOMAIN}/web/index.html`, - "Referrer-Policy": "origin-when-cross-origin", - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - ...formData.getHeaders() - } - } - ).then(res => res.text()) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - res.end(response || JSON.stringify({ result: { value: 'UploadFailed', message: '请更换 IP 或代理后重试' } })) - } catch (e) { - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/Gradio-Blocks/ViTPose/model.py b/spaces/Gradio-Blocks/ViTPose/model.py deleted file mode 100644 index f4a2d2b0480e4ba3c036006b6b27104d67d6d57b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/ViTPose/model.py +++ /dev/null @@ -1,221 +0,0 @@ -from __future__ import annotations - -import os -import pathlib -import shlex -import subprocess -import sys - -if os.getenv('SYSTEM') == 'spaces': - import mim - - mim.uninstall('mmcv-full', confirm_yes=True) - mim.install('mmcv-full==1.5.0', is_yes=True) - - subprocess.run(shlex.split('pip uninstall -y opencv-python')) - subprocess.run(shlex.split('pip uninstall -y opencv-python-headless')) - subprocess.run(shlex.split('pip install opencv-python-headless==4.8.0.74')) - -import huggingface_hub -import numpy as np -import torch -import torch.nn as nn - -app_dir = pathlib.Path(__file__).parent -submodule_dir = app_dir / 'ViTPose' -sys.path.insert(0, submodule_dir.as_posix()) - -from mmdet.apis import inference_detector, init_detector -from mmpose.apis import (inference_top_down_pose_model, init_pose_model, - process_mmdet_results, vis_pose_result) - - -class DetModel: - MODEL_DICT = { - 'YOLOX-tiny': { - 'config': - 'mmdet_configs/configs/yolox/yolox_tiny_8x8_300e_coco.py', - 'model': - 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', - }, - 'YOLOX-s': { - 'config': - 'mmdet_configs/configs/yolox/yolox_s_8x8_300e_coco.py', - 'model': - 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth', - }, - 'YOLOX-l': { - 'config': - 
'mmdet_configs/configs/yolox/yolox_l_8x8_300e_coco.py', - 'model': - 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth', - }, - 'YOLOX-x': { - 'config': - 'mmdet_configs/configs/yolox/yolox_x_8x8_300e_coco.py', - 'model': - 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth', - }, - } - - def __init__(self): - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self._load_all_models_once() - self.model_name = 'YOLOX-l' - self.model = self._load_model(self.model_name) - - def _load_all_models_once(self) -> None: - for name in self.MODEL_DICT: - self._load_model(name) - - def _load_model(self, name: str) -> nn.Module: - d = self.MODEL_DICT[name] - return init_detector(d['config'], d['model'], device=self.device) - - def set_model(self, name: str) -> None: - if name == self.model_name: - return - self.model_name = name - self.model = self._load_model(name) - - def detect_and_visualize( - self, image: np.ndarray, - score_threshold: float) -> tuple[list[np.ndarray], np.ndarray]: - out = self.detect(image) - vis = self.visualize_detection_results(image, out, score_threshold) - return out, vis - - def detect(self, image: np.ndarray) -> list[np.ndarray]: - image = image[:, :, ::-1] # RGB -> BGR - out = inference_detector(self.model, image) - return out - - def visualize_detection_results( - self, - image: np.ndarray, - detection_results: list[np.ndarray], - score_threshold: float = 0.3) -> np.ndarray: - person_det = [detection_results[0]] + [np.array([]).reshape(0, 5)] * 79 - - image = image[:, :, ::-1] # RGB -> BGR - vis = self.model.show_result(image, - person_det, - score_thr=score_threshold, - bbox_color=None, - text_color=(200, 200, 200), - mask_color=None) - return vis[:, :, ::-1] # BGR -> RGB - - -class AppDetModel(DetModel): - def run(self, model_name: str, image: np.ndarray, - score_threshold: float) -> tuple[list[np.ndarray], np.ndarray]: - self.set_model(model_name) - return self.detect_and_visualize(image, score_threshold) - - -class PoseModel: - MODEL_DICT = { - 'ViTPose-B (single-task train)': { - 'config': - 'ViTPose/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/ViTPose_base_coco_256x192.py', - 'model': 'models/vitpose-b.pth', - }, - 'ViTPose-L (single-task train)': { - 'config': - 'ViTPose/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/ViTPose_large_coco_256x192.py', - 'model': 'models/vitpose-l.pth', - }, - 'ViTPose-B (multi-task train, COCO)': { - 'config': - 'ViTPose/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/ViTPose_base_coco_256x192.py', - 'model': 'models/vitpose-b-multi-coco.pth', - }, - 'ViTPose-L (multi-task train, COCO)': { - 'config': - 'ViTPose/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/ViTPose_large_coco_256x192.py', - 'model': 'models/vitpose-l-multi-coco.pth', - }, - } - - def __init__(self): - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self.model_name = 'ViTPose-B (multi-task train, COCO)' - self.model = self._load_model(self.model_name) - - def _load_all_models_once(self) -> None: - for name in self.MODEL_DICT: - self._load_model(name) - - def _load_model(self, name: str) -> nn.Module: - d = self.MODEL_DICT[name] - ckpt_path = huggingface_hub.hf_hub_download('public-data/ViTPose', - d['model']) - model = init_pose_model(d['config'], ckpt_path, device=self.device) - return model - - def set_model(self, 
name: str) -> None: - if name == self.model_name: - return - self.model_name = name - self.model = self._load_model(name) - - def predict_pose_and_visualize( - self, - image: np.ndarray, - det_results: list[np.ndarray], - box_score_threshold: float, - kpt_score_threshold: float, - vis_dot_radius: int, - vis_line_thickness: int, - ) -> tuple[list[dict[str, np.ndarray]], np.ndarray]: - out = self.predict_pose(image, det_results, box_score_threshold) - vis = self.visualize_pose_results(image, out, kpt_score_threshold, - vis_dot_radius, vis_line_thickness) - return out, vis - - def predict_pose( - self, - image: np.ndarray, - det_results: list[np.ndarray], - box_score_threshold: float = 0.5) -> list[dict[str, np.ndarray]]: - image = image[:, :, ::-1] # RGB -> BGR - person_results = process_mmdet_results(det_results, 1) - out, _ = inference_top_down_pose_model(self.model, - image, - person_results=person_results, - bbox_thr=box_score_threshold, - format='xyxy') - return out - - def visualize_pose_results(self, - image: np.ndarray, - pose_results: list[np.ndarray], - kpt_score_threshold: float = 0.3, - vis_dot_radius: int = 4, - vis_line_thickness: int = 1) -> np.ndarray: - image = image[:, :, ::-1] # RGB -> BGR - vis = vis_pose_result(self.model, - image, - pose_results, - kpt_score_thr=kpt_score_threshold, - radius=vis_dot_radius, - thickness=vis_line_thickness) - return vis[:, :, ::-1] # BGR -> RGB - - -class AppPoseModel(PoseModel): - def run( - self, model_name: str, image: np.ndarray, - det_results: list[np.ndarray], box_score_threshold: float, - kpt_score_threshold: float, vis_dot_radius: int, - vis_line_thickness: int - ) -> tuple[list[dict[str, np.ndarray]], np.ndarray]: - self.set_model(model_name) - return self.predict_pose_and_visualize(image, det_results, - box_score_threshold, - kpt_score_threshold, - vis_dot_radius, - vis_line_thickness) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/run.sh b/spaces/Gradio-Blocks/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/run.sh deleted file mode 100644 index fbe76fb398212d2eb93f98007ea28d31cbb65ebe..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/run.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/train.py ${work_path}/config.py \ - --launcher pytorch \ - --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \ - --work-dir ${work_path}/ckpt \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 010f86f1aac1b5c827dec29f692d137dc1c399bf..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py 
b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 09604c39729abfc9015eb971069b987c8d8a82cb..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py deleted file mode 100644 index e3924ad679cb3d7ba731322f9cdb67410baae59a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = '../deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py' -model = dict( - pretrained='open-mmlab://resnest101', - backbone=dict( - type='ResNeSt', - stem_channels=128, - radix=2, - reduction_factor=4, - avg_down_stride=True)) diff --git a/spaces/HUBioDataLab/DrugGEN/models.py b/spaces/HUBioDataLab/DrugGEN/models.py deleted file mode 100644 index 9927302aee0f987095ad035513e55f34c27fe1d5..0000000000000000000000000000000000000000 --- a/spaces/HUBioDataLab/DrugGEN/models.py +++ /dev/null @@ -1,210 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from layers import TransformerEncoder, TransformerDecoder - -class Generator(nn.Module): - """Generator network.""" - def __init__(self, z_dim, act, vertexes, edges, nodes, dropout, dim, depth, heads, mlp_ratio, submodel): - super(Generator, self).__init__() - - self.submodel = submodel - self.vertexes = vertexes - self.edges = edges - self.nodes = nodes - self.depth = depth - self.dim = dim - self.heads = heads - self.mlp_ratio = mlp_ratio - - self.dropout = dropout - self.z_dim = z_dim - - if act == "relu": - act = nn.ReLU() - elif act == "leaky": - act = nn.LeakyReLU() - elif act == "sigmoid": - act = nn.Sigmoid() - elif act == "tanh": - act = nn.Tanh() - self.features = vertexes * vertexes * edges + vertexes * nodes - self.transformer_dim = vertexes * vertexes * dim + vertexes * dim - self.pos_enc_dim = 5 - #self.pos_enc = nn.Linear(self.pos_enc_dim, self.dim) - - self.node_layers = nn.Sequential(nn.Linear(nodes, 64), act, nn.Linear(64,dim), act, nn.Dropout(self.dropout)) - self.edge_layers = nn.Sequential(nn.Linear(edges, 64), act, nn.Linear(64,dim), act, nn.Dropout(self.dropout)) - - self.TransformerEncoder = TransformerEncoder(dim=self.dim, depth=self.depth, heads=self.heads, act = act, - mlp_ratio=self.mlp_ratio, drop_rate=self.dropout) - - self.readout_e = nn.Linear(self.dim, edges) - self.readout_n = nn.Linear(self.dim, nodes) - self.softmax = nn.Softmax(dim = -1) - - def _generate_square_subsequent_mask(self, sz): - mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) - mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) - return mask - - def laplacian_positional_enc(self, adj): - - A = adj - D = torch.diag(torch.count_nonzero(A, dim=-1)) - L = torch.eye(A.shape[0], device=A.device) - D * A * D - - EigVal, EigVec = torch.linalg.eig(L) - - idx = torch.argsort(torch.real(EigVal)) - EigVal, EigVec = 
EigVal[idx], torch.real(EigVec[:,idx]) - pos_enc = EigVec[:,1:self.pos_enc_dim + 1] - - return pos_enc - - def forward(self, z_e, z_n): - b, n, c = z_n.shape - _, _, _ , d = z_e.shape - #random_mask_e = torch.randint(low=0,high=2,size=(b,n,n,d)).to(z_e.device).float() - #random_mask_n = torch.randint(low=0,high=2,size=(b,n,c)).to(z_n.device).float() - #z_e = F.relu(z_e - random_mask_e) - #z_n = F.relu(z_n - random_mask_n) - - #mask = self._generate_square_subsequent_mask(self.vertexes).to(z_e.device) - - node = self.node_layers(z_n) - - edge = self.edge_layers(z_e) - - edge = (edge + edge.permute(0,2,1,3))/2 - - #lap = [self.laplacian_positional_enc(torch.max(x,-1)[1]) for x in edge] - - #lap = torch.stack(lap).to(node.device) - - #pos_enc = self.pos_enc(lap) - - #node = node + pos_enc - - node, edge = self.TransformerEncoder(node,edge) - - node_sample = self.softmax(self.readout_n(node)) - - edge_sample = self.softmax(self.readout_e(edge)) - - return node, edge, node_sample, edge_sample - - - -class Generator2(nn.Module): - def __init__(self, dim, dec_dim, depth, heads, mlp_ratio, drop_rate, drugs_m_dim, drugs_b_dim, submodel): - super().__init__() - self.submodel = submodel - self.depth = depth - self.dim = dim - self.mlp_ratio = mlp_ratio - self.heads = heads - self.dropout_rate = drop_rate - self.drugs_m_dim = drugs_m_dim - self.drugs_b_dim = drugs_b_dim - - self.pos_enc_dim = 5 - - - if self.submodel == "Prot": - self.prot_n = torch.nn.Linear(3822, 45) ## exact dimension of protein features - self.prot_e = torch.nn.Linear(298116, 2025) ## exact dimension of protein features - - self.protn_dim = torch.nn.Linear(1, dec_dim) - self.prote_dim = torch.nn.Linear(1, dec_dim) - - - self.mol_nodes = nn.Linear(dim, dec_dim) - self.mol_edges = nn.Linear(dim, dec_dim) - - self.drug_nodes = nn.Linear(self.drugs_m_dim, dec_dim) - self.drug_edges = nn.Linear(self.drugs_b_dim, dec_dim) - - self.TransformerDecoder = TransformerDecoder(dec_dim, depth, heads, mlp_ratio, drop_rate=self.dropout_rate) - - self.nodes_output_layer = nn.Linear(dec_dim, self.drugs_m_dim) - self.edges_output_layer = nn.Linear(dec_dim, self.drugs_b_dim) - self.softmax = nn.Softmax(dim=-1) - - def laplacian_positional_enc(self, adj): - - A = adj - D = torch.diag(torch.count_nonzero(A, dim=-1)) - L = torch.eye(A.shape[0], device=A.device) - D * A * D - - EigVal, EigVec = torch.linalg.eig(L) - - idx = torch.argsort(torch.real(EigVal)) - EigVal, EigVec = EigVal[idx], torch.real(EigVec[:,idx]) - pos_enc = EigVec[:,1:self.pos_enc_dim + 1] - - return pos_enc - - def _generate_square_subsequent_mask(self, sz): - mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) - mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) - return mask - - def forward(self, edges_logits, nodes_logits ,akt1_adj,akt1_annot): - - edges_logits = self.mol_edges(edges_logits) - nodes_logits = self.mol_nodes(nodes_logits) - - if self.submodel != "Prot": - akt1_annot = self.drug_nodes(akt1_annot) - akt1_adj = self.drug_edges(akt1_adj) - - else: - akt1_adj = self.prote_dim(self.prot_e(akt1_adj).view(1,45,45,1)) - akt1_annot = self.protn_dim(self.prot_n(akt1_annot).view(1,45,1)) - - - #lap = [self.laplacian_positional_enc(torch.max(x,-1)[1]) for x in drug_e] - #lap = torch.stack(lap).to(drug_e.device) - #pos_enc = self.pos_enc(lap) - #drug_n = drug_n + pos_enc - - if self.submodel == "Ligand" or self.submodel == "RL" : - nodes_logits,akt1_annot, edges_logits, akt1_adj = 
self.TransformerDecoder(akt1_annot,nodes_logits,akt1_adj,edges_logits) - - else: - nodes_logits,akt1_annot, edges_logits, akt1_adj = self.TransformerDecoder(nodes_logits,akt1_annot,edges_logits,akt1_adj) - - edges_logits = self.edges_output_layer(edges_logits) - nodes_logits = self.nodes_output_layer(nodes_logits) - - edges_logits = self.softmax(edges_logits) - nodes_logits = self.softmax(nodes_logits) - - return edges_logits, nodes_logits - - -class simple_disc(nn.Module): - def __init__(self, act, m_dim, vertexes, b_dim): - super().__init__() - if act == "relu": - act = nn.ReLU() - elif act == "leaky": - act = nn.LeakyReLU() - elif act == "sigmoid": - act = nn.Sigmoid() - elif act == "tanh": - act = nn.Tanh() - features = vertexes * m_dim + vertexes * vertexes * b_dim - - self.predictor = nn.Sequential(nn.Linear(features,256), act, nn.Linear(256,128), act, nn.Linear(128,64), act, - nn.Linear(64,32), act, nn.Linear(32,16), act, - nn.Linear(16,1)) - - def forward(self, x): - - prediction = self.predictor(x) - - #prediction = F.softmax(prediction,dim=-1) - - return prediction \ No newline at end of file diff --git a/spaces/Hallucinate/demo/k_diffusion/layers.py b/spaces/Hallucinate/demo/k_diffusion/layers.py deleted file mode 100644 index aa647bd3c1e0bef91e475f2376b4a79f6bb0823d..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/k_diffusion/layers.py +++ /dev/null @@ -1,256 +0,0 @@ -import math - -from einops import rearrange, repeat -import torch -from torch import nn -from torch.nn import functional as F - -from . import sampling, utils - -# Karras et al. preconditioned denoiser - -class Denoiser(nn.Module): - """A Karras et al. preconditioner for denoising diffusion models.""" - - def __init__(self, inner_model, sigma_data=1.): - super().__init__() - self.inner_model = inner_model - self.sigma_data = sigma_data - - def get_scalings(self, sigma): - c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - c_out = sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_skip, c_out, c_in - - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output = self.inner_model(noised_input * c_in, sigma, **kwargs) - target = (input - c_skip * noised_input) / c_out - return (model_output - target).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - return self.inner_model(input * c_in, sigma, **kwargs) * c_out + input * c_skip - - -class DenoiserWithVariance(Denoiser): - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output, logvar = self.inner_model(noised_input * c_in, sigma, return_variance=True, **kwargs) - logvar = utils.append_dims(logvar, model_output.ndim) - target = (input - c_skip * noised_input) / c_out - losses = ((model_output - target) ** 2 / logvar.exp() + logvar) / 2 - return losses.flatten(1).mean(1) - - -class SimpleLossDenoiser(Denoiser): - """L_simple with the Karras et al. 
preconditioner.""" - - def loss(self, input, noise, sigma, **kwargs): - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - denoised = self(noised_input, sigma, **kwargs) - eps = sampling.to_d(noised_input, sigma, denoised) - return (eps - noise).pow(2).flatten(1).mean(1) - - -# Residual blocks - -class ResidualBlock(nn.Module): - def __init__(self, *main, skip=None): - super().__init__() - self.main = nn.Sequential(*main) - self.skip = skip if skip else nn.Identity() - - def forward(self, input): - return self.main(input) + self.skip(input) - - -# Noise level (and other) conditioning - -class ConditionedModule(nn.Module): - pass - - -class UnconditionedModule(ConditionedModule): - def __init__(self, module): - super().__init__() - self.module = module - - def forward(self, input, cond=None): - return self.module(input) - - -class ConditionedSequential(nn.Sequential, ConditionedModule): - def forward(self, input, cond): - for module in self: - if isinstance(module, ConditionedModule): - input = module(input, cond) - else: - input = module(input) - return input - - -class ConditionedResidualBlock(ConditionedModule): - def __init__(self, *main, skip=None): - super().__init__() - self.main = ConditionedSequential(*main) - self.skip = skip if skip else nn.Identity() - - def forward(self, input, cond): - skip = self.skip(input, cond) if isinstance(self.skip, ConditionedModule) else self.skip(input) - return self.main(input, cond) + skip - - -class AdaGN(ConditionedModule): - def __init__(self, feats_in, c_out, num_groups, eps=1e-5, cond_key='cond'): - super().__init__() - self.num_groups = num_groups - self.eps = eps - self.cond_key = cond_key - self.mapper = nn.Linear(feats_in, c_out * 2) - - def forward(self, input, cond): - weight, bias = self.mapper(cond[self.cond_key]).chunk(2, dim=-1) - input = F.group_norm(input, self.num_groups, eps=self.eps) - return torch.addcmul(utils.append_dims(bias, input.ndim), input, utils.append_dims(weight, input.ndim) + 1) - - -# Attention - -class SelfAttention2d(ConditionedModule): - def __init__(self, c_in, n_head, norm, dropout_rate=0.): - super().__init__() - assert c_in % n_head == 0 - self.norm_in = norm(c_in) - self.n_head = n_head - self.qkv_proj = nn.Conv2d(c_in, c_in * 3, 1) - self.out_proj = nn.Conv2d(c_in, c_in, 1) - self.dropout = nn.Dropout(dropout_rate) - - def forward(self, input, cond): - n, c, h, w = input.shape - qkv = self.qkv_proj(self.norm_in(input, cond)) - qkv = qkv.view([n, self.n_head * 3, c // self.n_head, h * w]).transpose(2, 3) - q, k, v = qkv.chunk(3, dim=1) - scale = k.shape[3] ** -0.25 - att = ((q * scale) @ (k.transpose(2, 3) * scale)).softmax(3) - att = self.dropout(att) - y = (att @ v).transpose(2, 3).contiguous().view([n, c, h, w]) - return input + self.out_proj(y) - - -class CrossAttention2d(ConditionedModule): - def __init__(self, c_dec, c_enc, n_head, norm_dec, dropout_rate=0., - cond_key='cross', cond_key_padding='cross_padding'): - super().__init__() - assert c_dec % n_head == 0 - self.cond_key = cond_key - self.cond_key_padding = cond_key_padding - self.norm_enc = nn.LayerNorm(c_enc) - self.norm_dec = norm_dec(c_dec) - self.n_head = n_head - self.q_proj = nn.Conv2d(c_dec, c_dec, 1) - self.kv_proj = nn.Linear(c_enc, c_dec * 2) - self.out_proj = nn.Conv2d(c_dec, c_dec, 1) - self.dropout = nn.Dropout(dropout_rate) - - def forward(self, input, cond): - n, c, h, w = input.shape - q = self.q_proj(self.norm_dec(input, cond)) - q = q.view([n, self.n_head, c // self.n_head, h * w]).transpose(2, 3) - kv = 
self.kv_proj(self.norm_enc(cond[self.cond_key])) - kv = kv.view([n, -1, self.n_head * 2, c // self.n_head]).transpose(1, 2) - k, v = kv.chunk(2, dim=1) - scale = k.shape[3] ** -0.25 - att = ((q * scale) @ (k.transpose(2, 3) * scale)) - att = att - (cond[self.cond_key_padding][:, None, None, :]) * 10000 - att = att.softmax(3) - att = self.dropout(att) - y = (att @ v).transpose(2, 3) - y = y.contiguous().view([n, c, h, w]) - return input + self.out_proj(y) - - -# Downsampling/upsampling - -_kernels = { - 'linear': - [1 / 8, 3 / 8, 3 / 8, 1 / 8], - 'cubic': - [-0.01171875, -0.03515625, 0.11328125, 0.43359375, - 0.43359375, 0.11328125, -0.03515625, -0.01171875], - 'lanczos3': - [0.003689131001010537, 0.015056144446134567, -0.03399861603975296, - -0.066637322306633, 0.13550527393817902, 0.44638532400131226, - 0.44638532400131226, 0.13550527393817902, -0.066637322306633, - -0.03399861603975296, 0.015056144446134567, 0.003689131001010537] -} -_kernels['bilinear'] = _kernels['linear'] -_kernels['bicubic'] = _kernels['cubic'] - - -class Downsample2d(nn.Module): - def __init__(self, kernel='linear', pad_mode='reflect'): - super().__init__() - self.pad_mode = pad_mode - kernel_1d = torch.tensor([_kernels[kernel]]) - self.pad = kernel_1d.shape[1] // 2 - 1 - self.register_buffer('kernel', kernel_1d.T @ kernel_1d) - - def forward(self, x): - x = F.pad(x, (self.pad,) * 4, self.pad_mode) - weight = x.new_zeros([x.shape[1], x.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) - indices = torch.arange(x.shape[1], device=x.device) - weight[indices, indices] = self.kernel.to(weight) - return F.conv2d(x, weight, stride=2) - - -class Upsample2d(nn.Module): - def __init__(self, kernel='linear', pad_mode='reflect'): - super().__init__() - self.pad_mode = pad_mode - kernel_1d = torch.tensor([_kernels[kernel]]) * 2 - self.pad = kernel_1d.shape[1] // 2 - 1 - self.register_buffer('kernel', kernel_1d.T @ kernel_1d) - - def forward(self, x): - x = F.pad(x, ((self.pad + 1) // 2,) * 4, self.pad_mode) - weight = x.new_zeros([x.shape[1], x.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) - indices = torch.arange(x.shape[1], device=x.device) - weight[indices, indices] = self.kernel.to(weight) - return F.conv_transpose2d(x, weight, stride=2, padding=self.pad * 2 + 1) - - -# Embeddings - -class FourierFeatures(nn.Module): - def __init__(self, in_features, out_features, std=1.): - super().__init__() - assert out_features % 2 == 0 - self.register_buffer('weight', torch.randn([out_features // 2, in_features]) * std) - - def forward(self, input): - f = 2 * math.pi * input @ self.weight.T - return torch.cat([f.cos(), f.sin()], dim=-1) - - -# U-Nets - -class UNet(ConditionedModule): - def __init__(self, d_blocks, u_blocks, skip_stages=0): - super().__init__() - self.d_blocks = nn.ModuleList(d_blocks) - self.u_blocks = nn.ModuleList(u_blocks) - self.skip_stages = skip_stages - - def forward(self, input, cond): - skips = [] - for block in self.d_blocks[self.skip_stages:]: - input = block(input, cond) - skips.append(input) - for i, (block, skip) in enumerate(zip(self.u_blocks, reversed(skips))): - input = block(input, cond, skip if i > 0 else None) - return input diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git 
a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_incremental_decoder.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_incremental_decoder.py deleted file mode 100644 index cc72a0f8f3da238a8ce846240e5008d91ce1bc1a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_incremental_decoder.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from typing import Dict, Optional - -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.models import FairseqDecoder -from torch import Tensor - - -logger = logging.getLogger(__name__) - - -@with_incremental_state -class FairseqIncrementalDecoder(FairseqDecoder): - """Base class for incremental decoders. - - Incremental decoding is a special mode at inference time where the Model - only receives a single timestep of input corresponding to the previous - output token (for teacher forcing) and must produce the next output - *incrementally*. Thus the model must cache any long-term state that is - needed about the sequence, e.g., hidden states, convolutional states, etc. - - Compared to the standard :class:`FairseqDecoder` interface, the incremental - decoder interface allows :func:`forward` functions to take an extra keyword - argument (*incremental_state*) that can be used to cache state across - time-steps. - - The :class:`FairseqIncrementalDecoder` interface also defines the - :func:`reorder_incremental_state` method, which is used during beam search - to select and reorder the incremental state based on the selection of beams. - - To learn more about how incremental decoding works, refer to `this blog - `_. - """ - - def __init__(self, dictionary): - super().__init__(dictionary) - - def forward( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs - ): - """ - Args: - prev_output_tokens (LongTensor): shifted output tokens of shape - `(batch, tgt_len)`, for teacher forcing - encoder_out (dict, optional): output from the encoder, used for - encoder-side attention - incremental_state (dict, optional): dictionary used for storing - state during :ref:`Incremental decoding` - - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - raise NotImplementedError - - def extract_features( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs - ): - """ - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - raise NotImplementedError - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - pass - - def reorder_incremental_state_scripting( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Main entry point for reordering the incremental state. 
- - Due to limitations in TorchScript, we call this function in - :class:`fairseq.sequence_generator.SequenceGenerator` instead of - calling :func:`reorder_incremental_state` directly. - """ - for module in self.modules(): - if hasattr(module, "reorder_incremental_state"): - result = module.reorder_incremental_state(incremental_state, new_order) - if result is not None: - incremental_state = result - - def set_beam_size(self, beam_size): - """Sets the beam size in the decoder and all children.""" - if getattr(self, "_beam_size", -1) != beam_size: - seen = set() - - def apply_set_beam_size(module): - if ( - module != self - and hasattr(module, "set_beam_size") - and module not in seen - ): - seen.add(module) - module.set_beam_size(beam_size) - - self.apply(apply_set_beam_size) - self._beam_size = beam_size diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/text/symbols.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/text/symbols.py deleted file mode 100644 index 3460be23cdf863cea1df9a57255c759175d37595..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/text/symbols.py +++ /dev/null @@ -1,23 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -""" -Defines the set of symbols used in text input to the model. - -The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. """ -import utils -import os - -hps = utils.get_hparams() - -# with open(os.path.abspath(hps.data.chars_file), encoding='utf-8') as file: -# chars = file.read() - -# with open(os.path.abspath(hps.data.punc_file), encoding='utf-8') as file: -# punc = file.read() - -_punctuation = hps.data.punc -_letters = hps.data.chars - -# export all characters as list - -symbols = list(_punctuation) + list(_letters) diff --git a/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/app.py b/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/app.py deleted file mode 100644 index 9ff15ae6048b4a41f3c64923d49d04a376962da3..0000000000000000000000000000000000000000 --- a/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/app.py +++ /dev/null @@ -1,609 +0,0 @@ -import cv2 -import requests - -from PIL import Image -import PIL -from PIL import ImageDraw - -from matplotlib import pyplot as plt -import matplotlib -from matplotlib import rcParams - -import os -import tempfile -from io import BytesIO -from pathlib import Path -import argparse -import random -import numpy as np -import torch -import matplotlib.cm as cm -import pandas as pd - - -from transformers import OwlViTProcessor, OwlViTForObjectDetection -from transformers.image_utils import ImageFeatureExtractionMixin - - -from SuperGluePretrainedNetwork.models.matching import Matching -from SuperGluePretrainedNetwork.models.utils import (compute_pose_error, compute_epipolar_error, - estimate_pose, - error_colormap, AverageTimer, pose_auc, read_image, - rotate_intrinsics, rotate_pose_inplane, - scale_intrinsics) - -torch.set_grad_enabled(False) - - - - -mixin = ImageFeatureExtractionMixin() -model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32") -processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32") - - -# Use GPU if available -if torch.cuda.is_available(): - device = torch.device("cuda") -else: - device = torch.device("cpu") - - -import requests -from PIL import Image, ImageDraw -from io import BytesIO -import 
matplotlib.pyplot as plt -import numpy as np -import torch -import cv2 -import tempfile - -def detect_and_crop2(target_image_path, - query_image_path, - model, - processor, - mixin, - device, - threshold=0.5, - nms_threshold=0.3, - visualize=True): - - # Open target image - image = Image.open(target_image_path).convert('RGB') - image_size = model.config.vision_config.image_size + 5 - image = mixin.resize(image, image_size) - target_sizes = torch.Tensor([image.size[::-1]]) - - # Open query image - query_image = Image.open(query_image_path).convert('RGB') - image_size = model.config.vision_config.image_size + 5 - query_image = mixin.resize(query_image, image_size) - - # Process input and query image - inputs = processor(images=image, query_images=query_image, return_tensors="pt").to(device) - - # Get predictions - with torch.no_grad(): - outputs = model.image_guided_detection(**inputs) - - # Convert predictions to CPU - img = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB) - outputs.logits = outputs.logits.cpu() - outputs.target_pred_boxes = outputs.target_pred_boxes.cpu() - - # Post process the predictions - results = processor.post_process_image_guided_detection(outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes) - boxes, scores = results[0]["boxes"], results[0]["scores"] - - # If no boxes, return an empty list - if len(boxes) == 0 and visualize: - print(f"No boxes detected for image: {target_image_path}") - fig, ax = plt.subplots(figsize=(6, 6)) - ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) - ax.set_title("Original Image") - ax.axis("off") - plt.show() - return [] - - # Filter boxes - img_with_all_boxes = img.copy() - filtered_boxes = [] - filtered_scores = [] - img_width, img_height = img.shape[1], img.shape[0] - for box, score in zip(boxes, scores): - x1, y1, x2, y2 = [int(i) for i in box.tolist()] - if x1 < 0 or y1 < 0 or x2 < 0 or y2 < 0: - continue - if (x2 - x1) / img_width >= 0.94 and (y2 - y1) / img_height >= 0.94: - continue - filtered_boxes.append([x1, y1, x2, y2]) - filtered_scores.append(score) - - # Draw boxes on original image - draw = ImageDraw.Draw(image) - for box in filtered_boxes: - draw.rectangle(box, outline="red",width=3) - - cropped_images = [] - for box in filtered_boxes: - x1, y1, x2, y2 = box - cropped_img = img[y1:y2, x1:x2] - if cropped_img.size != 0: - cropped_images.append(cropped_img) - - if visualize: - # Visualization - if not filtered_boxes: - fig, ax = plt.subplots(figsize=(6, 6)) - ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) - ax.set_title("Original Image") - ax.axis("off") - plt.show() - else: - fig, axs = plt.subplots(1, len(cropped_images) + 2, figsize=(15, 5)) - axs[0].imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) - axs[0].set_title("Original Image") - axs[0].axis("off") - - for i, (box, score) in enumerate(zip(filtered_boxes, filtered_scores)): - x1, y1, x2, y2 = box - cropped_img = img[y1:y2, x1:x2] - font = cv2.FONT_HERSHEY_SIMPLEX - text = f"{score:.2f}" - cv2.putText(cropped_img, text, (5, cropped_img.shape[0]-10), font, 0.5, (255,0,0), 1, cv2.LINE_AA) - axs[i+2].imshow(cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB)) - axs[i+2].set_title("Score: " + text) - axs[i+2].axis("off") - plt.tight_layout() - plt.show() - - return cropped_images, image # return original image with boxes drawn - -def save_array_to_temp_image(arr): - # Convert the array to an image - img = Image.fromarray(arr) - - # Create a temporary file for the image - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png', 
dir=tempfile.gettempdir()) - temp_file_name = temp_file.name - temp_file.close() # We close it because we're not writing to it directly, PIL will handle the writing - - # Save the image to the temp file - img.save(temp_file_name) - - return temp_file_name - -''' -def process_resize(w: int, h: int, resize_dims: list) -> tuple: - if len(resize_dims) == 1 and resize_dims[0] > -1: - scale = resize_dims[0] / max(h, w) - w_new, h_new = int(round(w * scale)), int(round(h * scale)) - return w_new, h_new - return w, h -''' - -def plot_image_pair(imgs, dpi=100, size=6, pad=.5): - n = len(imgs) - assert n == 2, 'number of images must be two' - figsize = (size*n, size*3/4) if size is not None else None - _, ax = plt.subplots(1, n, figsize=figsize, dpi=dpi) - for i in range(n): - ax[i].imshow(imgs[i], cmap=plt.get_cmap('gray'), vmin=0, vmax=255) - ax[i].get_yaxis().set_ticks([]) - ax[i].get_xaxis().set_ticks([]) - for spine in ax[i].spines.values(): # remove frame - spine.set_visible(False) - plt.tight_layout(pad=pad) - -def plot_keypoints(kpts0, kpts1, color='w', ps=2): - ax = plt.gcf().axes - ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps) - ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps) - -def plot_matches(kpts0, kpts1, color, lw=1.5, ps=4): - fig = plt.gcf() - ax = fig.axes - fig.canvas.draw() - - transFigure = fig.transFigure.inverted() - fkpts0 = transFigure.transform(ax[0].transData.transform(kpts0)) - fkpts1 = transFigure.transform(ax[1].transData.transform(kpts1)) - - fig.lines = [matplotlib.lines.Line2D( - (fkpts0[i, 0], fkpts1[i, 0]), (fkpts0[i, 1], fkpts1[i, 1]), zorder=1, - transform=fig.transFigure, c=color[i], linewidth=lw) - for i in range(len(kpts0))] - ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps) - ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps) - -def unified_matching_plot2(image0, image1, kpts0, kpts1, mkpts0, mkpts1, - color, text, path=None, show_keypoints=False, - fast_viz=False, opencv_display=False, - opencv_title='matches', small_text=[]): - - # Set the background color for the plot - plt.figure(facecolor='#eeeeee') - plot_image_pair([image0, image1]) - - # Elegant points and lines for matches - if show_keypoints: - plot_keypoints(kpts0, kpts1, color='k', ps=4) - plot_keypoints(kpts0, kpts1, color='w', ps=2) - plot_matches(mkpts0, mkpts1, color, lw=1) - - fig = plt.gcf() - - # Add text - fig.text( - 0.01, 0.01, '\n'.join(small_text), transform=fig.axes[0].transAxes, - fontsize=10, va='bottom', ha='left', color='#333333', fontweight='bold', - bbox=dict(facecolor='white', alpha=0.7, edgecolor='none', boxstyle="round,pad=0.3")) - - fig.text( - 0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes, - fontsize=15, va='top', ha='left', color='#333333', fontweight='bold', - bbox=dict(facecolor='white', alpha=0.7, edgecolor='none', boxstyle="round,pad=0.3")) - - # Optional: remove axis for a cleaner look - plt.axis('off') - - # Convert the figure to an OpenCV image - buf = BytesIO() - plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0) - buf.seek(0) - img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8) - buf.close() - img = cv2.imdecode(img_arr, 1) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - # Close the figure to free memory - plt.close(fig) - - return img - -def create_image_pyramid2(image_path, longest_side, scales=[0.25, 0.5, 1.0]): - original_image = cv2.imread(image_path) - oh, ow, _ = original_image.shape - - # Determine the scaling factor based on the longest side - if oh > ow: - output_height = longest_side - 
output_width = int((ow / oh) * longest_side) - else: - output_width = longest_side - output_height = int((oh / ow) * longest_side) - output_size = (output_width, output_height) - - pyramid = [] - - for scale in scales: - # Resize based on the scale factor - resized = cv2.resize(original_image, None, fx=scale, fy=scale) - rh, rw, _ = resized.shape - - if scale < 1.0: # downsampling - # Calculate the amount of padding required - dy_top = max((output_size[1] - rh) // 2, 0) - dy_bottom = output_size[1] - rh - dy_top - dx_left = max((output_size[0] - rw) // 2, 0) - dx_right = output_size[0] - rw - dx_left - - # Create padded image - padded = cv2.copyMakeBorder(resized, dy_top, dy_bottom, dx_left, dx_right, cv2.BORDER_CONSTANT, value=[255, 255, 255]) - pyramid.append(padded) - elif scale > 1.0: # upsampling - # We need to crop the image to fit the desired output size - dy = (rh - output_size[1]) // 2 - dx = (rw - output_size[0]) // 2 - cropped = resized[dy:dy+output_size[1], dx:dx+output_size[0]] - pyramid.append(cropped) - else: # scale == 1.0 - pyramid.append(resized) - - return pyramid - -# Example usage -# pyramid = create_image_pyramid('path_to_image.jpg', 800) -def image_matching(query_img, target_img, image_dims=[640*2], scale_factors=[0.33,0.66,1], visualize=True, k_thresh=None, m_thresh=None, write=False): - - image1, inp1, scales1 = read_image(target_img, device, [640*2], 0, True) - query_pyramid = create_image_pyramid2(query_img, image_dims[0], scale_factors) - - all_valid = [] - all_inliers = [] - all_return_imgs = [] - max_matches_img = None - max_matches = -1 - - for idx, query_level in enumerate(query_pyramid): - temp_file_path = "temp_level_{}.png".format(idx) - cv2.imwrite(temp_file_path, query_level) - - image0, inp0, scales0 = read_image(temp_file_path, device, [640*2], 0, True) - - if image0 is None or image1 is None: - print('Problem reading image pair: {} {}'.format(query_img, target_img)) - else: - # Matching - pred = matching({'image0': inp0, 'image1': inp1}) - pred = {k: v[0] for k, v in pred.items()} - kpts0, kpts1 = pred['keypoints0'], pred['keypoints1'] - matches, conf = pred['matches0'], pred['matching_scores0'] - - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - #color = cm.jet(mconf)[:len(mkpts0)] # Ensure consistent size - color = cm.jet(mconf.detach().numpy())[:len(mkpts0)] - - all_valid.append(np.sum( valid.tolist() )) - - # Convert torch tensors to numpy arrays. - mkpts0_np = mkpts0.cpu().numpy() - mkpts1_np = mkpts1.cpu().numpy() - - try: - # Use RANSAC to find the homography matrix. - H, inliers = cv2.findHomography(mkpts0_np, mkpts1_np, cv2.RANSAC, 5.0) - except: - H = 0 - inliers = 0 - print ("Not enough points for homography") - # Convert inliers from shape (N, 1) to shape (N,) and count them. 
- num_inliers = np.sum(inliers) - - all_inliers.append(num_inliers) - - # Visualization - text = [ - 'Engagify Image Matching', - 'Keypoints: {}:{}'.format(len(kpts0), len(kpts1)), - 'Scaling Factor: {}'.format( scale_factors[idx]), - 'Matches: {}'.format(len(mkpts0)), - 'Inliers: {}'.format(num_inliers), - ] - - - k_thresh = matching.superpoint.config['keypoint_threshold'] - m_thresh = matching.superglue.config['match_threshold'] - - small_text = [ - 'Keypoint Threshold: {:.4f}'.format(k_thresh), - 'Match Threshold: {:.2f}'.format(m_thresh), - ] - - visualized_img = None # To store the visualized image - - if visualize: - ret_img = unified_matching_plot2( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, text, 'Test_Level_{}'.format(idx), True, False, True, 'Matches_Level_{}'.format(idx), small_text) - all_return_imgs.append(ret_img) - # Storing image with most matches - #if len(mkpts0) > max_matches: - # max_matches = len(mkpts0) - # max_matches_img = 'Matches_Level_{}'.format(idx) - - avg_valid = np.sum(all_valid) / len(scale_factors) - avg_inliers = np.sum(all_inliers) / len(scale_factors) - -# Convert the image with the most matches to base64 encoded format -# with open(max_matches_img, "rb") as image_file: -# encoded_string = base64.b64encode(image_file.read()).decode() - - return {'valid':all_valid, 'inliers':all_inliers, 'visualized_image':all_return_imgs} #, encoded_string - -# Usage: -#results = image_matching('Samples/Poster/poster_event_small_22.jpg', 'Samples/Images/16.jpeg', visualize=True) -#print (results) - -def image_matching_no_pyramid(query_img, target_img, visualize=True, write=False): - - image1, inp1, scales1 = read_image(target_img, device, [640*2], 0, True) - image0, inp0, scales0 = read_image(query_img, device, [640*2], 0, True) - - if image0 is None or image1 is None: - print('Problem reading image pair: {} {}'.format(query_img, target_img)) - return None - - # Matching - pred = matching({'image0': inp0, 'image1': inp1}) - pred = {k: v[0] for k, v in pred.items()} - kpts0, kpts1 = pred['keypoints0'], pred['keypoints1'] - matches, conf = pred['matches0'], pred['matching_scores0'] - - valid = matches > -1 - mkpts0 = kpts0[valid] - mkpts1 = kpts1[matches[valid]] - mconf = conf[valid] - #color = cm.jet(mconf)[:len(mkpts0)] # Ensure consistent size - color = cm.jet(mconf.detach().numpy())[:len(mkpts0)] - - valid_count = np.sum(valid.tolist()) - - # Convert torch tensors to numpy arrays. - mkpts0_np = mkpts0.cpu().numpy() - mkpts1_np = mkpts1.cpu().numpy() - - try: - # Use RANSAC to find the homography matrix. - H, inliers = cv2.findHomography(mkpts0_np, mkpts1_np, cv2.RANSAC, 5.0) - except: - H = 0 - inliers = 0 - print("Not enough points for homography") - - # Convert inliers from shape (N, 1) to shape (N,) and count them. 
- num_inliers = np.sum(inliers) - - # Visualization - text = [ - 'Engagify Image Matching', - 'Keypoints: {}:{}'.format(len(kpts0), len(kpts1)), - 'Matches: {}'.format(len(mkpts0)), - 'Inliers: {}'.format(num_inliers), - ] - - k_thresh = matching.superpoint.config['keypoint_threshold'] - m_thresh = matching.superglue.config['match_threshold'] - - small_text = [ - 'Keypoint Threshold: {:.4f}'.format(k_thresh), - 'Match Threshold: {:.2f}'.format(m_thresh), - ] - - visualized_img = None # To store the visualized image - - if visualize: - visualized_img = unified_matching_plot2( - image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, text, 'Test_Match', True, False, True, 'Matches', small_text) - - return { - 'valid': [valid_count], - 'inliers': [num_inliers], - 'visualized_image': [visualized_img] - } - -# Usage: -#results = image_matching_no_pyramid('Samples/Poster/poster_event_small_22.jpg', 'Samples/Images/16.jpeg', visualize=True) - -# Load the SuperPoint and SuperGlue models. -device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu' -print('Running inference on device \"{}\"'.format(device)) -config = { - 'superpoint': { - 'nms_radius': 4, - 'keypoint_threshold': 0.005, - 'max_keypoints': 1024 - }, - 'superglue': { - 'weights': 'outdoor', - 'sinkhorn_iterations': 20, - 'match_threshold': 0.2, - } -} -matching = Matching(config).eval().to(device) - -from PIL import Image - -def stitch_images(images): - """Stitches a list of images vertically.""" - if not images: - # Return a placeholder image if the images list is empty - return Image.new('RGB', (100, 100), color='gray') - - max_width = max([img.width for img in images]) - total_height = sum(img.height for img in images) - - composite = Image.new('RGB', (max_width, total_height)) - - y_offset = 0 - for img in images: - composite.paste(img, (0, y_offset)) - y_offset += img.height - - return composite - -def check_object_in_image3(query_image, target_image, threshold=50, scale_factor=[0.33,0.66,1]): - decision_on = [] - # Convert cv2 images to PIL images and add them to a list - images_to_return = [] - - cropped_images, bbox_image = detect_and_crop2(target_image_path=target_image, - query_image_path=query_image, - model=model, - processor=processor, - mixin=mixin, - device=device, - visualize=False) - - temp_files = [save_array_to_temp_image(i) for i in cropped_images] - crop_results = [image_matching_no_pyramid(query_image, i, visualize=True) for i in temp_files] - - cropped_visuals = [] - cropped_inliers = [] - for result in crop_results: - # Add visualized images to the temporary list - for img in result['visualized_image']: - cropped_visuals.append(Image.fromarray(img)) - for inliers_ in result['inliers']: - cropped_inliers.append(inliers_) - # Stitch the cropped visuals into one image - images_to_return.append(stitch_images(cropped_visuals)) - - pyramid_results = image_matching(query_image, target_image, visualize=True, scale_factors=scale_factor) - - pyramid_visuals = [Image.fromarray(img) for img in pyramid_results['visualized_image']] - # Stitch the pyramid visuals into one image - images_to_return.append(stitch_images(pyramid_visuals)) - - # Check inliers and determine if the object is present - print (cropped_inliers) - is_present = any(value > threshold for value in cropped_inliers) - if is_present == True: - decision_on.append('Object Detection') - is_present = any(value > threshold for value in pyramid_results["inliers"]) - if is_present == True: - decision_on.append('Pyramid Max Point') - if is_present == 
False: - decision_on.append("Neither, It Failed All Tests") - - # Return results as a dictionary - return { - 'is_present': is_present, - 'images': images_to_return, - 'scale factors': scale_factor, - 'object detection inliers': cropped_inliers, - 'pyramid_inliers' : pyramid_results["inliers"], - 'bbox_image':bbox_image, - 'decision_on':decision_on, - - } - -# Example call: -#result = check_object_in_image3('Samples/Poster/poster_event_small.jpg', 'Samples/Images/True_Image_3423234.jpeg', 50) -# Accessing the results: -#print(result['is_present']) # prints True/False -#print(result['images']) # is a list of 2 stitched images. - - -import gradio as gr -import cv2 -from PIL import Image - -def gradio_interface(query_image_path, target_image_path, threshold): - result = check_object_in_image3(query_image_path, target_image_path, threshold) - # Depending on how many images are in the list, you can return them like this: - return result['bbox_image'], result['images'][0], result['object detection inliers'], result['scale factors'], result['pyramid_inliers'], result['images'][1], str(result['is_present']), result['decision_on'] - - -# Define the Gradio interface -interface = gr.Interface( - fn=gradio_interface, # function to be called on button press - inputs=[ - gr.components.Image(label="Query Image (Drop the Image you want to detect here)", type="filepath"), - gr.components.Image(label="Target Image (Drop the Image youd like to search here)", type="filepath"), - gr.components.Slider(minimum=0, maximum=200, value=50, step=5, label="Enter the Inlier Threshold"), - ], - outputs=[ - gr.components.Image(label='Filtered Regions of Interest (Candidates)'), - gr.components.Image(label="Cropped Visuals from Image Guided Object Detection "), - gr.components.Text(label='Inliers detected for Image Guided Object Detection '), - gr.components.Text(label='Scale Factors Used for Pyramid (Results below, In Order)'), - gr.components.Text(label='Inliers detected for Pyramid Search (In Order)'), - gr.components.Image(label="Pyramid Visuals"), - gr.components.Textbox(label="Object Present?"), - gr.components.Textbox(label="Decision Taken Based on?"), - ], - theme=gr.themes.Monochrome(), - title="'Image Specific Image Recognition + Matching Tool", - description="[Author: Ibrahim Hasani] \n " - " This tool leverages Transformer, Deep Learning, and Traditional Computer Vision techniques to determine if a specified object " - "(given by the query image) is present within a target image. \n" - "1. Image-Guided Object Detection where we detect potential regions of interest. (Owl-Vit-Google). \n" - "2. Pyramid Search that looks at various scales of the target image. Results provide " - "visual representations of the matching process and a final verdict on the object's presence.\n" - "3. SuperPoint (MagicLeap) + SuperGlue + Homography to extract inliers, which are thresholded for decision making." -) - -interface.launch() \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py deleted file mode 100644 index a30254604311a488a1d4959f941051890ed32b2e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import logging -from pathlib import Path -from collections import defaultdict -from typing import List, Dict, Tuple - -import pandas as pd -import numpy as np -import torchaudio -from tqdm import tqdm - -from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv - - -log = logging.getLogger(__name__) - -SPLITS = ["train", "dev", "test"] - - -def get_top_n( - root: Path, n_speakers: int = 10, min_n_tokens: int = 5 -) -> pd.DataFrame: - df = load_df_from_tsv(root / "validated.tsv") - df["n_tokens"] = [len(s.split()) for s in df["sentence"]] - df = df[df["n_tokens"] >= min_n_tokens] - df["n_frames"] = [ - torchaudio.info((root / "clips" / p).as_posix()).num_frames - for p in tqdm(df["path"]) - ] - df["id"] = [Path(p).stem for p in df["path"]] - total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"]) - total_duration_ms = total_duration_ms.sort_values("sum", ascending=False) - - top_n_total_duration_ms = total_duration_ms.head(n_speakers) - top_n_client_ids = set(top_n_total_duration_ms.index.tolist()) - df_top_n = df[df["client_id"].isin(top_n_client_ids)] - return df_top_n - - -def get_splits( - df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0 -) -> Tuple[Dict[str, str], List[str]]: - np.random.seed(rand_seed) - dev_split_ratio = (1. - train_split_ratio) / 3 - grouped = list(df.groupby("client_id")) - id_to_split = {} - for _, cur_df in tqdm(grouped): - cur_n_examples = len(cur_df) - if speaker_in_all_splits and cur_n_examples < 3: - continue - cur_n_train = int(cur_n_examples * train_split_ratio) - cur_n_dev = int(cur_n_examples * dev_split_ratio) - cur_n_test = cur_n_examples - cur_n_dev - cur_n_train - if speaker_in_all_splits and cur_n_dev * cur_n_test == 0: - cur_n_dev, cur_n_test = 1, 1 - cur_n_train = cur_n_examples - cur_n_dev - cur_n_test - cur_indices = cur_df.index.tolist() - cur_shuffled_indices = np.random.permutation(cur_n_examples) - cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices] - cur_indices_by_split = { - "train": cur_shuffled_indices[:cur_n_train], - "dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev], - "test": cur_shuffled_indices[cur_n_train + cur_n_dev:] - } - for split in SPLITS: - for i in cur_indices_by_split[split]: - id_ = df["id"].loc[i] - id_to_split[id_] = split - return id_to_split, sorted(df["client_id"].unique()) - - -def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000): - out_root = root / "wav" - out_root.mkdir(exist_ok=True, parents=True) - print("Converting to WAV...") - for n in tqdm(filenames): - in_path = (root / "clips" / n).as_posix() - waveform, sr = torchaudio.load(in_path) - converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor( - waveform, sr, [["rate", str(target_sr)], ["channels", "1"]] - ) - out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix() - torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S", - bits_per_sample=16) - - -def process(args): - data_root = Path(args.data_root).absolute() / args.lang - - # Generate TSV manifest - print("Generating manifest...") - - df_top_n = get_top_n(data_root) - id_to_split, speakers = get_splits(df_top_n) - - if args.convert_to_wav: - convert_to_wav(data_root, df_top_n["path"].tolist()) - - manifest_by_split = {split: defaultdict(list) for split in SPLITS} - for sample in 
tqdm(df_top_n.to_dict(orient="index").values()): - sample_id = sample["id"] - split = id_to_split[sample_id] - manifest_by_split[split]["id"].append(sample_id) - if args.convert_to_wav: - audio_path = data_root / "wav" / f"{sample_id}.wav" - else: - audio_path = data_root / "clips" / f"{sample_id}.mp3" - manifest_by_split[split]["audio"].append(audio_path.as_posix()) - manifest_by_split[split]["n_frames"].append(sample["n_frames"]) - manifest_by_split[split]["tgt_text"].append(sample["sentence"]) - manifest_by_split[split]["speaker"].append(sample["client_id"]) - manifest_by_split[split]["src_text"].append(sample["sentence"]) - - output_root = Path(args.output_manifest_root).absolute() - output_root.mkdir(parents=True, exist_ok=True) - for split in SPLITS: - save_df_to_tsv( - pd.DataFrame.from_dict(manifest_by_split[split]), - output_root / f"{split}.audio.tsv" - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--data-root", "-d", required=True, type=str) - parser.add_argument("--output-manifest-root", "-m", required=True, type=str) - parser.add_argument("--lang", "-l", required=True, type=str) - parser.add_argument("--convert-to-wav", action="store_true") - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/concat_sentences_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/concat_sentences_dataset.py deleted file mode 100644 index 625a29370e90f9d1d7274024afb902ed83a22325..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/concat_sentences_dataset.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . 
import FairseqDataset - - -class ConcatSentencesDataset(FairseqDataset): - def __init__(self, *datasets): - super().__init__() - self.datasets = datasets - assert all( - len(ds) == len(datasets[0]) for ds in datasets - ), "datasets must have the same length" - - def __getitem__(self, index): - return torch.cat([ds[index] for ds in self.datasets]) - - def __len__(self): - return len(self.datasets[0]) - - def collater(self, samples): - return self.datasets[0].collater(samples) - - @property - def sizes(self): - return sum(ds.sizes for ds in self.datasets) - - def num_tokens(self, index): - return sum(ds.num_tokens(index) for ds in self.datasets) - - def size(self, index): - return sum(ds.size(index) for ds in self.datasets) - - def ordered_indices(self): - return self.datasets[0].ordered_indices() - - @property - def supports_prefetch(self): - return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets) - - def prefetch(self, indices): - for ds in self.datasets: - if getattr(ds, "supports_prefetch", False): - ds.prefetch(indices) - - def set_epoch(self, epoch): - super().set_epoch(epoch) - for ds in self.datasets: - if hasattr(ds, "set_epoch"): - ds.set_epoch(epoch) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_pretraining.py b/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_pretraining.py deleted file mode 100644 index cc310088db8852e80cd2e65d51f06f8f7cb592e3..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_pretraining.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -import os -import sys - -from argparse import Namespace -from dataclasses import dataclass, field -from typing import Optional -from omegaconf import MISSING, II, OmegaConf - -from fairseq.data import BinarizedAudioDataset, FileAudioDataset -from fairseq.dataclass import FairseqDataclass, ChoiceEnum -from fairseq.data.text_compressor import TextCompressionLevel - -from . import FairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -@dataclass -class InferredW2vConfig: - # The following are needed to precompute mask and mask channel indices - # before model's forward. 
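-    # Each II("model.<field>") entry is an OmegaConf interpolation that resolves to the
-    # corresponding wav2vec 2.0 model hyperparameter when the config is resolved
-    # (see _get_mask_precompute_kwargs below), keeping the data-side mask precomputation
-    # in sync with the model's masking settings.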
- mask_length: Optional[int] = II("model.mask_length") - mask_prob: Optional[float] = II("model.mask_prob") - mask_selection: Optional[str] = II("model.mask_selection") - mask_other: Optional[float] = II("model.mask_other") - no_mask_overlap: Optional[bool] = II("model.no_mask_overlap") - mask_min_space: Optional[int] = II("model.mask_min_space") - mask_channel_length: Optional[int] = II("model.mask_channel_length") - mask_channel_prob: Optional[float] = II("model.mask_channel_prob") - mask_channel_selection: Optional[str] = II("model.mask_channel_selection") - mask_channel_other: Optional[float] = II("model.mask_channel_other") - no_mask_channel_overlap: Optional[bool] = II("model.no_mask_channel_overlap") - mask_channel_min_space: Optional[int] = II("model.mask_channel_min_space") - - conv_feature_layers: Optional[str] = II("model.conv_feature_layers") - encoder_embed_dim: Optional[int] = II("model.encoder_embed_dim") - - -@dataclass -class AudioPretrainingConfig(FairseqDataclass): - data: str = field(default=MISSING, metadata={"help": "path to data directory"}) - labels: Optional[str] = field( - default=None, - metadata={ - "help": "extension of the label file to load, used for fine-tuning"}, - ) - binarized_dataset: bool = field( - default=False, - metadata={ - "help": "if true, loads binarized dataset (useful for very large datasets). " - "See examples/wav2vec/scripts/binarize_manifest.sh" - }, - ) - sample_rate: int = field( - default=16_000, - metadata={ - "help": "target sample rate. audio files will be up/down sampled to this rate" - }, - ) - normalize: bool = field( - default=False, - metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, - ) - enable_padding: bool = field( - default=False, metadata={"help": "pad shorter samples instead of cropping"} - ) - max_sample_size: Optional[int] = field( - default=None, metadata={"help": "max sample size to crop to for batching"} - ) - min_sample_size: Optional[int] = field( - default=None, metadata={"help": "min sample size to skip small examples"} - ) - num_batch_buckets: int = field( - default=0, - metadata={"help": "number of buckets"}, - ) - precompute_mask_indices: bool = field( - default=False, - metadata={ - "help": "flag to compute mask indices in data preparation.", - }, - ) - - inferred_w2v_config: Optional[InferredW2vConfig] = field( - default=None, - metadata={ - "help": "wav2vec 2.0 masking arguments used to pre-compute masks (required for TPU)", - }, - ) - - tpu: bool = II("common.tpu") - text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field( - default="none", - metadata={ - "help": "compression level for texts (e.g. audio filenames, " - "target texts): none/low/high (default: none). " - } - ) - - -@register_task("audio_pretraining", dataclass=AudioPretrainingConfig) -class AudioPretrainingTask(FairseqTask): - """ """ - - cfg: AudioPretrainingConfig - - @classmethod - def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs): - """Setup the task (e.g., load dictionaries). 
- - Args: - cfg (AudioPretrainingConfig): configuration of this task - """ - - return cls(cfg) - - def _get_mask_precompute_kwargs(self, cfg): - if self.cfg.precompute_mask_indices or self.cfg.tpu: - assert ( - cfg.inferred_w2v_config is not None - ), "inferred_w2v_config must be set" - return OmegaConf.to_container( - cfg.inferred_w2v_config, resolve=True, enum_to_str=True - ) - else: - return {} - - def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs): - data_path = self.cfg.data - task_cfg = task_cfg or self.cfg - - # upgrade old task - if isinstance(task_cfg, Namespace): - if not hasattr(task_cfg, "autoregressive"): - task_cfg.autoregressive = not task_cfg.criterion == "ctc" - - text_compression_level = getattr( - TextCompressionLevel, str(self.cfg.text_compression_level) - ) - if getattr(task_cfg, "binarized_dataset", False): - self.datasets[split] = BinarizedAudioDataset( - data_path, - split=split, - sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), - max_sample_size=self.cfg.max_sample_size, - min_sample_size=self.cfg.min_sample_size, - pad=task_cfg.labels is not None or task_cfg.enable_padding, - normalize=task_cfg.normalize, - num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), - compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), - **self._get_mask_precompute_kwargs(task_cfg), - ) - else: - manifest_path = os.path.join(data_path, "{}.tsv".format(split)) - - self.datasets[split] = FileAudioDataset( - manifest_path=manifest_path, - sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), - max_sample_size=self.cfg.max_sample_size, - min_sample_size=self.cfg.min_sample_size, - pad=task_cfg.labels is not None or task_cfg.enable_padding, - normalize=task_cfg.normalize, - num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), - compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), - text_compression_level=text_compression_level, - **self._get_mask_precompute_kwargs(task_cfg), - ) - - if self.cfg.tpu and task_cfg.inferred_w2v_config.mask_channel_prob == 0.0: - logger.info( - "Pretraining on TPUs may suffer convergence " - "issues when training with `mask_channel_prob` value of " - "0. You may want to set this to a low value close to 0." 
- ) - - @property - def source_dictionary(self): - return None - - @property - def target_dictionary(self): - return None - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return sys.maxsize, sys.maxsize - - def build_model(self, model_cfg: FairseqDataclass): - model = super().build_model(model_cfg) - - actualized_cfg = getattr(model, "cfg", None) - if actualized_cfg is not None: - # if "w2v_args" in actualized_cfg: - if hasattr(actualized_cfg, "w2v_args"): - model_cfg.w2v_args = actualized_cfg.w2v_args - - return model diff --git a/spaces/Illumotion/Koboldcpp/otherarch/mpt_v3.cpp b/spaces/Illumotion/Koboldcpp/otherarch/mpt_v3.cpp deleted file mode 100644 index 57ed90888fb5309b861c5ebe917c6bd1dfc667c3..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/otherarch/mpt_v3.cpp +++ /dev/null @@ -1,581 +0,0 @@ -#include "ggml.h" -#include "otherarch.h" - -#include "utils.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "model_adapter.h" - -#ifdef GGML_USE_CUBLAS -#include "ggml-cuda.h" -#endif -#if defined(GGML_USE_CLBLAST) -#include "ggml-opencl.h" -#endif - -// load the model's weights from a file -bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vocab, int gpulayers) { - printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str()); - - auto fin = std::ifstream(fname, std::ios::binary); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - fin.read((char *)&magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; - } - } - - // load hparams - { - auto & hparams = model.hparams; - - fin.read((char *) &hparams.d_model, sizeof(hparams.d_model)); - fin.read((char *) &hparams.max_seq_len, sizeof(hparams.max_seq_len)); - fin.read((char *) &hparams.n_heads, sizeof(hparams.n_heads)); - fin.read((char *) &hparams.n_layers, sizeof(hparams.n_layers)); - fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fin.read((char *) &hparams.alibi_bias_max, sizeof(hparams.alibi_bias_max)); - fin.read((char *) &hparams.clip_qkv, sizeof(hparams.clip_qkv)); - fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); - - hparams.n_ctx = std::min(hparams.max_seq_len, hparams.n_ctx); - - const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; - - printf("%s: d_model = %d\n", __func__, hparams.d_model); - printf("%s: max_seq_len = %d\n", __func__, hparams.max_seq_len); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_heads = %d\n", __func__, hparams.n_heads); - printf("%s: n_layers = %d\n", __func__, hparams.n_layers); - printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: alibi_bias_max = %f\n", __func__, hparams.alibi_bias_max); - printf("%s: clip_qkv = %f\n", __func__, hparams.clip_qkv); - printf("%s: ftype = %d\n", __func__, hparams.ftype); - printf("%s: qntvr = %d\n", __func__, qntvr); - - hparams.ftype %= GGML_QNT_VERSION_FACTOR; - } - - // load vocab - { - const int32_t n_vocab = model.hparams.n_vocab; - - std::string word; - std::vector buf(128); - - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - fin.read((char *) &len, sizeof(len)); - - buf.resize(len); - fin.read((char *) buf.data(), len); - word.assign(buf.data(), len); - - // Convert token from utf-8 - // std::wstring 
word_multibytes = convert_to_wstring(word); - // if(word_multibytes!=L"") - // { - // word.resize(word_multibytes.size()); - // for (int w = 0; w < word_multibytes.size(); w++) { - // word[w] = uint8_t(word_multibytes[w]); - // } - // } - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // for the big tensors, we have the option to store the data in 16-bit - // floats or quantized in order to save memory and also to speed up the - // computation - ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype)); - if (wtype == GGML_TYPE_COUNT) { - fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(), - model.hparams.ftype); - return false; - } - - auto & ctx = model.ctx; - - size_t ctx_size = 0; - - const auto & hparams = model.hparams; - const size_t n_ctx = hparams.n_ctx; - - { - const size_t n_embd = hparams.d_model; - const size_t n_layer = hparams.n_layers; - const size_t n_vocab = hparams.n_vocab; - - ctx_size += n_embd * n_vocab * ggml_type_sizef(wtype); // wte_weight - ctx_size += n_embd * ggml_type_sizef(GGML_TYPE_F32); // norm_f_weight - - ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_1_weight - ctx_size += n_layer * (3 * n_embd * n_embd * ggml_type_sizef(wtype)); // attn_Wqkv_weight - ctx_size += n_layer * (n_embd * n_embd * ggml_type_sizef(wtype)); // attn_out_proj_weight - ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_2_weight - ctx_size += n_layer * (4 * n_embd * n_embd * ggml_type_sizef(wtype)); // mlp_mlp_up_weight - ctx_size += n_layer * (n_embd * n_embd * 4 * ggml_type_sizef(wtype)); // mlp_mlp_down_weight - - ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_k - ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_v - - ctx_size += (6 + 6 * n_layer) * 512; // object overhead - - printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size / (1024.0 * 1024.0)); - } - - // create the ggml context - { - struct ggml_init_params params; - params.mem_size = ctx_size; - params.mem_buffer = NULL; - params.no_alloc = false; - - model.ctx = ggml_init(params); - if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return false; - } - } - - // prepare memory for the weights - { - const auto & hparams = model.hparams; - - const size_t n_embd = hparams.d_model; - const size_t n_layer = hparams.n_layers; - const size_t n_vocab = hparams.n_vocab; - - model.layers.resize(n_layer); - - model.wte_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - model.norm_f_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - // map by name - model.tensors["transformer.wte.weight"] = model.wte_weight; - model.tensors["transformer.norm_f.weight"] = model.norm_f_weight; - - for (int i = 0; i < (int) n_layer; ++i) { - auto & layer = model.layers[i]; - - layer.norm_1_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.c_attn_wqkv_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd); - layer.c_attn_out_proj_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.norm_2_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ffn_up_proj = ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd); - layer.ffn_down_proj = ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd); - - // map by name - model.tensors["transformer.blocks." + std::to_string(i) + ".norm_1.weight"] = layer.norm_1_weight; - model.tensors["transformer.blocks." 
+ std::to_string(i) + ".attn.Wqkv.weight"] = layer.c_attn_wqkv_weight; - model.tensors["transformer.blocks." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_out_proj_weight; - model.tensors["transformer.blocks." + std::to_string(i) + ".norm_2.weight"] = layer.norm_2_weight; - model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.up_proj.weight"] = layer.ffn_up_proj; - model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.down_proj.weight"] = layer.ffn_down_proj; - } - } - - // key + value memory - { - const auto & hparams = model.hparams; - - const size_t n_embd = hparams.d_model; - const size_t n_layer = hparams.n_layers; - - const int64_t n_mem = n_layer * n_ctx; - const int64_t n_elements = n_embd * n_mem; - - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements); - - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); - - printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size / 1024.0 / 1024.0, n_mem); - } - - // load weights - { - int n_tensors = 0; - size_t total_size = 0; - - printf("%s: ", __func__); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ttype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ttype), sizeof(ttype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = {1, 1}; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - auto tensor = model.tensors[name.data()]; - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, - "%s: tensor '%s' has wrong shape in model file: got [%5d, " - "%5d], expected [%5d, %5d]\n", - __func__, name.data(), (int)tensor->ne[0], (int)tensor->ne[1], ne[0], ne[1]); - return false; - } - - // for debugging - if (0) { - printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], - ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor) / 1024.0 / 1024.0, ggml_nbytes(tensor)); - } - - const size_t bpe = ggml_type_size(ggml_type(ttype)); - - if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, - "%s: tensor '%s' has wrong size in model file: got %zu, " - "expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements * bpe); - return false; - } - - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - - total_size += ggml_nbytes(tensor); - if (++n_tensors % 8 == 0) { - printf("."); - fflush(stdout); - } - } - - printf(" done\n"); - - printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size / 1024.0 / 1024.0, n_tensors); - } - - fin.close(); - - //gpu offload - #if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS) - if(gpulayers>0) - { - const auto & hparams = model.hparams; - size_t vram_total = 0; - const int n_gpu = std::min(gpulayers, int(hparams.n_layers)); - #if defined(GGML_USE_CLBLAST) - fprintf(stderr, "%s: [opencl] offloading %d layers to 
GPU\n", __func__, n_gpu); - #else - fprintf(stderr, "%s: [CUDA] offloading %d layers to GPU\n", __func__, n_gpu); - #endif - for (int i = 0; i < n_gpu; ++i) { - const auto & layer = model.layers[i]; - layer.ffn_up_proj->backend = GGML_BACKEND_GPU; - layer.ffn_down_proj->backend = GGML_BACKEND_GPU; - layer.c_attn_wqkv_weight->backend = GGML_BACKEND_GPU; - layer.c_attn_out_proj_weight->backend = GGML_BACKEND_GPU; - #if defined(GGML_USE_CLBLAST) - ggml_cl_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_nbytes(layer.ffn_up_proj); - ggml_cl_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_nbytes(layer.ffn_down_proj); - ggml_cl_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_nbytes(layer.c_attn_wqkv_weight); - ggml_cl_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight); - #else - ggml_cuda_transform_tensor(layer.ffn_up_proj->data,layer.ffn_up_proj); vram_total += ggml_nbytes(layer.ffn_up_proj); - ggml_cuda_transform_tensor(layer.ffn_down_proj->data,layer.ffn_down_proj); vram_total += ggml_nbytes(layer.ffn_down_proj); - ggml_cuda_transform_tensor(layer.c_attn_wqkv_weight->data,layer.c_attn_wqkv_weight); vram_total += ggml_nbytes(layer.c_attn_wqkv_weight); - ggml_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight); - #endif - } - #if defined(GGML_USE_CLBLAST) - fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024); - #else - fprintf(stderr, "%s: [CUDA] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024); - #endif - } - #endif - - return true; -} - -// evaluate the transformer -// -// - model: the model -// - n_threads: number of threads to use -// - n_past: the context size so far -// - embd_inp: the embeddings of the tokens in the context -// - embd_w: the predicted logits for the next token -// -bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past, - const std::vector & embd_inp, std::vector & embd_w, - bool logits_all, size_t & mem_per_token, bool use_scratch) { - const int N = embd_inp.size(); - - const auto & hparams = model.hparams; - - const int n_embd = hparams.d_model; - const int n_layer = hparams.n_layers; - const int n_head = hparams.n_heads; - const int n_vocab = hparams.n_vocab; - const int n_ctx = hparams.n_ctx; - - static size_t buf_size = 256u * 1024 * 1024; - static void * buf = malloc(buf_size); - - // use 2 scratch buffers - // TODO: very hacky solution - reimplement in a more elegant way - //MPT 30B needs more scratch memory - static size_t scr0_size = (n_embd>=7168?2048u:1024u)*1024*1024*(hparams.n_ctx>8192?2:1); - static size_t scr1_size = (n_embd>=7168?2048u:1024u)*1024*1024; - - static void * scr0 = malloc(scr0_size); - static void * scr1 = malloc(scr1_size); - - if (mem_per_token > 0 && (mem_per_token*N*2 + 64u*1024*1024) > buf_size) { - const size_t buf_size_new = 320u*1024*1024 + 1.2*(mem_per_token*N); // add 10% to account for ggml object overhead - // printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, - // buf_size, buf_size_new); - // reallocate - if (buf_size_new > buf_size) - { - buf_size = buf_size_new; - buf = realloc(buf, buf_size); - if (buf == nullptr) { - fprintf(stderr, "%s: failed to allocate %zu bytes. 
Try reducing batch size.\n", __func__, buf_size); - return false; - } - } - } - - struct ggml_init_params params; - params.mem_size = buf_size; - params.mem_buffer = buf; - params.no_alloc = false; - - struct ggml_context * ctx0 = ggml_init(params); - struct ggml_cgraph gf = {}; - - struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(embd->data, embd_inp.data(), N * ggml_element_size(embd)); - - struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte_weight, embd); - - for (int il = 0; il < n_layer; ++il) { - - struct ggml_tensor * cur; - - if(use_scratch){ - ggml_set_scratch(ctx0, { 0, scr0_size, scr0, }); - } - - // a = self.ln_1(x) - { - cur = ggml_norm(ctx0, inpL, default_norm_eps); - - cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_1_weight, cur), cur); - } - - // self-attention - // b, _, past_key_value = self.attn(a, past_key_value=past_key_value, - // attn_bias=attn_bias, attention_mask=attention_mask, - // is_causal=is_causal) - { - // compute QKV - cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_wqkv_weight, cur); - - if (model.hparams.clip_qkv > 0.0f) { - cur = ggml_clamp(ctx0, cur, -model.hparams.clip_qkv, model.hparams.clip_qkv); - } - - struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd); - struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd); - struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd); - - // store key and value to memory - { - struct ggml_tensor * k = - ggml_view_1d(ctx0, model.memory_k, N * n_embd, - (ggml_element_size(model.memory_k) * n_embd) * (il * n_ctx + n_past)); - struct ggml_tensor * v = - ggml_view_1d(ctx0, model.memory_v, N * n_embd, - (ggml_element_size(model.memory_v) * n_embd) * (il * n_ctx + n_past)); - - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); - } - - // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, - // 2, 1, 3) [64, N, 12] - struct ggml_tensor * Q = ggml_permute( - ctx0, ggml_cpy(ctx0, Qcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2, - 1, 3); - - // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, - // 3) [64, n_past + N, 12] - struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_k, (n_past + N) * n_embd, - il * n_ctx * ggml_element_size(model.memory_k) * n_embd), - n_embd / n_head, n_head, n_past + N), - 0, 2, 1, 3); - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - - // KQ_scaled = KQ / sqrt(n_embd/n_head) - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, KQ, ggml_new_f32(ctx0, 1.0f / sqrt(float(n_embd) / n_head))); - - struct ggml_tensor * KQ_scaled_alibi = - ggml_alibi(ctx0, KQ_scaled, n_past, n_head, model.hparams.alibi_bias_max); - - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past); - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - - // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, - // 2, 0, 3).contiguous() [n_past + N, 64, 12] - struct ggml_tensor * V_trans = ggml_cpy( - ctx0, - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_v, (n_past + N) * n_embd, - il * n_ctx * ggml_element_size(model.memory_v) * n_embd), - n_embd / n_head, n_head, n_past + N), - 1, 2, 0, 
3), - ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd / n_head, n_head)); - - // KQV = transpose(V) * KQ_soft_max - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - // cur = KQV_merged.contiguous().view(n_embd, N) - cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - - // projection - { cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_out_proj_weight, cur); } - } - - inpL = ggml_add(ctx0, inpL, cur); - - if(use_scratch){ - ggml_set_scratch(ctx0, { 0, scr1_size, scr1, }); - } - - // m = self.ln_2(x) - { - cur = ggml_norm(ctx0, inpL, default_norm_eps); - - cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_2_weight, cur), cur); - } - - // n = self.mlp(m) - { - - cur = ggml_mul_mat(ctx0, model.layers[il].ffn_up_proj, cur); - - // GELU activation - cur = ggml_gelu(ctx0, cur); - - // projection - // cur = proj_w*cur + proj_b - cur = ggml_mul_mat(ctx0, model.layers[il].ffn_down_proj, cur); - } - - // x = x + n - inpL = ggml_add(ctx0, inpL, cur); - } - - if(use_scratch){ - ggml_set_scratch(ctx0, { 0, scr0_size, scr0, }); - } - - // norm - { - inpL = ggml_norm(ctx0, inpL, default_norm_eps); - // inpL = ln_f_g*inpL - inpL = ggml_mul(ctx0, ggml_repeat(ctx0, model.norm_f_weight, inpL), inpL); - } - - if(use_scratch){ - ggml_set_scratch(ctx0, { 0, 0, nullptr, }); - } - - // output embedding weight tied to input embedding - inpL = ggml_mul_mat(ctx0, model.wte_weight, inpL); - - // logits -> probs - // inpL = ggml_soft_max(ctx0, inpL); - - // run the computation - ggml_build_forward_expand(&gf, inpL); - kcpp_graph_compute_helper(&gf, n_threads); - - // std::cout << "Qcur" << std::endl; - // print_tensor(Qcur); - - // if (n_past%100 == 0) { - // ggml_graph_print(&gf); - // ggml_graph_dump_dot(&gf, NULL, "mpt-model.dot"); - // } - - if (logits_all) { - // return result for all tokens - embd_w.resize(n_vocab *N); - memcpy(embd_w.data(), (float *)ggml_get_data(inpL) , sizeof(float) * n_vocab * N); - } else { - // return result for just the last token - embd_w.resize(n_vocab); - memcpy(embd_w.data(), (float *)ggml_get_data(inpL) + (n_vocab * (N - 1)), sizeof(float) * n_vocab); - } - - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0) / N; - } - // printf("used_mem = %zu\n", ggml_used_mem(ctx0)); - - ggml_free(ctx0); - - return true; -} diff --git a/spaces/JCTN/controlnet-segment-anything/README.md b/spaces/JCTN/controlnet-segment-anything/README.md deleted file mode 100644 index e491c5a14fa47a7e3182e751f25e4e3438ded024..0000000000000000000000000000000000000000 --- a/spaces/JCTN/controlnet-segment-anything/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Controlnet Segment Anything -emoji: 😻 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: mit -tags: -- jax-diffusers-event ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/JLD/clip-image-search/README.md b/spaces/JLD/clip-image-search/README.md deleted file mode 100644 index 567b111a6f6887dd2ad4cdd3f0e6bf5ba05b18f6..0000000000000000000000000000000000000000 --- a/spaces/JLD/clip-image-search/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Image Search -emoji: 🔍 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.14.0 -python_version: 3.9 -app_file: app.py -pinned: 
true ---- diff --git a/spaces/Jamkonams/AutoGPT/autogpt/json_utils/json_fix_llm.py b/spaces/Jamkonams/AutoGPT/autogpt/json_utils/json_fix_llm.py deleted file mode 100644 index 869aed125cfb8cd7a69ed02eeb389cc72a3e296b..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/json_utils/json_fix_llm.py +++ /dev/null @@ -1,220 +0,0 @@ -"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance -of the ChatGPT API or LLM models.""" -from __future__ import annotations - -import contextlib -import json -from typing import Any, Dict - -from colorama import Fore -from regex import regex - -from autogpt.config import Config -from autogpt.json_utils.json_fix_general import correct_json -from autogpt.llm_utils import call_ai_function -from autogpt.logs import logger -from autogpt.speech import say_text - -JSON_SCHEMA = """ -{ - "command": { - "name": "command name", - "args": { - "arg name": "value" - } - }, - "thoughts": - { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user" - } -} -""" - -CFG = Config() - - -def auto_fix_json(json_string: str, schema: str) -> str: - """Fix the given JSON string to make it parseable and fully compliant with - the provided schema using GPT-3. - - Args: - json_string (str): The JSON string to fix. - schema (str): The schema to use to fix the JSON. - Returns: - str: The fixed JSON string. - """ - # Try to fix the JSON using GPT: - function_string = "def fix_json(json_string: str, schema:str=None) -> str:" - args = [f"'''{json_string}'''", f"'''{schema}'''"] - description_string = ( - "This function takes a JSON string and ensures that it" - " is parseable and fully compliant with the provided schema. If an object" - " or field specified in the schema isn't contained within the correct JSON," - " it is omitted. The function also escapes any double quotes within JSON" - " string values to ensure that they are valid. If the JSON string contains" - " any None or NaN values, they are replaced with null before being parsed." - ) - - # If it doesn't already start with a "`", add one: - if not json_string.startswith("`"): - json_string = "```json\n" + json_string + "\n```" - result_string = call_ai_function( - function_string, args, description_string, model=CFG.fast_llm_model - ) - logger.debug("------------ JSON FIX ATTEMPT ---------------") - logger.debug(f"Original JSON: {json_string}") - logger.debug("-----------") - logger.debug(f"Fixed JSON: {result_string}") - logger.debug("----------- END OF FIX ATTEMPT ----------------") - - try: - json.loads(result_string) # just check the validity - return result_string - except json.JSONDecodeError: # noqa: E722 - # Get the call stack: - # import traceback - # call_stack = traceback.format_exc() - # print(f"Failed to fix JSON: '{json_string}' "+call_stack) - return "failed" - - -def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: - """Fix the given JSON string to make it parseable and fully compliant with two techniques. - - Args: - json_string (str): The JSON string to fix. - - Returns: - str: The fixed JSON string. 
- """ - - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - if assistant_reply_json == {}: - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - - if assistant_reply_json != {}: - return assistant_reply_json - - logger.error( - "Error: The following AI output couldn't be converted to a JSON:\n", - assistant_reply, - ) - if CFG.speak_mode: - say_text("I have received an invalid JSON response from the OpenAI API.") - - return {} - - -def fix_and_parse_json( - json_to_load: str, try_to_fix_with_gpt: bool = True -) -> Dict[Any, Any]: - """Fix and parse JSON string - - Args: - json_to_load (str): The JSON string. - try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT. - Defaults to True. - - Returns: - str or dict[Any, Any]: The parsed JSON. - """ - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = json_to_load.replace("\t", "") - return json.loads(json_to_load) - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = correct_json(json_to_load) - return json.loads(json_to_load) - # Let's do something manually: - # sometimes GPT responds with something BEFORE the braces: - # "I'm sorry, I don't understand. Please try again." - # {"text": "I'm sorry, I don't understand. Please try again.", - # "confidence": 0.0} - # So let's try to find the first brace and then parse the rest - # of the string - try: - brace_index = json_to_load.index("{") - maybe_fixed_json = json_to_load[brace_index:] - last_brace_index = maybe_fixed_json.rindex("}") - maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1] - return json.loads(maybe_fixed_json) - except (json.JSONDecodeError, ValueError) as e: - return try_ai_fix(try_to_fix_with_gpt, e, json_to_load) - - -def try_ai_fix( - try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str -) -> Dict[Any, Any]: - """Try to fix the JSON with the AI - - Args: - try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI. - exception (Exception): The exception that was raised. - json_to_load (str): The JSON string to load. - - Raises: - exception: If try_to_fix_with_gpt is False. - - Returns: - str or dict[Any, Any]: The JSON string or dictionary. - """ - if not try_to_fix_with_gpt: - raise exception - if CFG.debug_mode: - logger.warn( - "Warning: Failed to parse AI output, attempting to fix." - "\n If you see this warning frequently, it's likely that" - " your prompt is confusing the AI. Try changing it up" - " slightly." - ) - # Now try to fix this up using the ai_functions - ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA) - - if ai_fixed_json != "failed": - return json.loads(ai_fixed_json) - # This allows the AI to react to the error message, - # which usually results in it correcting its ways. - # logger.error("Failed to fix AI output, telling the AI.") - return {} - - -def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): - if CFG.speak_mode and CFG.debug_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API. " - "Trying to fix it now." 
- ) - logger.error("Attempting to fix JSON by finding outermost brackets\n") - - try: - json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") - json_match = json_pattern.search(json_string) - - if json_match: - # Extract the valid JSON object from the string - json_string = json_match.group(0) - logger.typewriter_log( - title="Apparently json was fixed.", title_color=Fore.GREEN - ) - if CFG.speak_mode and CFG.debug_mode: - say_text("Apparently json was fixed.") - else: - return {} - - except (json.JSONDecodeError, ValueError): - if CFG.debug_mode: - logger.error(f"Error: Invalid JSON: {json_string}\n") - if CFG.speak_mode: - say_text("Didn't work. I will have to ignore this response then.") - logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") - json_string = {} - - return fix_and_parse_json(json_string) diff --git a/spaces/JavierIA/gccopen/models/yolo.py b/spaces/JavierIA/gccopen/models/yolo.py deleted file mode 100644 index 95a019c6aeec8c3f1d582907d5fe7ff3ed6b9369..0000000000000000000000000000000000000000 --- a/spaces/JavierIA/gccopen/models/yolo.py +++ /dev/null @@ -1,843 +0,0 @@ -import argparse -import logging -import sys -from copy import deepcopy - -sys.path.append('./') # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) -import torch -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr -from utils.loss import SigmoidBin - -try: - import thop # for FLOPS computation -except ImportError: - thop = None - - -class Detect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(Detect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. 
* self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IKeypoint(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer - super(IKeypoint, self).__init__() - self.nc = nc # number of classes - self.nkpt = nkpt - self.dw_conv_kpt = dw_conv_kpt - self.no_det=(nc + 5) # number of outputs per anchor for box and class - self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints - self.no = self.no_det+self.no_kpt - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.flip_test = False - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) - - if self.nkpt is not None: - if self.dw_conv_kpt: #keypoint head is slightly more complex - self.m_kpt = nn.ModuleList( - nn.Sequential(DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x,x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), Conv(x, x), - DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) - else: #keypoint head is a single convolution - self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) - - self.inplace = inplace # use in-place ops (e.g. 
slice assignment) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - if self.nkpt is None or self.nkpt==0: - x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv - else : - x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) - - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - x_det = x[i][..., :6] - x_kpt = x[i][..., 6:] - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - kpt_grid_x = self.grid[i][..., 0:1] - kpt_grid_y = self.grid[i][..., 1:2] - - if self.nkpt == 0: - y = x[i].sigmoid() - else: - y = x_det.sigmoid() - - if self.inplace: - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh - if self.nkpt != 0: - x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy - #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy - #print('=============') - #print(self.anchor_grid[i].shape) - #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) - #print(x_kpt[..., 0::3].shape) - #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy - #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy - x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() - - y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) - - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - if self.nkpt != 0: - y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy - y = torch.cat((xy, wh, y[..., 4:]), -1) - - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class IAuxDetect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - end2end = False - include_nms = False - concat = False - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super(IAuxDetect, self).__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv - self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) - - def forward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - x[i+self.nl] = self.m2[i](x[i+self.nl]) - x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy - wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x[:self.nl]) - - def fuseforward(self, x): - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - if not torch.onnx.is_in_onnx_export(): - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: - xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) - z.append(y.view(bs, -1, self.no)) - - if self.training: - out = x - elif self.end2end: - out = torch.cat(z, 1) - elif self.include_nms: - z = self.convert(z) - out = (z, ) - elif self.concat: - out = torch.cat(z, 1) - else: - out = (torch.cat(z, 1), x) - - return out - - def fuse(self): - print("IAuxDetect.fuse") - # fuse ImplicitA and Convolution - for i in range(len(self.m)): - c1,c2,_,_ = self.m[i].weight.shape - c1_,c2_, _,_ = self.ia[i].implicit.shape - self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) - - # fuse ImplicitM and Convolution - for i in range(len(self.m)): - c1,c2, _,_ = self.im[i].implicit.shape - self.m[i].bias *= self.im[i].implicit.reshape(c2) - self.m[i].weight *= self.im[i].implicit.transpose(0,1) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - def convert(self, z): - z = torch.cat(z, 1) - box = z[:, :, :4] - conf = z[:, :, 4:5] - score = z[:, :, 5:] - score *= conf - convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=z.device) - box @= convert_matrix - return (box, score) - - -class IBin(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer - super(IBin, self).__init__() - self.nc = nc # number of classes - self.bin_count = bin_count - - self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) - # classes, x,y,obj - self.no = nc + 3 + \ - self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce - # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() - - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - self.ia = nn.ModuleList(ImplicitA(x) for x in ch) - self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) - - def forward(self, x): - - #self.x_bin_sigmoid.use_fw_regression = True - #self.y_bin_sigmoid.use_fw_regression = True - self.w_bin_sigmoid.use_fw_regression = True - self.h_bin_sigmoid.use_fw_regression = True - - # x = x.copy() # for profiling - z = [] # inference output - self.training |= self.export - for i in range(self.nl): - x[i] = self.m[i](self.ia[i](x[i])) # conv - x[i] = self.im[i](x[i]) - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy - #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - - - #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] - #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] - - pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] - ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] - - #y[..., 0] = px - #y[..., 1] = py - y[..., 2] = pw - y[..., 3] = ph - - y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) - - z.append(y.view(bs, -1, y.shape[-1])) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class Model(nn.Module): - def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() - self.traced = False - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IAuxDetect): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward - #print(m.stride) - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_aux_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IBin): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases_bin() # only run once - # print('Strides: %s' % m.stride.tolist()) - if isinstance(m, IKeypoint): - s = 256 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 
1, 1) - self.stride = m.stride - self._initialize_biases_kpt() # only run once - # print('Strides: %s' % m.stride.tolist()) - - # Init weights, biases - initialize_weights(self) - self.info() - logger.info('') - - def forward(self, x, augment=False, profile=False): - if augment: - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi[..., :4] /= si # de-scale - if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud - elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr - y.append(yi) - return torch.cat(y, 1), None # augmented inference, train - else: - return self.forward_once(x, profile) # single-scale inference, train - - def forward_once(self, x, profile=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - if not hasattr(self, 'traced'): - self.traced=False - - if self.traced: - if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): - break - - if profile: - c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS - for _ in range(10): - m(x.copy() if c else x) - t = time_synchronized() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) - - x = m(x) # run - - y.append(x if m.i in self.save else None) # save output - - if profile: - print('%.1fms total' % sum(dt)) - return x - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
- m = self.model[-1] # Detect() module - for mi, mi2, s in zip(m.m, m.m2, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) - - def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Bin() module - bc = m.bin_count - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - old = b[:, (0,1,2,bc+3)].data - obj_idx = 2*bc+4 - b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) - b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - b[:, (0,1,2,bc+3)].data = old - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') - for m in self.model.modules(): - if isinstance(m, RepConv): - #print(f" fuse_repvgg_block") - m.fuse_repvgg_block() - elif isinstance(m, RepConv_OREPA): - #print(f" switch_to_deploy") - m.switch_to_deploy() - elif type(m) is Conv and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward - elif isinstance(m, (IDetect, IAuxDetect)): - m.fuse() - m.forward = m.fuseforward - self.info() - return self - - def nms(self, mode=True): # add or remove NMS module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - print('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - print('Removing NMS... 
') - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add autoShape module - print('Adding autoShape... ') - m = autoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - -def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, - SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, - Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - Res, ResCSPA, ResCSPB, ResCSPC, - RepRes, RepResCSPA, RepResCSPB, RepResCSPC, - ResX, ResXCSPA, ResXCSPB, ResXCSPC, - RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, - Ghost, GhostCSPA, GhostCSPB, GhostCSPC, - SwinTransformerBlock, STCSPA, STCSPB, STCSPC, - SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in [DownC, SPPCSPC, GhostSPPCSPC, - BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, - RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, - ResCSPA, ResCSPB, ResCSPC, - RepResCSPA, RepResCSPB, RepResCSPC, - ResXCSPA, ResXCSPB, ResXCSPC, - RepResXCSPA, RepResXCSPB, RepResXCSPC, - GhostCSPA, GhostCSPB, GhostCSPC, - STCSPA, STCSPB, STCSPC, - ST2CSPA, ST2CSPB, ST2CSPC]: - args.insert(2, n) # number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum([ch[x] for x in f]) - elif m is Chuncat: - c2 = sum([ch[x] for x in f]) - elif m is Shortcut: - c2 = ch[f[0]] - elif m is Foldcut: - c2 = ch[f] // 2 - elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - elif m is ReOrg: - c2 = ch[f] * 4 - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ 
== '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file - set_logging() - device = select_device(opt.device) - - # Create model - model = Model(opt.cfg).to(device) - model.train() - - if opt.profile: - img = torch.rand(1, 3, 640, 640).to(device) - y = model(img, profile=True) - - # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) - # y = model(img, profile=True) - - # Tensorboard - # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/spaces/JavierIA/gccopen/utils/general.py b/spaces/JavierIA/gccopen/utils/general.py deleted file mode 100644 index faf908f960bfbb7797260a5135827019781001a1..0000000000000000000000000000000000000000 --- a/spaces/JavierIA/gccopen/utils/general.py +++ /dev/null @@ -1,891 +0,0 @@ -# YOLOR general utils - -import glob -import logging -import math -import os -import platform -import random -import re -import subprocess -import time -from pathlib import Path - -import cv2 -import numpy as np -import pandas as pd -import torch -import torchvision -import yaml - -from utils.google_utils import gsutil_getsize -from utils.metrics import fitness -from utils.torch_utils import init_torch_seeds - -# Settings -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -pd.options.display.max_columns = 10 -cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads - - -def set_logging(rank=-1): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if rank in [-1, 0] else logging.WARN) - - -def init_seeds(seed=0): - # Initialize random number generator (RNG) seeds - random.seed(seed) - np.random.seed(seed) - init_torch_seeds(seed) - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def isdocker(): - # Is environment a Docker container - return Path('/workspace').exists() # or Path('/.dockerenv').exists() - - -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -def check_online(): - # Check internet connectivity - import socket - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False - - -def check_git_status(): - # Recommend 'git pull' if code is out of date - print(colorstr('github: '), end='') - try: - assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not isdocker(), 'skipping check (Docker image)' - assert check_online(), 'skipping check (offline)' - - cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind - if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." - else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe - except Exception as e: - print(e) - - -def check_requirements(requirements='requirements.txt', exclude=()): - # Check installed dependencies meet requirements (pass *.txt file or list of packages) - import pkg_resources as pkg - prefix = colorstr('red', 'bold', 'requirements:') - if isinstance(requirements, (str, Path)): # requirements.txt file - file = Path(requirements) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] - else: # list or tuple of packages - requirements = [x for x in requirements if x not in exclude] - - n = 0 # number of package updates - for r in requirements: - try: - pkg.require(r) - except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 - print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...") - print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) - - if n: # if packages updated - source = file.resolve() if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) # emoji-safe - - -def check_img_size(img_size, s=32): - # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple - if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) - return new_size - - -def check_imshow(): - # Check if environment supports image displays - try: - assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' - cv2.imshow('test', np.zeros((1, 1, 3))) - cv2.waitKey(1) - cv2.destroyAllWindows() - cv2.waitKey(1) - return True - except Exception as e: - print(f'WARNING: 
Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') - return False - - -def check_file(file): - # Search for file if not found - if Path(file).is_file() or file == '': - return file - else: - files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), f'File Not Found: {file}' # assert file was found - assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique - return files[0] # return file - - -def check_dataset(dict): - # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') - if val and len(val): - val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script - print('Downloading %s ...' % s) - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - torch.hub.download_url_to_file(s, f) - r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip - else: # bash script - r = os.system(s) - print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value - else: - raise Exception('Dataset not found.') - - -def make_divisible(x, divisor): - # Returns x evenly divisible by divisor - return math.ceil(x / divisor) * divisor - - -def clean_str(s): - # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) - - -def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 - return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - - -def colorstr(*input): - # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - colors = {'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(np.int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights) - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class_weights and image contents - class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) - image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - return image_weights - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - return x - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom 
right y - return y - - -def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): - # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - return y - - -def xyn2xy(x, w=640, h=640, padw=0, padh=0): - # Convert normalized segments into pixel segments, shape (n,2) - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y - return y - - -def segment2box(segment, width=640, height=640): - # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) - x, y = segment.T # segment xy - inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy - - -def segments2boxes(segments): - # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) - boxes = [] - for s in segments: - x, y = s.T # segment xy - boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy - return xyxy2xywh(np.array(boxes)) # cls, xywh - - -def resample_segments(segments, n=1000): - # Up-sample an (n,2) segment - for i, s in enumerate(segments): - x = np.linspace(0, len(s) - 1, n) - xp = np.arange(len(s)) - segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy - return segments - - -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords - - -def clip_coords(boxes, img_shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - - -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - - if GIoU or DIoU or CIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - - - -def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9): - # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - # change iou into pow(iou+eps) - # iou = inter / union - iou = torch.pow(inter/union + eps, alpha) - # beta = 2 * alpha - if GIoU or DIoU or CIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal - rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2) - rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2) - rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha # center distance - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha_ciou = v / ((1 + eps) - inter / union + v) - # return iou - (rho2 / c2 + v * alpha_ciou) # CIoU - return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - # c_area = cw * ch + eps # convex area - # return iou - (c_area - union) / c_area # GIoU - c_area = torch.max(cw * ch + eps, union) # convex area - return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU - else: - return iou # torch.log(iou+eps) or iou - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) - - -def box_giou(box1, box2): - """ - Return generalized intersection-over-union (Jaccard index) between two sets of boxes. 
- Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with - ``0 <= x1 < x2`` and ``0 <= y1 < y2``. - Args: - boxes1 (Tensor[N, 4]): first set of boxes - boxes2 (Tensor[M, 4]): second set of boxes - Returns: - Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values - for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - union = (area1[:, None] + area2 - inter) - - iou = inter / union - - lti = torch.min(box1[:, None, :2], box2[:, :2]) - rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) - - whi = (rbi - lti).clamp(min=0) # [N,M,2] - areai = whi[:, :, 0] * whi[:, :, 1] - - return iou - (areai - union) / areai - - -def box_ciou(box1, box2, eps: float = 1e-7): - """ - Return complete intersection-over-union (Jaccard index) between two sets of boxes. - Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with - ``0 <= x1 < x2`` and ``0 <= y1 < y2``. - Args: - boxes1 (Tensor[N, 4]): first set of boxes - boxes2 (Tensor[M, 4]): second set of boxes - eps (float, optional): small number to prevent division by zero. Default: 1e-7 - Returns: - Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values - for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - union = (area1[:, None] + area2 - inter) - - iou = inter / union - - lti = torch.min(box1[:, None, :2], box2[:, :2]) - rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) - - whi = (rbi - lti).clamp(min=0) # [N,M,2] - diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps - - # centers of boxes - x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 - y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 - x_g = (box2[:, 0] + box2[:, 2]) / 2 - y_g = (box2[:, 1] + box2[:, 3]) / 2 - # The distance between boxes' centers squared. - centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 - - w_pred = box1[:, None, 2] - box1[:, None, 0] - h_pred = box1[:, None, 3] - box1[:, None, 1] - - w_gt = box2[:, 2] - box2[:, 0] - h_gt = box2[:, 3] - box2[:, 1] - - v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) - with torch.no_grad(): - alpha = v / (1 - iou + v + eps) - return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v - - -def box_diou(box1, box2, eps: float = 1e-7): - """ - Return distance intersection-over-union (Jaccard index) between two sets of boxes. - Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with - ``0 <= x1 < x2`` and ``0 <= y1 < y2``. - Args: - boxes1 (Tensor[N, 4]): first set of boxes - boxes2 (Tensor[M, 4]): second set of boxes - eps (float, optional): small number to prevent division by zero. 
Default: 1e-7 - Returns: - Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values - for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - union = (area1[:, None] + area2 - inter) - - iou = inter / union - - lti = torch.min(box1[:, None, :2], box2[:, :2]) - rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) - - whi = (rbi - lti).clamp(min=0) # [N,M,2] - diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps - - # centers of boxes - x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 - y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 - x_g = (box2[:, 0] + box2[:, 2]) / 2 - y_g = (box2[:, 1] + box2[:, 3]) / 2 - # The distance between boxes' centers squared. - centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 - - # The distance IoU is the IoU penalized by a normalized - # distance between boxes' centers squared. - return iou - (centers_distance_squared / diagonal_distance_squared) - - -def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, - labels=()): - """Runs Non-Maximum Suppression (NMS) on inference results - - Returns: - list of detections, on (n,6) tensor per image [xyxy, conf, cls] - """ - - nc = prediction.shape[2] - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height - max_det = 300 # maximum number of detections per image - max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 10.0 # seconds to quit after - redundant = True # require redundant detections - multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - merge = False # use merge-NMS - - t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # Cat apriori labels if autolabelling - if labels and len(labels[xi]): - l = labels[xi] - v = torch.zeros((len(l), nc + 5), device=x.device) - v[:, :4] = l[:, 1:5] # box - v[:, 4] = 1.0 # conf - v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - x = torch.cat((x, v), 0) - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - if nc == 1: - x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5, - # so there is no need to multiplicate. 
- else: - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - - # Filter by class - if classes is not None: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # Check shape - n = x.shape[0] # number of boxes - if not n: # no boxes - continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - print(f'WARNING: NMS time limit {time_limit}s exceeded') - break # time limit exceeded - - return output - - -def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, - labels=(), kpt_label=False, nc=None, nkpt=None): - """Runs Non-Maximum Suppression (NMS) on inference results - - Returns: - list of detections, on (n,6) tensor per image [xyxy, conf, cls] - """ - if nc is None: - nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height - max_det = 300 # maximum number of detections per image - max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 10.0 # seconds to quit after - redundant = True # require redundant detections - multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - merge = False # use merge-NMS - - t = time.time() - output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0] - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # Cat apriori labels if autolabelling - if labels and len(labels[xi]): - l = labels[xi] - v = torch.zeros((len(l), nc + 5), device=x.device) - v[:, :4] = l[:, 1:5] # box - v[:, 4] = 1.0 # conf - v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - x = torch.cat((x, v), 0) - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > 
conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - if not kpt_label: - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - else: - kpts = x[:, 6:] - conf, j = x[:, 5:6].max(1, keepdim=True) - x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres] - - - # Filter by class - if classes is not None: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # Check shape - n = x.shape[0] # number of boxes - if not n: # no boxes - continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - print(f'WARNING: NMS time limit {time_limit}s exceeded') - break # time limit exceeded - - return output - - -def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - if x.get('ema'): - x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys - x[k] = None - x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): - p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") - - -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) - - if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local - - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness - - # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) - - if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload - - -def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for j, a in enumerate(d): # per item - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, cutout) - - im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255.0 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction - x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections - - return x - - -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
- path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): - return str(path) - else: - dirs = glob.glob(f"{path}{sep}*") # similar paths - matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - i = [int(m.groups()[0]) for m in matches if m] # indices - n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path diff --git a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/models/transformer.py b/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/models/transformer.py deleted file mode 100644 index a51874d75f66b560624a2052919c5549630d54aa..0000000000000000000000000000000000000000 --- a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/models/transformer.py +++ /dev/null @@ -1,120 +0,0 @@ -from ..custom_types import * - - -class FeedForward(nn.Module): - def __init__(self, in_dim, h_dim, out_d: Optional[int] = None, act=nnf.relu, dropout=0.): - super().__init__() - out_d = out_d if out_d is not None else in_dim - self.fc1 = nn.Linear(in_dim, h_dim) - self.act = act - self.fc2 = nn.Linear(h_dim, out_d) - self.dropout = nn.Dropout(dropout) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.dropout(x) - x = self.fc2(x) - x = self.dropout(x) - return x - - -class MultiHeadAttention(nn.Module): - - def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim_self // num_heads - self.scale = head_dim ** -0.5 - self.to_queries = nn.Linear(dim_self, dim_self, bias=bias) - self.to_keys_values = nn.Linear(dim_ref, dim_self * 2, bias=bias) - self.project = nn.Linear(dim_self, dim_self) - self.dropout = nn.Dropout(dropout) - - def forward_interpolation(self, queries: T, keys: T, values: T, alpha: T, mask: TN = None) -> TNS: - attention = torch.einsum('nhd,bmhd->bnmh', queries[0], keys) * self.scale - if mask is not None: - if mask.dim() == 2: - mask = mask.unsqueeze(1) - attention = attention.masked_fill(mask.unsqueeze(3), float("-inf")) - attention = attention.softmax(dim=2) - attention = attention * alpha[:, None, None, None] - out = torch.einsum('bnmh,bmhd->nhd', attention, values).reshape(1, attention.shape[1], -1) - return out, attention - - def forward(self, x, y: Optional[T] = None, mask: Optional[T] = None, alpha: TN = None): - y = y if y is not None else x - b_a, n, c = x.shape - b, m, d = y.shape - # b n h dh - queries = self.to_queries(x).reshape(b_a, n, self.num_heads, c // self.num_heads) - # b m 2 h dh - keys_values = self.to_keys_values(y).reshape(b, m, 2, self.num_heads, c // self.num_heads) - keys, values = keys_values[:, :, 0], keys_values[:, :, 1] - if alpha is not None: - out, attention = self.forward_interpolation(queries, keys, values, alpha, mask) - else: - attention = torch.einsum('bnhd,bmhd->bnmh', queries, keys) * self.scale - if mask is not None: - if mask.dim() == 2: - mask = mask.unsqueeze(1) - attention = attention.masked_fill(mask.unsqueeze(3), float("-inf")) - attention = attention.softmax(dim=2) - out = torch.einsum('bnmh,bmhd->bnhd', attention, values).reshape(b, n, c) - out = self.project(out) - return out, attention - - -class TransformerLayer(nn.Module): - - def forward_with_attention(self, x, y: Optional[T] = None, mask: Optional[T] = None, alpha: TN = None): - x_, attention = self.attn(self.norm1(x), y, mask, alpha) - x = x + x_ - x = x + self.mlp(self.norm2(x)) - return x, attention - - def forward(self, x, y: Optional[T] = None, mask: Optional[T] = None, alpha: TN = 
None): - x = x + self.attn(self.norm1(x), y, mask, alpha)[0] - x = x + self.mlp(self.norm2(x)) - return x - - def __init__(self, dim_self, dim_ref, num_heads, mlp_ratio=4., bias=False, dropout=0., act=nnf.relu, - norm_layer: nn.Module = nn.LayerNorm): - super().__init__() - self.norm1 = norm_layer(dim_self) - self.attn = MultiHeadAttention(dim_self, dim_ref, num_heads, bias=bias, dropout=dropout) - self.norm2 = norm_layer(dim_self) - self.mlp = FeedForward(dim_self, int(dim_self * mlp_ratio), act=act, dropout=dropout) - - -class DummyTransformer: - - @staticmethod - def forward_with_attention(x, *_, **__): - return x, [] - - @staticmethod - def forward(x, *_, **__): - return x - - -class Transformer(nn.Module): - - def forward_with_attention(self, x, y: Optional[T] = None, mask: Optional[T] = None, alpha: TN = None): - attentions = [] - for layer in self.layers: - x, att = layer.forward_with_attention(x, y, mask, alpha) - attentions.append(att) - return x, attentions - - def forward(self, x, y: TN = None, mask: TN = None, alpha: TN = None): - for layer in self.layers: - x = layer(x, y, mask, alpha) - return x - - def __init__(self, dim_self: int, num_heads: int, num_layers: int, dim_ref: Optional[int] = None, - mlp_ratio: float = 2., act=nnf.relu, norm_layer: nn.Module = nn.LayerNorm): - super(Transformer, self).__init__() - dim_ref = dim_ref if dim_ref is not None else dim_self - self.layers = nn.ModuleList([TransformerLayer(dim_self, dim_ref, num_heads, mlp_ratio, act=act, - norm_layer=norm_layer) for _ in range(num_layers)]) diff --git a/spaces/Kakashi098/Narrative/app.py b/spaces/Kakashi098/Narrative/app.py deleted file mode 100644 index 3a62fc322fe51982e94e66f778ed04d22f422287..0000000000000000000000000000000000000000 --- a/spaces/Kakashi098/Narrative/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import cohere -import gradio as gr -import requests -from PIL import Image -import io - -def generate_text(prompt,maxi,cohere_api_key): - co = cohere.Client(cohere_api_key) - - response = co.generate(prompt=prompt, - temperature=0, - max_tokens=maxi) - return response[0] - -def generate_image(prompt,gradio_api_key): - r = requests.post('https://clipdrop-api.co/text-to-image/v1', - files={ - 'prompt': (None, prompt, 'text/plain') - }, - headers={'x-api-key': gradio_api_key} - ) - - if r.ok: - images = Image.open(io.BytesIO(r.content)) - return images - else: - raise ValueError("Failed to generate image") - -def text_and_image_generator(prompt,maxi,cohere_api_key,gradio_api_key): - cohere_api_key = "jNjJ71MvKxwbLDEz1ABIf5IMRuXvIUDMTx5HEhfP" - gradio_api_key = "ae9e2d3ac28caa8e845f4a6e33de8018ee109bdea7c3d10135af8797ac1bd8b6a73140a961e346fbb02204aa63aa4f38" - text = generate_text(f"Generate a story with {prompt}. 
Conclude the generated story properly.",maxi,cohere_api_key) - image = generate_image(text,gradio_api_key) - title = generate_text(f"title for the story {prompt} within 5 words",5,cohere_api_key) - - return title,text,image - -app = gr.Interface( - title="Story and Image Generator", - fn=text_and_image_generator, - inputs = [gr.inputs.Textbox(label="Enter your prompt to generate a story"), - gr.inputs.Slider(420,1000,label="Story length")], - outputs= [gr.outputs.Textbox(label="Story title"),gr.outputs.Textbox(label="Story"),gr.outputs.Image(type="pil",label="Image based on the Generated story")],theme="dark" -) - -app.launch() \ No newline at end of file diff --git a/spaces/Kangarroar/streamlit-docker-example/README.md b/spaces/Kangarroar/streamlit-docker-example/README.md deleted file mode 100644 index 0a0bd19e81bd58b3a3a64670f8603ae8d03310ab..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/streamlit-docker-example/README.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Streamlit Docker Template -emoji: 📉 -colorFrom: blue -colorTo: green -sdk: docker -app_port: 8501 -pinned: false -duplicated_from: DockerTemplates/streamlit-docker-example ---- - -## 🧠 Streamlit Docker Template 🔎 - -Streamlit Docker Template is a template for creating a Streamlit app with Docker and Hugging Face Spaces. - -Code from https://docs.streamlit.io/library/get-started/create-an-app - ---- -extra_gated_prompt: "You agree to not use the model to conduct experiments that cause harm to human subjects." -extra_gated_fields: - Company: text - Country: text - I agree to use this model for non-commerical use ONLY: checkbox ---- \ No newline at end of file diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/inference.py b/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/inference.py deleted file mode 100644 index af7bf083ffc9bed33ea6e2c77cb7f69e6b5c0475..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/inference.py +++ /dev/null @@ -1,171 +0,0 @@ -import torch -from synthesizer import audio -from synthesizer.hparams import hparams -from synthesizer.models.tacotron import Tacotron -from synthesizer.utils.symbols import symbols -from synthesizer.utils.text import text_to_sequence -from vocoder.display import simple_table -from pathlib import Path -from typing import Union, List -import numpy as np -import librosa - - -class Synthesizer: - sample_rate = hparams.sample_rate - hparams = hparams - - def __init__(self, model_fpath: Path, verbose=True): - """ - The model isn't instantiated and loaded in memory until needed or until load() is called. - - :param model_fpath: path to the trained model file - :param verbose: if False, prints less information when using the model - """ - self.model_fpath = model_fpath - self.verbose = verbose - - # Check for GPU - if torch.cuda.is_available(): - self.device = torch.device("cuda") - else: - self.device = torch.device("cpu") - if self.verbose: - print("Synthesizer using device:", self.device) - - # Tacotron model will be instantiated later on first use. - self._model = None - - def is_loaded(self): - """ - Whether the model is loaded in memory. - """ - return self._model is not None - - def load(self): - """ - Instantiates and loads the model given the weights file that was passed in the constructor. 
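        The Tacotron architecture is rebuilt from the values in hparams.py, so the checkpoint
        must have been trained with the same hyperparameters (e.g. tts_encoder_dims, num_mels).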
- """ - self._model = Tacotron(embed_dims=hparams.tts_embed_dims, - num_chars=len(symbols), - encoder_dims=hparams.tts_encoder_dims, - decoder_dims=hparams.tts_decoder_dims, - n_mels=hparams.num_mels, - fft_bins=hparams.num_mels, - postnet_dims=hparams.tts_postnet_dims, - encoder_K=hparams.tts_encoder_K, - lstm_dims=hparams.tts_lstm_dims, - postnet_K=hparams.tts_postnet_K, - num_highways=hparams.tts_num_highways, - dropout=hparams.tts_dropout, - stop_threshold=hparams.tts_stop_threshold, - speaker_embedding_size=hparams.speaker_embedding_size).to(self.device) - - self._model.load(self.model_fpath) - self._model.eval() - - if self.verbose: - print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"])) - - def synthesize_spectrograms(self, texts: List[str], - embeddings: Union[np.ndarray, List[np.ndarray]], - return_alignments=False): - """ - Synthesizes mel spectrograms from texts and speaker embeddings. - - :param texts: a list of N text prompts to be synthesized - :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) - :param return_alignments: if True, a matrix representing the alignments between the - characters - and each decoder output step will be returned for each spectrogram - :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the - sequence length of spectrogram i, and possibly the alignments. - """ - # Load the model on the first request. - if not self.is_loaded(): - self.load() - - # Print some info about the model when it is loaded - tts_k = self._model.get_step() // 1000 - - simple_table([("Tacotron", str(tts_k) + "k"), - ("r", self._model.r)]) - - # Preprocess text inputs - inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts] - if not isinstance(embeddings, list): - embeddings = [embeddings] - - # Batch inputs - batched_inputs = [inputs[i:i+hparams.synthesis_batch_size] - for i in range(0, len(inputs), hparams.synthesis_batch_size)] - batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size] - for i in range(0, len(embeddings), hparams.synthesis_batch_size)] - - specs = [] - for i, batch in enumerate(batched_inputs, 1): - if self.verbose: - print(f"\n| Generating {i}/{len(batched_inputs)}") - - # Pad texts so they are all the same length - text_lens = [len(text) for text in batch] - max_text_len = max(text_lens) - chars = [pad1d(text, max_text_len) for text in batch] - chars = np.stack(chars) - - # Stack speaker embeddings into 2D array for batch processing - speaker_embeds = np.stack(batched_embeds[i-1]) - - # Convert to tensor - chars = torch.tensor(chars).long().to(self.device) - speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device) - - # Inference - _, mels, alignments = self._model.generate(chars, speaker_embeddings) - mels = mels.detach().cpu().numpy() - for m in mels: - # Trim silence from end of each spectrogram - while np.max(m[:, -1]) < hparams.tts_stop_threshold: - m = m[:, :-1] - specs.append(m) - - if self.verbose: - print("\n\nDone.\n") - return (specs, alignments) if return_alignments else specs - - @staticmethod - def load_preprocess_wav(fpath): - """ - Loads and preprocesses an audio file under the same conditions the audio files were used to - train the synthesizer. 
- """ - wav = librosa.load(str(fpath), hparams.sample_rate)[0] - if hparams.rescale: - wav = wav / np.abs(wav).max() * hparams.rescaling_max - return wav - - @staticmethod - def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]): - """ - Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that - were fed to the synthesizer when training. - """ - if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): - wav = Synthesizer.load_preprocess_wav(fpath_or_wav) - else: - wav = fpath_or_wav - - mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) - return mel_spectrogram - - @staticmethod - def griffin_lim(mel): - """ - Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built - with the same parameters present in hparams.py. - """ - return audio.inv_mel_spectrogram(mel, hparams) - - -def pad1d(x, max_len, pad_value=0): - return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) diff --git a/spaces/KevinQHLin/UniVTG/main/inference_qfvs.py b/spaces/KevinQHLin/UniVTG/main/inference_qfvs.py deleted file mode 100644 index 4feed8b8399a2dc1fb081e14e9acc4ece64650ed..0000000000000000000000000000000000000000 --- a/spaces/KevinQHLin/UniVTG/main/inference_qfvs.py +++ /dev/null @@ -1,342 +0,0 @@ -import os -import pdb -import time -import json -import pprint -import random -import importlib -import numpy as np -from tqdm import tqdm, trange -from collections import defaultdict - -import h5py -import torch -import torch.nn as nn -import torch.backends.cudnn as cudnn -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter - -import sys -sys.path.append('/Users/kevin/univtg') -from main.config import BaseOptions, setup_model -from main.dataset_qfvs import DatasetQFVS, prepare_batch_inputs_qfvs, start_end_collate_qfvs -from utils.basic_utils import set_seed, AverageMeter, dict_to_markdown, save_json, save_jsonl, load_json, load_pickle, l2_normalize_np_array -from utils.model_utils import count_parameters -from eval.qfvs import calculate_semantic_matching, load_videos_tag - -import logging -logger = logging.getLogger(__name__) -logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=logging.INFO) - -def eval_epoch(model, config, opt): - model.eval() - f1_sum = 0; p_sum = 0; r_sum = 0 - - assert len(config['test_videos']) == 1 - video_id = config['test_videos'][0] - embedding = load_pickle(f"./data/qfvs/txt_clip/{config['txt_feature']}.pkl") - - feat_type = config['vid_feature'] - feat = h5py.File(f'./data/qfvs/processed/P0{video_id}_{feat_type}.h5', 'r') - features = torch.from_numpy(feat['features'][()]) - seg_len = torch.from_numpy(feat['seg_len'][()]) - # seg_len = torch.tensor(feat['seg_len'][()]).unsqueeze(0).cuda() - - # dim = features.shape[-1] - # ctx_l = seg_len.sum().cpu() - - # dim = features.shape[-1] - # ctx_l = features.shape[1] - # seg_len = torch.ones(ctx_l) - # features = features.reshape(-1, dim)[:ctx_l] - - # tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l - # tef_ed = tef_st + 1.0 / ctx_l - # tef = torch.stack([tef_st, tef_ed], dim=1).cuda() # (Lv, 2) - # features = torch.cat([features, tef], dim=1) # (Lv, Dv+2) - - transfer = {"Cupglass": "Glass", - "Musicalinstrument": "Instrument", - "Petsanimal": "Animal"} - - with open(os.path.join('./plot', opt.dset_name, str(opt.qfvs_split) +'.jsonl'), 'w') as f_write: - for _,_,files in 
os.walk("./data/qfvs/metadata/origin_data/Query-Focused_Summaries/Oracle_Summaries/P0"+str(video_id)): - evaluation_num=len(files) - - mask_GT = torch.zeros(config["max_segment_num"], config["max_frame_num"], dtype=torch.bool).cuda() - for j in range(len(seg_len)): - for k in range(seg_len[j]): - mask_GT[j][k] = 1 - - for file in files: - summaries_GT=[] - with open("./data/qfvs/metadata/origin_data/Query-Focused_Summaries/Oracle_Summaries/P0"+str(video_id)+"/"+file,"r") as f: - for line in f.readlines(): - summaries_GT.append(int(line.strip())) - - concept1, concept2 = file.split('_')[0:2] - - ############## - if concept1 in transfer: - concept1 = transfer[concept1] - if concept2 in transfer: - concept2 = transfer[concept2] - concept1 = embedding[concept1] - concept2 = embedding[concept2] - - concept1 = l2_normalize_np_array(concept1) - concept2 = l2_normalize_np_array(concept2) - - data = { - 'features':features, - 'seg_len': seg_len, - 'tokens_pad1':torch.from_numpy(concept1), - 'tokens_pad2':torch.from_numpy(concept2), - 'mask_GT': mask_GT - } - - input1, input2, input_oracle, mask = prepare_batch_inputs_qfvs(start_end_collate_qfvs([data]), config, eval=True) - - summaries_GT = [x - 1 for x in summaries_GT] - video_shots_tag = load_videos_tag(mat_path="./eval/Tags.mat") - - if opt.f_loss_coef == 0: - output_type = 'saliency_scores' - elif opt.s_loss_intra_coef == 0: - output_type = 'pred_logits' - else: - if config['qfvs_score_ensemble'] > 0: - output_type = ['pred_logits', 'saliency_scores'] - else: - output_type = 'pred_logits' - - with torch.no_grad(): - if not isinstance(output_type, list): - score1 = model(**input1)[output_type].squeeze() - score1 = score1.masked_select(mask_GT) - - score2 = model(**input2)[output_type].squeeze() - score2 = score2.masked_select(mask_GT) - - score = model(**input_oracle)[output_type].squeeze() - score = score.masked_select(mask_GT) - else: - score1, score2, score = torch.zeros((int(mask.sum().item()))).cuda(), torch.zeros((int(mask.sum().item()))).cuda(), torch.zeros((int(mask.sum().item()))).cuda() - for output_t in output_type: - score1 += model(**input1)[output_t].squeeze().masked_select(mask_GT) - score2 += model(**input2)[output_t].squeeze().masked_select(mask_GT) - score += model(**input_oracle)[output_t].squeeze().masked_select(mask_GT) - - if config['qfvs_score_gather'] > 0: - score = score + score1 + score2 - else: - score = score - - # since video4 features dim is greater than video_shots_tag. 
- score = score[:min(score.shape[0], video_shots_tag[video_id-1].shape[0])] - _, top_index = score.topk(int(score.shape[0] * config["top_percent"])) - - c1, c2 = file.split('_')[0:2] - if c1 in transfer: - c1 = transfer[c1] - if c2 in transfer: - c2 = transfer[c2] - - p, r, f1 = calculate_semantic_matching(list(top_index.cpu().numpy()), summaries_GT, video_shots_tag, video_id=video_id-1) - entry = {'concept1': c1, 'concept2': c2, - 'score':score.tolist(), - 'top_percent': config["top_percent"], - 'top_pred':top_index.tolist(), - 'gt':summaries_GT, - 'p': p, 'r': r, 'f1': f1, - 'shots': video_shots_tag[video_id-1].shape[0]} - f_write.write(json.dumps(entry) + '\n') - f1_sum+=f1; r_sum+=r; p_sum+=p - return {'F': round(100* f1_sum/evaluation_num,2) , - 'R': round(100* r_sum/evaluation_num,2) , - 'P': round(100* p_sum/evaluation_num,2) } - -def idx2time(idx): - sec1, sec2 = idx*5, (idx+1)*5 - - h1 = sec1 // 3600 - m1 = (sec1 - h1*3600) // 60 - s1 = sec1 % 60 - - h2 = sec2 // 3600 - m2 = (sec2 - h2*3600) // 60 - s2 = sec2 % 60 - print(h1,m1,s1,'\t', h2,m2,s2) - -def train_epoch(model, criterion, train_loader, optimizer, opt, config, epoch_i, tb_writer): - model.train() - criterion.train() - - # init meters - time_meters = defaultdict(AverageMeter) - loss_meters = defaultdict(AverageMeter) - - timer_dataloading = time.time() - loss_total = 0 - - for batch_idx, batch in enumerate(tqdm(train_loader)): - time_meters["dataloading_time"].update(time.time() - timer_dataloading) - timer_start = time.time() - model_input1, model_input2, model_input_oracle, \ - model_gt1, model_gt2, model_gt_oracle, \ - mask_GT = prepare_batch_inputs_qfvs(batch, config) - time_meters["prepare_inputs_time"].update(time.time() - timer_start) - - timer_start = time.time() - output1 = model(**model_input1) - output2 = model(**model_input2) - output_oracle = model(**model_input_oracle) - - loss_dict = {} - loss_dict1 = criterion(output1, model_gt1, mask_GT) - loss_dict2 = criterion(output2, model_gt2, mask_GT) - loss_dict3 = criterion(output_oracle, model_gt_oracle, mask_GT) - - weight_dict = criterion.weight_dict - if config['qfvs_loss_gather'] > 0: - for k in loss_dict1.keys(): - loss_dict[k] = loss_dict1[k] + loss_dict2[k] + loss_dict3[k] - else: - loss_dict = loss_dict3 - - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - loss_total += losses.item() - - time_meters["model_forward_time"].update(time.time() - timer_start) - timer_start = time.time() - optimizer.zero_grad() - losses.backward() - if opt.grad_clip > 0: - nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) - optimizer.step() - time_meters["model_backward_time"].update(time.time() - timer_start) - - timer_dataloading = time.time() - return round(loss_total / len(train_loader), 2) - -# train in single domain. 
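# train() below runs one QFVS fold: an initial evaluation, then opt.n_epoch epochs of
# train_epoch(), re-evaluating every opt.eval_epoch epochs (when opt.eval_path is set)
# and checkpointing whenever the F-score on the held-out test video improves.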
-def train(model, criterion, optimizer, lr_scheduler, train_loader, opt, config): - # if opt.device.type == "cuda": - # logger.info("CUDA enabled.") - # model.to(opt.device) - - tb_writer = SummaryWriter(opt.tensorboard_log_dir) - tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) - opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" - opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" - - prev_best_score = {'Fscore':0, 'Precision':0, 'Recall':0} - if opt.start_epoch is None: - start_epoch = -1 if opt.eval_init else 0 - else: - start_epoch = opt.start_epoch - - val_score = eval_epoch(model, config, opt) - tb_writer.add_scalar(f"Eval/QFVS-V{config['test_videos'][0]}-fscore", float(val_score['F']), 0) - logger.info(f"[Epoch {0}] [Fscore: {val_score['F']} / {prev_best_score['Fscore']}]" - f" [Precision: {val_score['P']} / {prev_best_score['Precision']}]" - f" [Recall: {val_score['R']} / {prev_best_score['Recall']}]") - for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): - if epoch_i > -1: - loss_epoch = train_epoch(model, criterion, train_loader, optimizer, opt, config, epoch_i, tb_writer) - lr_scheduler.step() - eval_epoch_interval = opt.eval_epoch - if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: - with torch.no_grad(): - val_score = eval_epoch(model, config, opt) - tb_writer.add_scalar(f"Eval/QFVS-V{config['test_videos'][0]}-fscore", float(val_score['F']), epoch_i+1) - logger.info(f"[Epoch {epoch_i + 1}, Loss {loss_epoch}] [Fscore: {val_score['F']} / {prev_best_score['Fscore']}]" - f" [Precision: {val_score['P']} / {prev_best_score['Precision']}]" - f" [Recall: {val_score['R']} / {prev_best_score['Recall']}]") - - if prev_best_score['Fscore'] < val_score['F']: - prev_best_score['Fscore'] = val_score['F'] - prev_best_score['Precision'] = val_score['P'] - prev_best_score['Recall'] = val_score['R'] - - checkpoint = { - "model": model.state_dict(), - "optimizer": optimizer.state_dict(), - "epoch": epoch_i, - "opt": opt - } - torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_V{config['test_videos'][0]}_best.ckpt")) - tb_writer.close() - return prev_best_score - -def update_config(opt, config): - # for key in ["max_segment_num", "max_frame_num", "top_percent", - # "qfvs_vid_feature", "qfvs_txt_feature", "qfvs_dense_shot", - # "qfvs_score_ensemble", "qfvs_score_gather", "qfvs_loss_gather"]: - config["max_segment_num"] = opt.max_segment_num - config["max_frame_num"] = opt.max_frame_num - config["top_percent"] = opt.top_percent - config["vid_feature"] = opt.qfvs_vid_feature - config["txt_feature"] = opt.qfvs_txt_feature - config["qfvs_dense_shot"] = opt.qfvs_dense_shot - config["qfvs_score_ensemble"] = opt.qfvs_score_ensemble - config["qfvs_score_gather"] = opt.qfvs_score_gather - config["qfvs_loss_gather"] = opt.qfvs_loss_gather - return config - -def start_training(): - logger.info("Setup config, data and model...") - opt = BaseOptions().parse() - set_seed(opt.seed) - - # config = load_json("./main/config_qfvs.json") - config = {} - config = update_config(opt, config) - - tb_writer = SummaryWriter(opt.tensorboard_log_dir) - - # key -> test video; value -> training videos. 
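    # QFVS evaluation is 4-fold leave-one-video-out: each fold trains on three videos and
    # tests on the held-out one; opt.qfvs_split selects a single fold, -1 runs all four.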
- qfvs_split = { - 1: [2, 3, 4], - 2: [1, 3, 4], - 3: [1, 2, 4], - 4: [1, 2, 3] - } - - scores_videos = {} - for test_id, splits in qfvs_split.items(): - if opt.qfvs_split != -1: - if test_id != opt.qfvs_split: - continue - logger.info(f"Start Training {opt.dset_name}: {test_id}") - config['train_videos'] = qfvs_split[test_id] - config['test_videos'] = [test_id] - train_dataset = DatasetQFVS(config) - train_loader = DataLoader(train_dataset, batch_size=opt.bsz, collate_fn=start_end_collate_qfvs, shuffle=True, num_workers=opt.num_workers) - - model, criterion, optimizer, lr_scheduler = setup_model(opt) - count_parameters(model) - best_score = train(model, criterion, optimizer, lr_scheduler, train_loader, opt, config) - scores_videos['V'+str(test_id)] = best_score - - # save the final results. - avg_fscore = sum([v['Fscore'] for k, v in scores_videos.items()]) / len(scores_videos) - avg_precision = sum([v['Precision'] for k, v in scores_videos.items()]) / len(scores_videos) - avg_recall = sum([v['Recall'] for k, v in scores_videos.items()]) / len(scores_videos) - scores_videos['avg'] = {'Fscore':avg_fscore, 'Precision':avg_precision, 'Recall':avg_recall} - - save_metrics_path = os.path.join(opt.results_dir, f"best_{opt.dset_name}_{opt.eval_split_name}_preds_metrics.json") - save_json( scores_videos, save_metrics_path, save_pretty=True, sort_keys=False) - - tb_writer.add_scalar(f"Eval/QFVS-avg-fscore", round(avg_fscore, 2), 1) - tb_writer.add_text(f"Eval/QFVS-{opt.dset_name}", dict_to_markdown(scores_videos, max_str_len=None)) - tb_writer.close() - - print(scores_videos) - return - -if __name__ == '__main__': - start_training() - results = logger.info("\n\n\nFINISHED TRAINING!!!") diff --git a/spaces/Kreaols/ChuanhuChatGPT/assets/Kelpy-Codos.js b/spaces/Kreaols/ChuanhuChatGPT/assets/Kelpy-Codos.js deleted file mode 100644 index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000 --- a/spaces/Kreaols/ChuanhuChatGPT/assets/Kelpy-Codos.js +++ /dev/null @@ -1,76 +0,0 @@ -// ==UserScript== -// @name Kelpy Codos -// @namespace https://github.com/Keldos-Li/Kelpy-Codos -// @version 1.0.5 -// @author Keldos; https://keldos.me/ -// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. 
-// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) -// @license GPL-3.0 -// @grant none -// ==/UserScript== - -(function () { - 'use strict'; - - function addCopyButton(pre) { - var code = pre.querySelector('code'); - if (!code) { - return; // 如果没有找到 元素,则不添加按钮 - } - var firstChild = code.firstChild; - if (!firstChild) { - return; // 如果 元素没有子节点,则不添加按钮 - } - var button = document.createElement('button'); - button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本 - button.style.position = 'relative'; - button.style.float = 'right'; - button.style.fontSize = '1em'; // 可选:调整按钮大小 - button.style.background = 'none'; // 可选:去掉背景颜色 - button.style.border = 'none'; // 可选:去掉边框 - button.style.cursor = 'pointer'; // 可选:显示指针样式 - button.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 - var selection = window.getSelection(); - selection.removeAllRanges(); - selection.addRange(range); - - try { - var success = document.execCommand('copy'); - if (success) { - button.textContent = '\u2714'; - setTimeout(function () { - button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制” - }, 2000); - } else { - button.textContent = '\u2716'; - } - } catch (e) { - console.error(e); - button.textContent = '\u2716'; - } - - selection.removeAllRanges(); - }); - code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 - } - - function handleNewElements(mutationsList, observer) { - for (var mutation of mutationsList) { - if (mutation.type === 'childList') { - for (var node of mutation.addedNodes) { - if (node.nodeName === 'PRE') { - addCopyButton(node); - } - } - } - } - } - - var observer = new MutationObserver(handleNewElements); - observer.observe(document.documentElement, { childList: true, subtree: true }); - - document.querySelectorAll('pre').forEach(addCopyButton); -})(); diff --git a/spaces/KyanChen/FunSR/models/__init__.py b/spaces/KyanChen/FunSR/models/__init__.py deleted file mode 100644 index dab41ef12975f22503d615e220e09060523dad0c..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .models import register, make -from . import edsr, rdn, rcan, swin_backbone -from . import mlp -from . import liif -from . import metasr -from . import rs_super, siren_modulation, transformer_neck, swin_neck -from . cnn_models import * -from . 
import rs_multiscale_super -from .funsr import FUNSR - -from .baselines import * diff --git a/spaces/KyanChen/FunSR/models/baselines/upsampler.py b/spaces/KyanChen/FunSR/models/baselines/upsampler.py deleted file mode 100644 index fbdddd29bd309e5e498a689d5d2ed25143c49dc9..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/baselines/upsampler.py +++ /dev/null @@ -1,359 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.nn.utils.weight_norm as wn - -################ -# Upsampler -################ - -def make_coord(shape, ranges=None, flatten=True): - """Make coordinates at grid centers.""" - coord_seqs = [] - for i, n in enumerate(shape): - if ranges is None: - v0, v1 = -1, 1 - else: - v0, v1 = ranges[i] - r = (v1 - v0) / (2 * n) - seq = v0 + r + (2 * r) * torch.arange(n).float() - coord_seqs.append(seq) - ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1) - if flatten: - ret = ret.view(-1, ret.shape[-1]) - return ret - - -class UPLayer_MS_V9(nn.Module): - # Up-sampling net - def __init__(self, n_feats, kSize, out_channels, interpolate_mode, levels=4): - super().__init__() - self.interpolate_mode = interpolate_mode - self.levels = levels - - self.UPNet_x2_list = [] - - for _ in range(levels - 1): - self.UPNet_x2_list.append( - nn.Sequential( - *[ - nn.Conv2d( - n_feats, - n_feats * 4, - kSize, - padding=(kSize - 1) // 2, - stride=1, - ), - nn.PixelShuffle(2), - ] - ) - ) - - self.scale_aware_layer = nn.Sequential( - *[nn.Linear(1, 64), nn.ReLU(), nn.Linear(64, levels), nn.Sigmoid()] - ) - - self.UPNet_x2_list = nn.Sequential(*self.UPNet_x2_list) - - self.fuse = nn.Sequential( - *[ - nn.Conv2d(n_feats * levels, 256, kernel_size=1, padding=0, stride=1), - nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1), - nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1), - nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1), - nn.ReLU(), - nn.Conv2d(256, out_channels, kernel_size=1, padding=0, stride=1), - ] - ) - - def forward(self, x, out_size): - - if type(out_size) == int: - out_size = [out_size, out_size] - - if type(x) == list: - return self.forward_list(x, out_size) - - r = torch.tensor([x.shape[2] / out_size[0]], device="cuda") - - scale_w = self.scale_aware_layer(r.unsqueeze(0))[0] - - # scale_in = x.new_tensor(np.ones([x.shape[0], 1, out_size[0], out_size[1]])*r) - - x_list = [x] - for l in range(1, self.levels): - x_list.append(self.UPNet_x2_list[l - 1](x_list[l - 1])) - - x_resize_list = [] - for l in range(self.levels): - x_resize = F.interpolate( - x_list[l], out_size, mode=self.interpolate_mode, align_corners=False - ) - x_resize *= scale_w[l] - x_resize_list.append(x_resize) - - # x_resize_list.append(scale_in) - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - def forward_list(self, h_list, out_size): - assert ( - len(h_list) == self.levels - ), "The Length of input list must equal to the number of levels" - device = h_list[0].device - r = torch.tensor([h_list[0].shape[2] / out_size[0]], device=device) - scale_w = self.scale_aware_layer(r.unsqueeze(0))[0] - - x_resize_list = [] - for l in range(self.levels): - h = h_list[l] - for i in range(l): - h = self.UPNet_x2_list[i](h) - x_resize = F.interpolate( - h, out_size, mode=self.interpolate_mode, align_corners=False - ) - x_resize *= scale_w[l] - x_resize_list.append(x_resize) - - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - -class UPLayer_MS_WN(nn.Module): - # Up-sampling net 
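    # Weight-normalized variant of UPLayer_MS_V9: x2 pixel-shuffle branches build a feature
    # pyramid, a small scale-aware MLP weights each level from the target ratio, and the
    # resized levels are fused by 1x1 convs; interpolate_mode may also be 'MLP' (MLP_Interpolate).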
- def __init__(self, n_feats, kSize, out_channels, interpolate_mode, levels=4): - super().__init__() - self.interpolate_mode = interpolate_mode - self.levels = levels - self.UPNet_x2_list = [] - - for _ in range(levels - 1): - self.UPNet_x2_list.append( - nn.Sequential( - *[ - wn( - nn.Conv2d( - n_feats, - n_feats * 4, - kSize, - padding=(kSize - 1) // 2, - stride=1, - ) - ), - nn.PixelShuffle(2), - ] - ) - ) - - self.scale_aware_layer = nn.Sequential( - *[wn(nn.Linear(1, 64)), nn.ReLU(), wn(nn.Linear(64, levels)), nn.Sigmoid()] - ) - - self.UPNet_x2_list = nn.Sequential(*self.UPNet_x2_list) - - self.fuse = nn.Sequential( - *[ - wn( - nn.Conv2d(n_feats * levels, 256, kernel_size=1, padding=0, stride=1) - ), - nn.ReLU(), - wn(nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1)), - nn.ReLU(), - wn(nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1)), - nn.ReLU(), - wn(nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1)), - nn.ReLU(), - wn(nn.Conv2d(256, out_channels, kernel_size=1, padding=0, stride=1)), - ] - ) - - assert self.interpolate_mode in ( - "bilinear", - "bicubic", - "nearest", - "MLP", - ), "Interpolate mode must be bilinear/bicubic/nearest/MLP" - if self.interpolate_mode == "MLP": - self.feature_interpolater = MLP_Interpolate(n_feats, radius=3) - elif self.interpolate_mode == "nearest": - self.feature_interpolater = lambda x, out_size: F.interpolate( - x, out_size, mode=self.interpolate_mode - ) - else: - self.feature_interpolater = lambda x, out_size: F.interpolate( - x, out_size, mode=self.interpolate_mode, align_corners=False - ) - - def forward(self, x, out_size): - if type(out_size) == int: - out_size = [out_size, out_size] - - if type(x) == list: - return self.forward_list(x, out_size) - - r = torch.tensor([x.shape[2] / out_size[0]], device="cuda") - - scale_w = self.scale_aware_layer(r.unsqueeze(0))[0] - - x_list = [x] - for l in range(1, self.levels): - x_list.append(self.UPNet_x2_list[l - 1](x_list[l - 1])) - - x_resize_list = [] - for l in range(self.levels): - x_resize = self.feature_interpolater(x_list[l], out_size) - x_resize *= scale_w[l] - x_resize_list.append(x_resize) - - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - def forward_list(self, h_list, out_size): - assert ( - len(h_list) == self.levels - ), "The Length of input list must equal to the number of levels" - device = h_list[0].device - r = torch.tensor([h_list[0].shape[2] / out_size[0]], device=device) - scale_w = self.scale_aware_layer(r.unsqueeze(0))[0] - - x_resize_list = [] - for l in range(self.levels): - h = h_list[l] - for i in range(l): - h = self.UPNet_x2_list[i](h) - x_resize = self.feature_interpolater(h, out_size) - x_resize *= scale_w[l] - x_resize_list.append(x_resize) - - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - -class UPLayer_MS_WN_woSA(UPLayer_MS_WN): - def __init__(self, n_feats, kSize, out_channels, interpolate_mode, levels=4): - super().__init__(n_feats, kSize, out_channels, interpolate_mode, levels) - - def forward(self, x, out_size): - if type(out_size) == int: - out_size = [out_size, out_size] - - if type(x) == list: - return self.forward_list(x, out_size) - - x_list = [x] - for l in range(1, self.levels): - x_list.append(self.UPNet_x2_list[l - 1](x_list[l - 1])) - - x_resize_list = [] - for l in range(self.levels): - x_resize = self.feature_interpolater(x_list[l], out_size) - x_resize_list.append(x_resize) - - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - def forward_list(self, h_list, out_size): 
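        # Same per-level upsample-and-fuse path as the parent forward_list, but (as in
        # forward above) without the scale-aware weights; h_list supplies one feature map
        # per pyramid level.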
- assert ( - len(h_list) == self.levels - ), "The Length of input list must equal to the number of levels" - - x_resize_list = [] - for l in range(self.levels): - h = h_list[l] - for i in range(l): - h = self.UPNet_x2_list[i](h) - x_resize = self.feature_interpolater(h, out_size) - x_resize_list.append(x_resize) - - out = self.fuse(torch.cat(tuple(x_resize_list), 1)) - return out - - -class OSM(nn.Module): - def __init__(self, n_feats, overscale): - super().__init__() - self.body = nn.Sequential( - wn(nn.Conv2d(n_feats, 1600, 3, padding=1)), - nn.PixelShuffle(overscale), - wn(nn.Conv2d(64, 3, 3, padding=1)), - ) - - def forward(self, x, out_size): - h = self.body(x) - return F.interpolate(h, out_size, mode="bicubic", align_corners=False) - - -class MLP_Interpolate(nn.Module): - def __init__(self, n_feat, radius=2): - super().__init__() - self.radius = radius - - self.f_transfer = nn.Sequential( - *[ - nn.Linear(n_feat * self.radius * self.radius + 2, n_feat), - nn.ReLU(True), - nn.Linear(n_feat, n_feat), - ] - ) - - def forward(self, x, out_size): - x_unfold = F.unfold(x, self.radius, padding=self.radius // 2) - x_unfold = x_unfold.view( - x.shape[0], x.shape[1] * (self.radius ** 2), x.shape[2], x.shape[3] - ) - - in_shape = x.shape[-2:] - in_coord = ( - make_coord(in_shape, flatten=False) - .cuda() - .permute(2, 0, 1) - .unsqueeze(0) - .expand(x.shape[0], 2, *in_shape) - ) - - if type(out_size) == int: - out_size = [out_size, out_size] - - out_coord = make_coord(out_size, flatten=True).cuda() - out_coord = out_coord.expand(x.shape[0], *out_coord.shape) - - q_feat = F.grid_sample( - x_unfold, - out_coord.flip(-1).unsqueeze(1), - mode="nearest", - align_corners=False, - )[:, :, 0, :].permute(0, 2, 1) - q_coord = F.grid_sample( - in_coord, - out_coord.flip(-1).unsqueeze(1), - mode="nearest", - align_corners=False, - )[:, :, 0, :].permute(0, 2, 1) - - rel_coord = out_coord - q_coord - rel_coord[:, :, 0] *= x.shape[-2] - rel_coord[:, :, 1] *= x.shape[-1] - - inp = torch.cat([q_feat, rel_coord], dim=-1) - - bs, q = out_coord.shape[:2] - pred = self.f_transfer(inp.view(bs * q, -1)).view(bs, q, -1) - pred = ( - pred.view(x.shape[0], *out_size, x.shape[1]) - .permute(0, 3, 1, 2) - .contiguous() - ) - - return pred - - -class LIIF_Upsampler(nn.Module): - def __init__(self): - super().__init__() - raise NotImplementedError - - def forward(self): - pass diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/necks/hf_gpt_transformer_decoder.py b/spaces/KyanChen/RSPrompter/mmpl/models/necks/hf_gpt_transformer_decoder.py deleted file mode 100644 index 935785664fbbedb0f28a173d7e88958787261d95..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/models/necks/hf_gpt_transformer_decoder.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch.nn as nn -from mmengine.model import BaseModule -from mmengine.model.weight_init import constant_init -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmpl.registry import MODELS -from mmengine.model import BaseModule -from transformers import GPT2Model, GPT2Config - - -@MODELS.register_module() -class HFGPTTransformerDecoderNeck(BaseModule): - def __init__( - self, - model_name='gpt2', - from_pretrained=True, - update_kwargs=dict( - max_position_embeddings=512, - hidden_size=512, - ) - ): - super(HFGPTTransformerDecoderNeck, self).__init__() - self.model_name = model_name - if from_pretrained: - self.gpt_model = GPT2Model.from_pretrained(model_name) - else: - config = GPT2Config.from_pretrained(model_name) - 
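            # When not loading pretrained weights, the pretrained config only serves as a
            # template; update_kwargs then overrides it (e.g. hidden_size, max_position_embeddings)
            # before a fresh GPT2Model is built from it.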
config.update(update_kwargs) - self.gpt_model = GPT2Model(config=config) - # self.wte = nn.Embedding(0, 512) - - def forward(self, *args, **kwargs): - out_puts = self.gpt_model(*args, **kwargs) - return out_puts diff --git a/spaces/Lajonbot/Chatbot-Share/model/bart.py b/spaces/Lajonbot/Chatbot-Share/model/bart.py deleted file mode 100644 index 49b39863303bff7d25cde004cd8f2cc019847329..0000000000000000000000000000000000000000 --- a/spaces/Lajonbot/Chatbot-Share/model/bart.py +++ /dev/null @@ -1,151 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -from .modules import AudioEncoder -from transformers import BartForConditionalGeneration, BartTokenizer, BartConfig - -class BartCaptionModel(nn.Module): - def __init__(self, n_mels=128, num_of_conv=6, sr=16000, duration=10, max_length=128, label_smoothing=0.1, bart_type="facebook/bart-base", audio_dim=768): - super(BartCaptionModel, self).__init__() - # non-finetunning case - bart_config = BartConfig.from_pretrained(bart_type) - self.tokenizer = BartTokenizer.from_pretrained(bart_type) - self.bart = BartForConditionalGeneration(bart_config) - - self.n_sample = sr * duration - self.hop_length = int(0.01 * sr) # hard coding hop_size - self.n_frames = int(self.n_sample // self.hop_length) - self.num_of_stride_conv = num_of_conv - 1 - self.n_ctx = int(self.n_frames // 2**self.num_of_stride_conv) + 1 - self.audio_encoder = AudioEncoder( - n_mels = n_mels, # hard coding n_mel - n_ctx = self.n_ctx, - audio_dim = audio_dim, - text_dim = self.bart.config.hidden_size, - num_of_stride_conv = self.num_of_stride_conv - ) - - self.max_length = max_length - self.loss_fct = nn.CrossEntropyLoss(label_smoothing= label_smoothing, ignore_index=-100) - - @property - def device(self): - return list(self.parameters())[0].device - - def shift_tokens_right(self, input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): - """ - Shift input ids one token to the right.ls - """ - shifted_input_ids = input_ids.new_zeros(input_ids.shape) - shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() - shifted_input_ids[:, 0] = decoder_start_token_id - - if pad_token_id is None: - raise ValueError("self.model.config.pad_token_id has to be defined.") - # replace possible -100 values in labels by `pad_token_id` - shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) - return shifted_input_ids - - def forward_encoder(self, audio): - audio_embs = self.audio_encoder(audio) - encoder_outputs = self.bart.model.encoder( - input_ids=None, - inputs_embeds=audio_embs, - return_dict=True - )["last_hidden_state"] - return encoder_outputs, audio_embs - - def forward_decoder(self, text, encoder_outputs): - text = self.tokenizer(text, - padding='longest', - truncation=True, - max_length=self.max_length, - return_tensors="pt") - input_ids = text["input_ids"].to(self.device) - attention_mask = text["attention_mask"].to(self.device) - - decoder_targets = input_ids.masked_fill( - input_ids == self.tokenizer.pad_token_id, -100 - ) - - decoder_input_ids = self.shift_tokens_right( - decoder_targets, self.bart.config.pad_token_id, self.bart.config.decoder_start_token_id - ) - - decoder_outputs = self.bart( - input_ids=None, - attention_mask=None, - decoder_input_ids=decoder_input_ids, - decoder_attention_mask=attention_mask, - inputs_embeds=None, - labels=None, - encoder_outputs=(encoder_outputs,), - return_dict=True - ) - lm_logits = decoder_outputs["logits"] - loss = self.loss_fct(lm_logits.view(-1, 
self.tokenizer.vocab_size), decoder_targets.view(-1)) - return loss - - def forward(self, audio, text): - encoder_outputs, _ = self.forward_encoder(audio) - loss = self.forward_decoder(text, encoder_outputs) - return loss - - def generate(self, - samples, - use_nucleus_sampling=False, - num_beams=5, - max_length=128, - min_length=2, - top_p=0.9, - repetition_penalty=1.0, - ): - - # self.bart.force_bos_token_to_be_generated = True - audio_embs = self.audio_encoder(samples) - encoder_outputs = self.bart.model.encoder( - input_ids=None, - attention_mask=None, - head_mask=None, - inputs_embeds=audio_embs, - output_attentions=None, - output_hidden_states=None, - return_dict=True) - - input_ids = torch.zeros((encoder_outputs['last_hidden_state'].size(0), 1)).long().to(self.device) - input_ids[:, 0] = self.bart.config.decoder_start_token_id - decoder_attention_mask = torch.ones((encoder_outputs['last_hidden_state'].size(0), 1)).long().to(self.device) - if use_nucleus_sampling: - outputs = self.bart.generate( - input_ids=None, - attention_mask=None, - decoder_input_ids=input_ids, - decoder_attention_mask=decoder_attention_mask, - encoder_outputs=encoder_outputs, - max_length=max_length, - min_length=min_length, - do_sample=True, - top_p=top_p, - num_return_sequences=1, - repetition_penalty=1.1) - else: - outputs = self.bart.generate(input_ids=None, - attention_mask=None, - decoder_input_ids=input_ids, - decoder_attention_mask=decoder_attention_mask, - encoder_outputs=encoder_outputs, - head_mask=None, - decoder_head_mask=None, - inputs_embeds=None, - decoder_inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - max_length=max_length, - min_length=min_length, - num_beams=num_beams, - repetition_penalty=repetition_penalty) - - captions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True) - return captions diff --git a/spaces/Lamai/LAMAIGPT/autogpt/prompt.py b/spaces/Lamai/LAMAIGPT/autogpt/prompt.py deleted file mode 100644 index 03c132acdf26d08deeee119e41a561f430957806..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/autogpt/prompt.py +++ /dev/null @@ -1,204 +0,0 @@ -from colorama import Fore - -from autogpt.config import Config -from autogpt.config.ai_config import AIConfig -from autogpt.config.config import Config -from autogpt.logs import logger -from autogpt.promptgenerator import PromptGenerator -from autogpt.setup import prompt_user -from autogpt.utils import clean_input - -CFG = Config() - - -def get_prompt() -> str: - """ - This function generates a prompt string that includes various constraints, - commands, resources, and performance evaluations. - - Returns: - str: The generated prompt string. - """ - - # Initialize the Config object - cfg = Config() - - # Initialize the PromptGenerator object - prompt_generator = PromptGenerator() - - # Add constraints to the PromptGenerator object - prompt_generator.add_constraint( - "~4000 word limit for short term memory. Your short term memory is short, so" - " immediately save important information to files." - ) - prompt_generator.add_constraint( - "If you are unsure how you previously did something or want to recall past" - " events, thinking about similar events will help you remember." - ) - prompt_generator.add_constraint("No user assistance") - prompt_generator.add_constraint( - 'Exclusively use the commands listed in double quotes e.g. 
"command name"' - ) - prompt_generator.add_constraint( - "Use subprocesses for commands that will not terminate within a few minutes" - ) - - # Define the command list - commands = [ - ("Google Search", "google", {"input": ""}), - ( - "Browse Website", - "browse_website", - {"url": "", "question": ""}, - ), - ( - "Start GPT Agent", - "start_agent", - {"name": "", "task": "", "prompt": ""}, - ), - ( - "Message GPT Agent", - "message_agent", - {"key": "", "message": ""}, - ), - ("List GPT Agents", "list_agents", {}), - ("Delete GPT Agent", "delete_agent", {"key": ""}), - ( - "Clone Repository", - "clone_repository", - {"repository_url": "", "clone_path": ""}, - ), - ("Write to file", "write_to_file", {"file": "", "text": ""}), - ("Read file", "read_file", {"file": ""}), - ("Append to file", "append_to_file", {"file": "", "text": ""}), - ("Delete file", "delete_file", {"file": ""}), - ("Search Files", "search_files", {"directory": ""}), - ("Analyze Code", "analyze_code", {"code": ""}), - ( - "Get Improved Code", - "improve_code", - {"suggestions": "", "code": ""}, - ), - ( - "Write Tests", - "write_tests", - {"code": "", "focus": ""}, - ), - ("Execute Python File", "execute_python_file", {"file": ""}), - ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), - ("Generate Image", "generate_image", {"prompt": ""}), - ("Send Tweet", "send_tweet", {"text": ""}), - ] - - # Only add the audio to text command if the model is specified - if cfg.huggingface_audio_to_text_model: - commands.append( - ("Convert Audio to text", "read_audio_from_file", {"file": ""}), - ) - - # Only add shell command to the prompt if the AI is allowed to execute it - if cfg.execute_local_commands: - commands.append( - ( - "Execute Shell Command, non-interactive commands only", - "execute_shell", - {"command_line": ""}, - ), - ) - commands.append( - ( - "Execute Shell Command Popen, non-interactive commands only", - "execute_shell_popen", - {"command_line": ""}, - ), - ) - - # Only add the download file command if the AI is allowed to execute it - if cfg.allow_downloads: - commands.append( - ( - "Downloads a file from the internet, and stores it locally", - "download_file", - {"url": "", "file": ""}, - ), - ) - - # Add these command last. - commands.append( - ("Do Nothing", "do_nothing", {}), - ) - commands.append( - ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), - ) - - # Add commands to the PromptGenerator object - for command_label, command_name, args in commands: - prompt_generator.add_command(command_label, command_name, args) - - # Add resources to the PromptGenerator object - prompt_generator.add_resource( - "Internet access for searches and information gathering." - ) - prompt_generator.add_resource("Long Term memory management.") - prompt_generator.add_resource( - "GPT-3.5 powered Agents for delegation of simple tasks." - ) - prompt_generator.add_resource("File output.") - - # Add performance evaluations to the PromptGenerator object - prompt_generator.add_performance_evaluation( - "Continuously review and analyze your actions to ensure you are performing to" - " the best of your abilities." - ) - prompt_generator.add_performance_evaluation( - "Constructively self-criticize your big-picture behavior constantly." - ) - prompt_generator.add_performance_evaluation( - "Reflect on past decisions and strategies to refine your approach." - ) - prompt_generator.add_performance_evaluation( - "Every command has a cost, so be smart and efficient. Aim to complete tasks in" - " the least number of steps." 
- ) - - # Generate the prompt string - return prompt_generator.generate_prompt_string() - - -def construct_prompt() -> str: - """Construct the prompt for the AI to respond to - - Returns: - str: The prompt string - """ - config = AIConfig.load(CFG.ai_settings_file) - if CFG.skip_reprompt and config.ai_name: - logger.typewriter_log("Name :", Fore.GREEN, config.ai_name) - logger.typewriter_log("Role :", Fore.GREEN, config.ai_role) - logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}") - elif config.ai_name: - logger.typewriter_log( - "Welcome back! ", - Fore.GREEN, - f"Would you like me to return to being {config.ai_name}?", - speak_text=True, - ) - should_continue = clean_input( - f"""Continue with the last settings? -Name: {config.ai_name} -Role: {config.ai_role} -Goals: {config.ai_goals} -Continue (y/n): """ - ) - if should_continue.lower() == "n": - config = AIConfig() - - if not config.ai_name: - config = prompt_user() - config.save(CFG.ai_settings_file) - - # Get rid of this global: - global ai_name - ai_name = config.ai_name - - return config.construct_full_prompt() diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/css/fontawesome.all.min.css b/spaces/LinkSoul/Chinese-LLaVa/static/css/fontawesome.all.min.css deleted file mode 100644 index 656a50745f7224b3eca827869677851c705b26c9..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/Chinese-LLaVa/static/css/fontawesome.all.min.css +++ /dev/null @@ -1,5 +0,0 @@ -/*! - * Font Awesome Free 5.15.1 by @fontawesome - https://fontawesome.com - * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) - */ -.fa,.fab,.fad,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}:root .fa-flip-both,:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{cont
ent:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-bacon:before{content:"\f7e5"}.fa-bacteria:before{content:"\e059"}.fa-bacterium:before{content:"\e05a"}.fa-bahai:before{content:"\f666"}.fa-balance-scale:before{content:"\f24e"}.fa-balance-scale-left:before{content:"\f515"}.fa-balance-scale-right:before{content:"\f516"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-battle-net:before{content:"\f835"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-biking:before{content:"\f84a"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bootstrap:before{content:"\f836"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before{content:"\f853"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-box-tissue:before{content:"\e05b"}.fa-boxes:before{conten
t:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-bread-slice:before{content:"\f7ec"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{co
ntent:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clinic-medical:before{content:"\f7f2"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{content:"\f382"}.fa-cloudflare:before{content:"\e07d"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-alt:before{content:"\f422"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-cotton-bureau:before{content:"\f89e"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:be
fore{content:"\f7f7"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\e052"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-deezer:before{content:"\e077"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-disease:before{content:"\f7fa"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edge-legacy:before{content:"\e078"}.fa-edit:before{content:"\f044"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-evernote:before{content:"\f839"}.fa-exchange-alt:before{content:"\f362"}
.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-alt:before{content:"\f424"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fan:before{content:"\f863"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-faucet:before{content:"\e005"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\e007"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursqu
are:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-pay:before{content:"\e079"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guilded:before{content:"\e07e"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hamburger:before{content:"\f805"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-hand-
holding-usd:before{content:"\f4c0"}.fa-hand-holding-water:before{content:"\f4c1"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-sparkles:before{content:"\e05d"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-hands-wash:before{content:"\e05e"}.fa-handshake:before{content:"\f2b5"}.fa-handshake-alt-slash:before{content:"\e05f"}.fa-handshake-slash:before{content:"\e060"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-hat:before{content:"\f807"}.fa-hashtag:before{content:"\f292"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-hdd:before{content:"\f0a0"}.fa-head-side-cough:before{content:"\e061"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-head-side-mask:before{content:"\e063"}.fa-head-side-virus:before{content:"\e064"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hive:before{content:"\e07f"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hospital-user:before{content:"\f80d"}.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-house-user:before{content:"\e065"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-ideal:before{content:"\e013"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-innosoft:before{content:"\e080"}.fa-instagram:before{content:"\f16d"}.fa-instagram-square:before{content:"\e055"}.fa-instalod:before{content:"\e081"}.fa-intercom:before{conten
t:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-house:before{content:"\e066"}.fa-laptop-medical:before{content:"\f812"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lungs:before{content:"\f604"}.fa-lungs-virus:before{content:"\e067"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mar
s-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microblog:before{content:"\e01a"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\e056"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse:before{content:"\f8cc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-octopus-deploy:before{content:"\e082"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-pager:before{content:"\f815"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:bef
ore{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-arrows:before{content:"\e068"}.fa-people-carry:before{content:"\f4ce"}.fa-pepper-hot:before{content:"\f816"}.fa-perbyte:before{content:"\e083"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-square-alt:before{content:"\f87b"}.fa-phone-volume:before{content:"\f2a0"}.fa-photo-video:before{content:"\f87c"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-square:before{content:"\e01e"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-plane-slash:before{content:"\e069"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pump-medical:before{content:"\e06a"}.fa-pump-soap:before{content:"\e06b"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"
}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-remove-format:before{content:"\f87d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-rust:before{content:"\e07a"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-shield-virus:before{content:"\e06c"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopify:before{content:"\e057"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-s
ign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sink:before{content:"\e06d"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-soap:before{content:"\e06e"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-down-alt:before{content:"\f884"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-amount-up-alt:before{content:"\f885"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04
d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-stopwatch-20:before{content:"\e06f"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-store-alt-slash:before{content:"\e070"}.fa-store-slash:before{content:"\e071"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swift:before{content:"\f8e1"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-symfony:before{content:"\f83d"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-tiktok:before{content:"\e07b"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa
-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\e041"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-trash-restore:before{content:"\f829"}.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-uncharted:before{content:"\e084"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\e049"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-unsplash:before{content:"\e07c"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-users-slash:before{content:"\e073"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-vest:before{content:"\e085"}.fa-vest-patches:before{content:"\e086"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:
before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-virus:before{content:"\e074"}.fa-virus-slash:before{content:"\e075"}.fa-viruses:before{content:"\e076"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-voicemail:before{content:"\f897"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-watchman-monitoring:before{content:"\e087"}.fa-water:before{content:"\f773"}.fa-wave-square:before{content:"\f83e"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wodu:before{content:"\e088"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{content:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 
Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.fab,.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/__init__.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/data/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MathysL/AutoGPT4/autogpt/json_utils/__init__.py b/spaces/MathysL/AutoGPT4/autogpt/json_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/clinical_only.py b/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/clinical_only.py deleted file mode 100644 index 481d1221b17282bda4c64fe262477985548880e5..0000000000000000000000000000000000000000 --- a/spaces/MercurialAi/OncoMedleyMini/OncoMedley/src/clinical_only.py +++ /dev/null @@ -1,15 +0,0 @@ -import torch.nn.functional as F -import torch - -class clinical_only(torch.nn.Module): - def __init__(self): - super().__init__() - self.lin1=torch.nn.Linear(10, 7) - self.lin2 = torch.nn.Linear(7, 4) - self.lin3 = torch.nn.Linear(4, 1) - - def forward(self, x): - x = F.relu(self.lin1(x)) - x = F.relu(self.lin2(x)) - x = self.lin3(x) - return x \ No newline at end of file diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/classifier.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/classifier.py deleted file mode 100644 index 67e98b9d8ffb96a150b517497ace0a242d7163ef..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = { - 'class_label': EncoderUNetModel, - 'segmentation': UNetModel -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - - def __init__(self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.e-2, - log_steps=10, - monitor='val/loss', - 
*args, - **kwargs): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] - self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - self.log_time_interval = self.diffusion_model.num_timesteps // log_steps - self.log_steps = log_steps - - self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ - else self.diffusion_model.cond_stage_key - - assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy(self.diffusion_config.params.unet_config.params) - model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print('#####################################################################') - print(f'load from ckpt "{ckpt_path}"') - print('#####################################################################') - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = 
self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction="mean"): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == "mean": - return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - elif reduction == "none": - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f"{log_prefix}/loss"] = loss.mean() - log[f"{log_prefix}/acc@1"] = self.compute_top_k( - logits, targets, k=1, reduction="mean" - ) - log[f"{log_prefix}/acc@5"] = self.compute_top_k( - logits, targets, k=5, reduction="mean" - ) - - self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) - self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) - self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) - targets = self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() - else: - t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss = loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in - range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) - self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) - - return loss - - def configure_optimizers(self): - optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, 
self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/README.md b/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/README.md deleted file mode 100644 index d3f576836c4e0fb28eee9882906b18d88a90c564..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/README.md +++ /dev/null @@ -1,281 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Adversarial logit pairing - -This directory contains implementation of -[Adversarial logit pairing](https://arxiv.org/abs/1803.06373) paper as well as -few models pre-trained on ImageNet and Tiny ImageNet. - -Please contact [Alexey Kurakin](https://github.com/AlexeyKurakin) regarding -this code. - -## Pre-requesites - -Code dependencies: - -* TensorFlow 1.8 and Python 2.7 (other versions may work, but were not tested) -* [Abseil Python](https://github.com/abseil/abseil-py). -* Script which converts Tiny Imagenet dataset into TFRecord format also - depends on [Pandas](https://pandas.pydata.org/). - -## Datasets - -To use this code you need to download datasets. You only need to download -those datasets which you're going to use. Following list of datasets is -supported: - -* [ImageNet](http://www.image-net.org/). Follow - [Preparing the datasets](https://github.com/tensorflow/models/tree/master/research/slim#Data) - instructions in TF-Slim documentation to download and convert ImageNet dataset - to TFRecord format. - -* [Tiny ImageNet](https://tiny-imagenet.herokuapp.com/). - To obtain Tiny ImageNet dataset do following: - - ``` - # Download zip archive with TinyImagenet - curl -O http://cs231n.stanford.edu/tiny-imagenet-200.zip - - # Extract archive - unzip tiny-imagenet-200.zip - - # Convert dataset to TFRecord format - mkdir tiny-imagenet-tfrecord - python tiny_imagenet_converter/converter.py \ - --input_dir=tiny-imagenet-200 \ - --output_dir=tiny-imagenet-tfrecord - ``` - -## Running the code - -NOTE: Provided code supports distributed training on multiple machines, -and all provided checkpoints were trained in a distributed way. However it is -beyond the scope of this document to describe how to do distributed training. -Readed should refer to -[other material](https://www.tensorflow.org/deploy/distributed) to learn -about it. 
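-
-For orientation, here is a minimal, illustrative sketch of the adversarial logit
-pairing objective from the paper. It is written with PyTorch tensors purely for
-brevity and is not the TensorFlow implementation in this directory; `model`,
-`x_adv` and `lp_weight` are placeholder names, with `lp_weight` playing the role
-of the `train_lp_weight` hyperparameter described below.
-
-```
-import torch.nn.functional as F
-
-def alp_loss(model, x_clean, x_adv, labels, lp_weight=0.5):
-    """Mixed-minibatch adversarial training loss with a logit pairing term."""
-    logits_clean = model(x_clean)
-    logits_adv = model(x_adv)  # x_adv would come from, e.g., a PGD attack on x_clean
-    # Cross-entropy on both the clean and the adversarial half of the batch.
-    ce = F.cross_entropy(logits_clean, labels) + F.cross_entropy(logits_adv, labels)
-    # Logit pairing: pull the logits on clean and adversarial inputs together.
-    pairing = F.mse_loss(logits_adv, logits_clean)
-    return ce + lp_weight * pairing
-```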
-
-### Training
-
-The following command runs training:
-
-```
-# The following arguments have to be specified for training:
-# - MAX_NUMBER_OF_TRAINING_STEPS - maximum number of training steps,
-#   omit this flag or set it to -1 to have an unlimited number of training steps.
-# - MODEL_NAME - name of the model, currently only "resnet_v2_50" is supported.
-# - MOVING_AVG_DECAY - decay rate for the exponential moving average of the
-#   trainable variables. Training with an exponential moving average usually
-#   leads to better accuracy. The default is 0.9999; -1 disables the exponential
-#   moving average. The default works well, so typically you set this only if
-#   you want to disable the feature.
-# - HYPERPARAMETERS - string with hyperparameters,
-#   see model_lib.py for the full list of hyperparameters.
-# - DATASET - dataset, either "imagenet" or "tiny_imagenet".
-# - IMAGE_SIZE - size of the image (single number).
-# - OUTPUT_DIRECTORY - directory where results are written.
-# - IMAGENET_DIR - directory with ImageNet dataset in TFRecord format.
-# - TINY_IMAGENET_DIR - directory with Tiny ImageNet dataset in TFRecord format.
-#
-# Note that only one of IMAGENET_DIR or TINY_IMAGENET_DIR has to be provided,
-# depending on which dataset you use.
-#
-python train.py \
-  --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \
-  --model_name="${MODEL_NAME}" \
-  --moving_average_decay="${MOVING_AVG_DECAY}" \
-  --hparams="${HYPERPARAMETERS}" \
-  --dataset="${DATASET}" \
-  --dataset_image_size="${IMAGE_SIZE}" \
-  --output_dir="${OUTPUT_DIRECTORY}" \
-  --imagenet_data_dir="${IMAGENET_DIR}" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-The full list of training hyperparameters can be found in `model_lib.py`.
-These hyperparameters control the learning rate schedule, optimizer, weight decay,
-label smoothing and adversarial training.
-
-Adversarial training is controlled by the following hyperparameters:
-
-* `train_adv_method` - method used to craft adversarial examples during
-  training. It can be one of the following:
-
-  * `clean` - perform regular training with clean examples;
-  * `pgd_EPS_STEP_NITER` - use non-targeted PGD with maximum perturbation size
-    equal to `EPS`, step size equal to `STEP`
-    and number of iterations equal to `NITER`. The perturbation size and step
-    size are expected to be integers between 1 and 255.
-  * `pgdll_EPS_STEP_NITER` - use targeted PGD, where the target class is the
-    least likely prediction of the network.
-  * `pgdrnd_EPS_STEP_NITER` - use targeted PGD, where the target class is chosen
-    randomly.
-
-* `train_lp_weight` - weight of the adversarial logit pairing loss. If zero or
-  negative, no logit pairing is performed and training is done using
-  mixed-minibatch PGD. If positive, the adversarial logit pairing term is added
-  to the loss.
-
-Below is an example of how to run training with adversarial logit pairing on
-ImageNet 64x64:
-
-```
-python train.py \
-  --model_name="resnet_v2_50" \
-  --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5" \
-  --dataset="imagenet" \
-  --dataset_image_size=64 \
-  --output_dir="/tmp/adv_train" \
-  --imagenet_data_dir="${IMAGENET_DIR}"
-```
-
-### Fine tuning
-
-The provided training script can be used to fine-tune a pre-trained checkpoint.
-The following command does this:
-
-```
-# Fine-tuning adds the following additional arguments:
-# - SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT - comma-separated list of scopes of
-#   variables which should not be loaded from the checkpoint (and for which the
-#   default initialization should be used instead).
-# SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT should be either same or a subset of -# LIST_OF_SCOPES_OF_TRAINABLE_VARS. -# - LIST_OF_SCOPES_OF_TRAINABLE_VARS - comma separated list of scopes of -# trainable variables. Only variables which are prefixed with these scopes -# will be trained. -# - PATH_TO_PRETRAINED_CHECKPOINT - directory with pretrained checkpoint which -# is used as initialization for fine tuning. -# -python train.py \ - --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \ - --model_name="${MODEL_NAME}" \ - --moving_average_decay="${MOVING_AVG_DECAY}" \ - --hparams="${HYPERPARAMETERS}" \ - --dataset="${DATASET}" \ - --dataset_image_size="${IMAGE_SIZE}" \ - --output_dir="${OUTPUT_DIRECTORY}" \ - --imagenet_data_dir="${IMAGENET_DIR}" \ - --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \ - --finetune_exclude_pretrained_scopes="${SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT}" \ - --finetune_trainable_scopes="${LIST_OF_SCOPES_OF_TRAINABLE_VARS}" \ - --finetune_checkpoint_path="${PATH_TO_PRETRAINED_CHECKPOINT}" -``` - -Below is an example of how to fine tune last few layers of the model on -Tiny Imagenet dataset: - -``` -python train.py \ - --model_name="resnet_v2_50" \ - --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5,learning_rate=0.02" \ - --dataset="tiny_imagenet" \ - --dataset_image_size=64 \ - --output_dir="/tmp/adv_finetune" \ - --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \ - --finetune_exclude_pretrained_scopes="resnet_v2_50/logits" \ - --finetune_trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \ - --finetune_checkpoint_path="/tmp/adv_train" -``` - -### Evaluation - -Following command runs evaluation: - -``` -# Following arguments should be provided for eval: -# - TRAINING_DIRECTORY - directory where training checkpoints are saved. -# - TRAINABLE_SCOPES - when loading checkpoint which was obtained by fine tuning -# this argument should be the same as LIST_OF_SCOPES_OF_TRAINABLE_VARS -# during training. Otherwise it should be empty. -# This is needed to properly load exponential moving average variables. -# If exponential moving averages are disabled then this flag could be -# omitted. -# - EVAL_SUBDIR_NAME - name of the subdirectory inside TRAINING_DIRECTORY -# where evaluation code will be saving event files. -# - DATASET - name of the dataset. -# - IMAGE_SIZE - size of the image in the dataset. -# - DATSET_SPLIT_NAME - name of the split in the dataset, -# either 'train' or 'validation'. Default is 'validation'. -# - MODEL_NAME - name of the model. -# - MOVING_AVG_DECAY - decay rate for exponential moving average. -# - ADV_METHOD_FOR_EVAL - should be "clean" to evaluate on clean example or -# description of the adversarial method to evaluate on adversarial examples. -# - HYPERPARAMETERS - hyperparameters, only "eval_batch_size" matters for eval -# - NUMBER_OF_EXAMPLES - how many examples from the dataset use for evaluation, -# specify -1 to use all examples. -# - EVAL_ONCE - if True then evaluate only once, otherwise keep evaluation -# running repeatedly on new checkpoints. Repeated evaluation might be useful -# when running concurrent with training. -# - IMAGENET_DIR - directory with ImageNet dataset in TFRecord format. -# - TINY_IMAGENET_DIR - directory with Tiny ImageNet dataset in TFRecord format. 
-#
-python eval.py \
-  --train_dir="${TRAINING_DIRECTORY}" \
-  --trainable_scopes="${TRAINABLE_SCOPES}" \
-  --eval_name="${EVAL_SUBDIR_NAME}" \
-  --dataset="${DATASET}" \
-  --dataset_image_size="${IMAGE_SIZE}" \
-  --split_name="${DATSET_SPLIT_NAME}" \
-  --model_name="${MODEL_NAME}" \
-  --moving_average_decay="${MOVING_AVG_DECAY}" \
-  --adv_method="${ADV_METHOD_FOR_EVAL}" \
-  --hparams="${HYPERPARAMETERS}" \
-  --num_examples="${NUMBER_OF_EXAMPLES}" \
-  --eval_once="${EVAL_ONCE}" \
-  --imagenet_data_dir="${IMAGENET_DIR}" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-Example of running evaluation on 10,000 clean examples from the ImageNet
-training set:
-
-```
-python eval.py \
-  --train_dir=/tmp/adv_train \
-  --dataset=imagenet \
-  --dataset_image_size=64 \
-  --split_name=train \
-  --adv_method=clean \
-  --hparams="eval_batch_size=50" \
-  --num_examples=10000 \
-  --eval_once=True \
-  --imagenet_data_dir="${IMAGENET_DIR}"
-```
-
-Example of running evaluation on adversarial images generated from the Tiny ImageNet
-validation set using the fine-tuned checkpoint:
-
-```
-python eval.py \
-  --train_dir=/tmp/adv_finetune \
-  --trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \
-  --dataset=tiny_imagenet \
-  --dataset_image_size=64 \
-  --adv_method=pgdrnd_16_2_10 \
-  --hparams="eval_batch_size=50" \
-  --eval_once=True \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-### Pre-trained models
-
-The following pre-trained checkpoints are released with this code:
-
-| Model | Dataset | Accuracy on clean images | Accuracy on `pgdll_16_1_20` | Accuracy on
    `pgdll_16_2_10` | -| ----------- | ------------ | --------------- | --------------------------- | -------------- | -| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_base_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 60.5% | 1.8% | 3.5% | -| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_alp025_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 55.7% | 27.5% | 27.8% | -| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_base_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 69.2% | 0.1% | 0.3% | -| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_alp05_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 72.0% | 41.3% | 40.8% | - - -* All provided checkpoints were initially trained with exponential moving - average. However for ease of use they were re-saved without it. - So to load and use provided checkpoints you need to specify - `--moving_average_decay=-1` flag. -* All ALP models were trained with `pgdll_16_2_10` adversarial examples. -* All Tiny Imagenet models were obtained by fine tuning corresponding - ImageNet 64x64 models. ALP-trained models were fine tuned with ALP. diff --git a/spaces/NoriZC/vits-models/text/__init__.py b/spaces/NoriZC/vits-models/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/NoriZC/vits-models/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/OAOA/DifFace/basicsr/ops/__init__.py b/spaces/OAOA/DifFace/basicsr/ops/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OAOA/DifFace/utils/util_net.py b/spaces/OAOA/DifFace/utils/util_net.py deleted file mode 100644 index d830f041bff3020ebabebfa5ac36c4e09052d123..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/utils/util_net.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Power by Zongsheng Yue 2021-11-24 20:29:36 - -import math -import torch -from pathlib import Path -from collections import OrderedDict -import torch.nn.functional as F - -def calculate_parameters(net): - out = 0 - for param in net.parameters(): - out += param.numel() - return out - -def pad_input(x, mod): - h, w = x.shape[-2:] - bottom = int(math.ceil(h/mod)*mod -h) - right = int(math.ceil(w/mod)*mod - w) - x_pad = F.pad(x, pad=(0, right, 0, bottom), mode='reflect') - return x_pad - -def forward_chop(net, x, net_kwargs=None, scale=1, shave=10, min_size=160000): - n_GPUs = 1 - b, c, h, w = x.size() - h_half, w_half = h // 2, w // 2 - h_size, w_size = h_half + shave, w_half + shave - lr_list = [ - x[:, :, 0:h_size, 0:w_size], - x[:, :, 0:h_size, (w - w_size):w], - x[:, :, (h - h_size):h, 0:w_size], - x[:, :, (h - h_size):h, (w - w_size):w]] - - if w_size * h_size < min_size: - sr_list = [] - for i in range(0, 4, n_GPUs): - lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0) - if net_kwargs is None: - sr_batch = net(lr_batch) - else: - sr_batch = net(lr_batch, **net_kwargs) - sr_list.extend(sr_batch.chunk(n_GPUs, dim=0)) - else: - sr_list = [ - forward_chop(patch, shave=shave, min_size=min_size) \ - for patch in lr_list - ] - - h, w = scale * h, scale * w - h_half, w_half = scale * h_half, scale * w_half - h_size, w_size = scale * h_size, scale * w_size - shave *= scale - - output = x.new(b, c, h, w) - output[:, :, 0:h_half, 0:w_half] \ - = sr_list[0][:, :, 0:h_half, 0:w_half] - output[:, :, 0:h_half, w_half:w] \ - = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size] - output[:, :, h_half:h, 0:w_half] \ - = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half] - output[:, :, h_half:h, w_half:w] \ - = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size] - - return output - -def measure_time(net, inputs, num_forward=100): - ''' - Measuring the average runing time (seconds) for pytorch. 
- out = net(*inputs) - ''' - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - - start.record() - with torch.set_grad_enabled(False): - for _ in range(num_forward): - out = net(*inputs) - end.record() - - torch.cuda.synchronize() - - return start.elapsed_time(end) / 1000 - -def reload_model(model, ckpt): - if list(model.state_dict().keys())[0].startswith('module.'): - if list(ckpt.keys())[0].startswith('module.'): - ckpt = ckpt - else: - ckpt = OrderedDict({f'module.{key}':value for key, value in ckpt.items()}) - else: - if list(ckpt.keys())[0].startswith('module.'): - ckpt = OrderedDict({key[7:]:value for key, value in ckpt.items()}) - else: - ckpt = ckpt - model.load_state_dict(ckpt) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/docs/make.bat b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/docs/make.bat deleted file mode 100644 index baa9d02a79266ed17e0841f08a83931d46583393..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/docs/make.bat +++ /dev/null @@ -1,36 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=python -msphinx -) -set SOURCEDIR=. -set BUILDDIR=_build -set SPHINXPROJ=fairseq - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The Sphinx module was not found. Make sure you have Sphinx installed, - echo.then set the SPHINXBUILD environment variable to point to the full - echo.path of the 'sphinx-build' executable. Alternatively you may add the - echo.Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh deleted file mode 100644 index 0428d8bef9d426ac3e664cd281ce0b688f5f580f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
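-#
-# This script encodes the Tatoeba test split in both translation directions with
-# the CRISS model (save_encoder.py) and then reports sentence-retrieval accuracy
-# between the two sets of embeddings (sentence_retrieval/encoder_analysis.py).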
-# -source_lang=kk_KZ -target_lang=en_XX -MODEL=criss_checkpoints/criss.3rd.pt -SPM=criss_checkpoints/sentence.bpe.model -SPLIT=test -LANG_DICT=criss_checkpoints/lang_dict.txt -ENCODER_ANALYSIS=sentence_retrieval/encoder_analysis.py -SAVE_ENCODER=save_encoder.py -ENCODER_SAVE_ROOT=sentence_embeddings/$MODEL - - - -DATA_DIR=data_tmp -INPUT_DIR=$DATA_DIR/${source_lang}-${target_lang}-tatoeba -ENCODER_SAVE_DIR=${ENCODER_SAVE_ROOT}/${source_lang}-${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${source_lang} - -# Save encoder outputs for source sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --task translation_multi_simple_epoch \ - --lang-dict ${LANG_DICT} \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${source_lang}-${target_lang} \ - -s ${source_lang} -t ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${source_lang} - -# Save encoder outputs for target sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --lang-dict ${LANG_DICT} \ - --task translation_multi_simple_epoch \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${target_lang}-${source_lang} \ - -t ${source_lang} -s ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${target_lang} - -# Analyze sentence retrieval accuracy -python $ENCODER_ANALYSIS --langs "${source_lang},${target_lang}" ${ENCODER_SAVE_DIR} diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/tasks/speech_recognition.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/tasks/speech_recognition.py deleted file mode 100644 index d9f011d55ff4fdfeb4c04ca790c314d685708c3a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/tasks/speech_recognition.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import json -import os -import re -import sys - -import torch -from examples.speech_recognition.data import AsrDataset -from examples.speech_recognition.data.replabels import replabel_symbol -from fairseq.data import Dictionary -from fairseq.tasks import LegacyFairseqTask, register_task - - -def get_asr_dataset_from_json(data_json_path, tgt_dict): - """ - Parse data json and create dataset. - See scripts/asr_prep_json.py which pack json from raw files - - Json example: - { - "utts": { - "4771-29403-0025": { - "input": { - "length_ms": 170, - "path": "/tmp/file1.flac" - }, - "output": { - "text": "HELLO \n", - "token": "HE LLO", - "tokenid": "4815, 861" - } - }, - "1564-142299-0096": { - ... 
- } - } - """ - if not os.path.isfile(data_json_path): - raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) - with open(data_json_path, "rb") as f: - data_samples = json.load(f)["utts"] - assert len(data_samples) != 0 - sorted_samples = sorted( - data_samples.items(), - key=lambda sample: int(sample[1]["input"]["length_ms"]), - reverse=True, - ) - aud_paths = [s[1]["input"]["path"] for s in sorted_samples] - ids = [s[0] for s in sorted_samples] - speakers = [] - for s in sorted_samples: - m = re.search("(.+?)-(.+?)-(.+?)", s[0]) - speakers.append(m.group(1) + "_" + m.group(2)) - frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] - tgt = [ - [int(i) for i in s[1]["output"]["tokenid"].split(", ")] - for s in sorted_samples - ] - # append eos - tgt = [[*t, tgt_dict.eos()] for t in tgt] - return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) - - -@register_task("speech_recognition") -class SpeechRecognitionTask(LegacyFairseqTask): - """ - Task for training speech recognition model. - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument("data", help="path to data directory") - parser.add_argument( - "--silence-token", default="\u2581", help="token for silence (used by w2l)" - ) - parser.add_argument( - "--max-source-positions", - default=sys.maxsize, - type=int, - metavar="N", - help="max number of frames in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - - def __init__(self, args, tgt_dict): - super().__init__(args) - self.tgt_dict = tgt_dict - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries).""" - dict_path = os.path.join(args.data, "dict.txt") - if not os.path.isfile(dict_path): - raise FileNotFoundError("Dict not found: {}".format(dict_path)) - tgt_dict = Dictionary.load(dict_path) - - if args.criterion == "ctc_loss": - tgt_dict.add_symbol("") - elif args.criterion == "asg_loss": - for i in range(1, args.max_replabel + 1): - tgt_dict.add_symbol(replabel_symbol(i)) - - print("| dictionary: {} types".format(len(tgt_dict))) - return cls(args, tgt_dict) - - def load_dataset(self, split, combine=False, **kwargs): - """Load a given dataset split. 
- - Args: - split (str): name of the split (e.g., train, valid, test) - """ - data_json_path = os.path.join(self.args.data, "{}.json".format(split)) - self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) - - def build_generator(self, models, args, **unused): - w2l_decoder = getattr(args, "w2l_decoder", None) - if w2l_decoder == "viterbi": - from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder - - return W2lViterbiDecoder(args, self.target_dictionary) - elif w2l_decoder == "kenlm": - from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder - - return W2lKenLMDecoder(args, self.target_dictionary) - elif w2l_decoder == "fairseqlm": - from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder - - return W2lFairseqLMDecoder(args, self.target_dictionary) - else: - return super().build_generator(models, args) - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.tgt_dict - - @property - def source_dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary` (if applicable - for this task).""" - return None - - def max_positions(self): - """Return the max speech and sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_binaries.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_binaries.py deleted file mode 100644 index 4e207742625427f108f78bcd24d487a081b6ccf7..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_binaries.py +++ /dev/null @@ -1,1874 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
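-#
-# End-to-end smoke tests: each test writes a tiny dummy dataset, preprocesses it,
-# trains a very small model for a few updates through the fairseq command-line
-# entry points, and then runs generation or LM evaluation to check that the whole
-# pipeline still executes.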
- -import contextlib -import logging -import json -import os -import random -import sys -import tempfile -import unittest -from io import StringIO -from typing import List, Dict -import torch -from fairseq import options -from fairseq_cli import eval_lm, train -from tests.utils import ( - create_dummy_data, - generate_main, - preprocess_lm_data, - preprocess_summarization_data, - preprocess_translation_data, - create_laser_data_and_config_json, - train_translation_model, - train_language_model, -) - - -try: - import transformers # noqa - - has_hf_transformers = True -except ImportError: - has_hf_transformers = False - - -class TestTranslation(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_fconv(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_fconv") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model(data_dir, "fconv_iwslt_de_en") - generate_main(data_dir) - - def test_raw(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, ["--dataset-impl", "raw"]) - train_translation_model( - data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"] - ) - generate_main(data_dir, ["--dataset-impl", "raw"]) - - def test_update_freq(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_update_freq") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"] - ) - generate_main(data_dir) - - def test_max_positions(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_max_positions") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - with self.assertRaises(Exception) as context: - train_translation_model( - data_dir, - "fconv_iwslt_de_en", - ["--max-target-positions", "5"], - ) - self.assertTrue( - "skip this example with --skip-invalid-size-inputs-valid-test" - in str(context.exception) - ) - train_translation_model( - data_dir, - "fconv_iwslt_de_en", - [ - "--max-target-positions", - "5", - "--skip-invalid-size-inputs-valid-test", - ], - ) - with self.assertRaises(Exception) as context: - generate_main(data_dir) - generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"]) - - def test_generation(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_sampling") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model(data_dir, "fconv_iwslt_de_en") - generate_main( - data_dir, - [ - "--sampling", - "--temperature", - "2", - "--beam", - "2", - "--nbest", - "2", - ], - ) - generate_main( - data_dir, - [ - "--sampling", - "--sampling-topk", - "3", - "--beam", - "2", - "--nbest", - "2", - ], - ) - generate_main( - data_dir, - [ - "--sampling", - "--sampling-topp", - "0.2", - "--beam", - "2", - "--nbest", - "2", - ], - ) - generate_main( - data_dir, - [ - "--diversity-rate", - "0.5", - "--beam", - "6", - ], - ) - with self.assertRaises(ValueError): - generate_main( - data_dir, - [ - "--diverse-beam-groups", - "4", - "--match-source-len", - ], - ) - generate_main(data_dir, ["--prefix-size", "2"]) - generate_main(data_dir, ["--retain-dropout"]) - - def 
test_eval_bleu(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "fconv_iwslt_de_en", - [ - "--eval-bleu", - "--eval-bleu-print-samples", - "--eval-bleu-remove-bpe", - "--eval-bleu-detok", - "space", - "--eval-bleu-args", - '{"beam": 4, "min_len": 10}', - ], - ) - - def test_lstm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lstm") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "lstm_wiseman_iwslt_de_en", - [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--decoder-out-embed-dim", - "8", - ], - ) - generate_main(data_dir) - - def test_lstm_bidirectional(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "lstm", - [ - "--encoder-layers", - "2", - "--encoder-bidirectional", - "--encoder-hidden-size", - "16", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--decoder-out-embed-dim", - "8", - "--decoder-layers", - "2", - ], - ) - generate_main(data_dir) - - def test_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_transformer") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "transformer_iwslt_de_en", - [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - ], - run_validation=True, - ) - generate_main(data_dir) - - def test_multilingual_transformer(self): - # test with all combinations of encoder/decoder lang tokens - encoder_langtok_flags = [ - [], - ["--encoder-langtok", "src"], - ["--encoder-langtok", "tgt"], - ] - decoder_langtok_flags = [[], ["--decoder-langtok"]] - with contextlib.redirect_stdout(StringIO()): - for i in range(len(encoder_langtok_flags)): - for j in range(len(decoder_langtok_flags)): - enc_ltok_flag = encoder_langtok_flags[i] - dec_ltok_flag = decoder_langtok_flags[j] - with tempfile.TemporaryDirectory( - f"test_multilingual_transformer_{i}_{j}" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - arch="multilingual_transformer", - task="multilingual_translation", - extra_flags=[ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - ] - + enc_ltok_flag - + dec_ltok_flag, - lang_flags=["--lang-pairs", "in-out,out-in"], - run_validation=True, - extra_valid_flags=enc_ltok_flag + dec_ltok_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--task", - "multilingual_translation", - "--lang-pairs", - "in-out,out-in", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ltok_flag - + dec_ltok_flag, - ) - - @unittest.skipIf( - sys.platform.lower() == "darwin", "skip latent depth test on MacOS" - ) - def test_multilingual_translation_latent_depth(self): - # test with latent depth in encoder, decoder, or both - encoder_latent_layer = [[], ["--encoder-latent-layer"]] - decoder_latent_layer = [[], ["--decoder-latent-layer"]] - with 
contextlib.redirect_stdout(StringIO()): - for i in range(len(encoder_latent_layer)): - for j in range(len(decoder_latent_layer)): - if i == 0 and j == 0: - continue - enc_ll_flag = encoder_latent_layer[i] - dec_ll_flag = decoder_latent_layer[j] - with tempfile.TemporaryDirectory( - f"test_multilingual_translation_latent_depth_{i}_{j}" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data( - data_dir, extra_flags=["--joined-dictionary"] - ) - train_translation_model( - data_dir, - arch="latent_multilingual_transformer", - task="multilingual_translation_latent_depth", - extra_flags=[ - "--user-dir", - "examples/latent_depth/latent_depth_src", - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--share-encoders", - "--share-decoders", - "--sparsity-weight", - "0.1", - ] - + enc_ll_flag - + dec_ll_flag, - lang_flags=["--lang-pairs", "in-out,out-in"], - run_validation=True, - extra_valid_flags=[ - "--user-dir", - "examples/latent_depth/latent_depth_src", - ] - + enc_ll_flag - + dec_ll_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--user-dir", - "examples/latent_depth/latent_depth_src", - "--task", - "multilingual_translation_latent_depth", - "--lang-pairs", - "in-out,out-in", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ll_flag - + dec_ll_flag, - ) - - def test_translation_multi_simple_epoch(self): - # test with all combinations of encoder/decoder lang tokens - encoder_langtok_flags = [ - [], - ["--encoder-langtok", "src"], - ["--encoder-langtok", "tgt"], - ] - decoder_langtok_flags = [[], ["--decoder-langtok"]] - with contextlib.redirect_stdout(StringIO()): - for i in range(len(encoder_langtok_flags)): - for j in range(len(decoder_langtok_flags)): - enc_ltok_flag = encoder_langtok_flags[i] - dec_ltok_flag = decoder_langtok_flags[j] - with tempfile.TemporaryDirectory( - f"test_translation_multi_simple_epoch_{i}_{j}" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data( - data_dir, extra_flags=["--joined-dictionary"] - ) - train_translation_model( - data_dir, - arch="transformer", - task="translation_multi_simple_epoch", - extra_flags=[ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--sampling-method", - "temperature", - "--sampling-temperature", - "1.5", - "--virtual-epoch-size", - "1000", - ] - + enc_ltok_flag - + dec_ltok_flag, - lang_flags=["--lang-pairs", "in-out,out-in"], - run_validation=True, - extra_valid_flags=enc_ltok_flag + dec_ltok_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--task", - "translation_multi_simple_epoch", - "--lang-pairs", - "in-out,out-in", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ltok_flag - + dec_ltok_flag, - ) - - def test_translation_multi_simple_epoch_no_vepoch(self): - # test with all combinations of encoder/decoder lang tokens - with contextlib.redirect_stdout(StringIO()): - enc_ltok_flag = ["--encoder-langtok", "src"] - dec_ltok_flag = ["--decoder-langtok"] - with tempfile.TemporaryDirectory( - "test_translation_multi_simple_epoch_dict" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, extra_flags=[]) - train_translation_model( - data_dir, - arch="transformer", - task="translation_multi_simple_epoch", - extra_flags=[ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - 
"--sampling-method", - "temperature", - "--sampling-temperature", - "1.5", - ] - + enc_ltok_flag - + dec_ltok_flag, - lang_flags=["--lang-pairs", "in-out"], - run_validation=True, - extra_valid_flags=enc_ltok_flag + dec_ltok_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--task", - "translation_multi_simple_epoch", - "--lang-pairs", - "in-out", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ltok_flag - + dec_ltok_flag, - ) - - def test_translation_multi_simple_epoch_dicts(self): - # test with all combinations of encoder/decoder lang tokens - with contextlib.redirect_stdout(StringIO()): - enc_ltok_flag = ["--encoder-langtok", "src"] - dec_ltok_flag = ["--decoder-langtok"] - with tempfile.TemporaryDirectory( - "test_translation_multi_simple_epoch_dict" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, extra_flags=[]) - train_translation_model( - data_dir, - arch="transformer", - task="translation_multi_simple_epoch", - extra_flags=[ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--sampling-method", - "temperature", - "--sampling-temperature", - "1.5", - "--virtual-epoch-size", - "1000", - ] - + enc_ltok_flag - + dec_ltok_flag, - lang_flags=["--lang-pairs", "in-out"], - run_validation=True, - extra_valid_flags=enc_ltok_flag + dec_ltok_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--task", - "translation_multi_simple_epoch", - "--lang-pairs", - "in-out", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ltok_flag - + dec_ltok_flag, - ) - - def test_translation_multi_simple_epoch_src_tgt_dict_spec(self): - # test the specification of explicit --src-dict and --tgt-dict - with contextlib.redirect_stdout(StringIO()): - enc_ltok_flag = ["--encoder-langtok", "src"] - dec_ltok_flag = ["--decoder-langtok"] - with tempfile.TemporaryDirectory( - "test_translation_multi_simple_epoch_dict" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, extra_flags=[]) - train_translation_model( - data_dir, - arch="transformer", - task="translation_multi_simple_epoch", - extra_flags=[ - "--source-dict", - f"{data_dir}/dict.in.txt", - "--target-dict", - f"{data_dir}/dict.out.txt", - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--sampling-method", - "temperature", - "--sampling-temperature", - "1.5", - "--virtual-epoch-size", - "1000", - ] - + enc_ltok_flag - + dec_ltok_flag, - lang_flags=["--lang-pairs", "in-out"], - run_validation=True, - extra_valid_flags=enc_ltok_flag + dec_ltok_flag, - ) - generate_main( - data_dir, - extra_flags=[ - "--task", - "translation_multi_simple_epoch", - "--lang-pairs", - "in-out", - "--source-lang", - "in", - "--target-lang", - "out", - ] - + enc_ltok_flag - + dec_ltok_flag, - ) - - def test_transformer_cross_self_attention(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_transformer_cross_self_attention" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "transformer_iwslt_de_en", - [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--no-cross-attention", - "--cross-self-attention", - ], - run_validation=True, - ) - generate_main(data_dir, extra_flags=[]) - - def 
test_transformer_pointer_generator(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_transformer_pointer_generator" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_summarization_data(data_dir) - train_translation_model( - data_dir, - "transformer_pointer_generator", - extra_flags=[ - "--user-dir", - "examples/pointer_generator/pointer_generator_src", - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--alignment-layer", - "-1", - "--alignment-heads", - "1", - "--source-position-markers", - "0", - ], - run_validation=True, - extra_valid_flags=[ - "--user-dir", - "examples/pointer_generator/pointer_generator_src", - ], - ) - generate_main( - data_dir, - extra_flags=[ - "--user-dir", - "examples/pointer_generator/pointer_generator_src", - ], - ) - - def test_lightconv(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lightconv") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "lightconv_iwslt_de_en", - [ - "--encoder-conv-type", - "lightweight", - "--decoder-conv-type", - "lightweight", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - ], - ) - generate_main(data_dir) - - def test_dynamicconv(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "lightconv_iwslt_de_en", - [ - "--encoder-conv-type", - "dynamic", - "--decoder-conv-type", - "dynamic", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - ], - ) - generate_main(data_dir) - - def test_cmlm_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, ["--joined-dictionary"]) - train_translation_model( - data_dir, - "cmlm_transformer", - [ - "--apply-bert-init", - "--criterion", - "nat_loss", - "--noise", - "full_mask", - "--pred-length-offset", - "--length-loss-factor", - "0.1", - ], - task="translation_lev", - ) - generate_main( - data_dir, - [ - "--task", - "translation_lev", - "--iter-decode-max-iter", - "9", - "--iter-decode-eos-penalty", - "0", - "--print-step", - ], - ) - - def test_nonautoregressive_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_nonautoregressive_transformer" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, ["--joined-dictionary"]) - train_translation_model( - data_dir, - "nonautoregressive_transformer", - [ - "--apply-bert-init", - "--src-embedding-copy", - "--criterion", - "nat_loss", - "--noise", - "full_mask", - "--pred-length-offset", - "--length-loss-factor", - "0.1", - ], - task="translation_lev", - ) - generate_main( - data_dir, - [ - "--task", - "translation_lev", - "--iter-decode-max-iter", - "0", - "--iter-decode-eos-penalty", - "0", - "--print-step", - ], - ) - - # def test_nat_crf_transformer(self): - # with contextlib.redirect_stdout(StringIO()): - # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: - # create_dummy_data(data_dir) - # preprocess_translation_data(data_dir, ['--joined-dictionary']) - # train_translation_model(data_dir, 'nacrf_transformer', [ - # 
'--apply-bert-init', '--criterion', - # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', - # '--length-loss-factor', '0.1', - # '--word-ins-loss-factor', '0.5', - # '--crf-lowrank-approx', '1', - # '--crf-beam-approx', '1' - # ], task='translation_lev') - # generate_main(data_dir, [ - # '--task', 'translation_lev', - # '--iter-decode-max-iter', '0', - # '--iter-decode-eos-penalty', '0', - # '--print-step', - # ]) - - def test_iterative_nonautoregressive_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_iterative_nonautoregressive_transformer" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, ["--joined-dictionary"]) - train_translation_model( - data_dir, - "iterative_nonautoregressive_transformer", - [ - "--apply-bert-init", - "--src-embedding-copy", - "--criterion", - "nat_loss", - "--noise", - "full_mask", - "--stochastic-approx", - "--dae-ratio", - "0.5", - "--train-step", - "3", - ], - task="translation_lev", - ) - generate_main( - data_dir, - [ - "--task", - "translation_lev", - "--iter-decode-max-iter", - "9", - "--iter-decode-eos-penalty", - "0", - "--print-step", - ], - ) - - def test_insertion_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir, ["--joined-dictionary"]) - train_translation_model( - data_dir, - "insertion_transformer", - [ - "--apply-bert-init", - "--criterion", - "nat_loss", - "--noise", - "random_mask", - ], - task="translation_lev", - ) - generate_main( - data_dir, - [ - "--task", - "translation_lev", - "--iter-decode-max-iter", - "9", - "--iter-decode-eos-penalty", - "0", - "--print-step", - ], - ) - - def test_mixture_of_experts(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_moe") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "transformer_iwslt_de_en", - [ - "--task", - "translation_moe", - "--user-dir", - "examples/translation_moe/translation_moe_src", - "--method", - "hMoElp", - "--mean-pool-gating-network", - "--num-experts", - "3", - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - ], - ) - generate_main( - data_dir, - [ - "--task", - "translation_moe", - "--user-dir", - "examples/translation_moe/translation_moe_src", - "--method", - "hMoElp", - "--mean-pool-gating-network", - "--num-experts", - "3", - "--gen-expert", - "0", - ], - ) - - def test_alignment(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_alignment") as data_dir: - create_dummy_data(data_dir, alignment=True) - preprocess_translation_data(data_dir, ["--align-suffix", "align"]) - train_translation_model( - data_dir, - "transformer_align", - [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--load-alignments", - "--alignment-layer", - "1", - "--criterion", - "label_smoothed_cross_entropy_with_alignment", - ], - run_validation=True, - ) - generate_main(data_dir) - - def test_laser_lstm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir: - laser_config_file = create_laser_data_and_config_json(data_dir) - train_translation_model( - 
laser_config_file.name, - "laser_lstm", - [ - "--user-dir", - "examples/laser/laser_src", - "--weighting-alpha", - "0.3", - "--encoder-bidirectional", - "--encoder-hidden-size", - "512", - "--encoder-layers", - "5", - "--decoder-layers", - "1", - "--encoder-embed-dim", - "320", - "--decoder-embed-dim", - "320", - "--decoder-lang-embed-dim", - "32", - "--save-dir", - data_dir, - "--disable-validation", - ], - task="laser", - lang_flags=[], - ) - - def test_laser_transformer(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir: - laser_config_file = create_laser_data_and_config_json(data_dir) - train_translation_model( - laser_config_file.name, - "laser_transformer", - [ - "--user-dir", - "examples/laser/laser_src", - "--weighting-alpha", - "0.3", - "--encoder-embed-dim", - "320", - "--decoder-embed-dim", - "320", - "--decoder-lang-embed-dim", - "32", - "--save-dir", - data_dir, - "--disable-validation", - ], - task="laser", - lang_flags=[], - ) - - def test_alignment_full_context(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_alignment") as data_dir: - create_dummy_data(data_dir, alignment=True) - preprocess_translation_data(data_dir, ["--align-suffix", "align"]) - train_translation_model( - data_dir, - "transformer_align", - [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--load-alignments", - "--alignment-layer", - "1", - "--criterion", - "label_smoothed_cross_entropy_with_alignment", - "--full-context-alignment", - ], - run_validation=True, - ) - generate_main(data_dir) - - def test_transformer_layerdrop(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - train_translation_model( - data_dir, - "transformer_iwslt_de_en", - [ - "--encoder-layers", - "3", - "--decoder-layers", - "3", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--encoder-layerdrop", - "0.01", - "--decoder-layerdrop", - "0.01", - ], - ) - generate_main(data_dir) - generate_main( - data_dir, - [ - "--model-overrides", - "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}", - ], - ) - - -class TestStories(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_fconv_self_att_wp(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir: - create_dummy_data(data_dir) - preprocess_translation_data(data_dir) - config = [ - "--encoder-layers", - "[(128, 3)] * 2", - "--decoder-layers", - "[(128, 3)] * 2", - "--decoder-attention", - "True", - "--encoder-attention", - "False", - "--gated-attention", - "True", - "--self-attention", - "True", - "--project-input", - "True", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--decoder-out-embed-dim", - "8", - "--multihead-self-attention-nheads", - "2", - ] - train_translation_model(data_dir, "fconv_self_att_wp", config) - generate_main(data_dir) - - # fusion model - os.rename( - os.path.join(data_dir, "checkpoint_last.pt"), - os.path.join(data_dir, "pretrained.pt"), - ) - config.extend( - [ - "--pretrained", - "True", - "--pretrained-checkpoint", - os.path.join(data_dir, "pretrained.pt"), - "--save-dir", - os.path.join(data_dir, 
"fusion_model"), - ] - ) - train_translation_model(data_dir, "fconv_self_att_wp", config) - - -class TestLanguageModeling(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_fconv_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "fconv_lm", - [ - "--decoder-layers", - "[(850, 3)] * 2 + [(1024,4)]", - "--decoder-embed-dim", - "280", - "--optimizer", - "nag", - "--lr", - "0.1", - ], - ) - eval_lm_main(data_dir) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - def test_transformer_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "transformer_lm", - ["--add-bos-token", '--nval', '1'], - run_validation=True, - ) - eval_lm_main(data_dir) - eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - def test_transformer_lm_with_adaptive_softmax(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_transformer_lm_with_adaptive_softmax" - ) as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "transformer_lm", - [ - "--add-bos-token", - "--criterion", - "adaptive_loss", - "--adaptive-softmax-cutoff", - "5,10,15", - ], - run_validation=True, - ) - eval_lm_main(data_dir) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - def test_lightconv_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "lightconv_lm", - ["--add-bos-token"], - run_validation=True, - ) - eval_lm_main(data_dir) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - def test_lstm_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "lstm_lm", - ["--add-bos-token"], - run_validation=True, - ) - eval_lm_main(data_dir) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - def test_lstm_lm_residuals(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_language_model( - data_dir, - "lstm_lm", - ["--add-bos-token", "--residuals"], - run_validation=True, - ) - eval_lm_main(data_dir) - generate_main( - data_dir, - [ - "--task", - "language_modeling", - "--sample-break-mode", - "eos", - "--tokens-per-sample", - "500", - ], - ) - - @unittest.skipIf(not has_hf_transformers, "skip test if transformers is 
missing") - def test_transformer_xl_bptt_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - task_flags = [ - "--user-dir", - "examples/truncated_bptt", - "--task", - "truncated_bptt_lm", - "--batch-size", - "2", - "--tokens-per-sample", - "50", - ] - train_language_model( - data_dir=data_dir, - arch="transformer_xl", - extra_flags=task_flags - + [ - "--n-layer", - "2", - ], - task="truncated_bptt_lm", - run_validation=True, - extra_valid_flags=task_flags, - ) - eval_lm_main(data_dir, extra_flags=task_flags) - # Train with activation offloading - train_language_model( - data_dir=data_dir, - arch="transformer_xl", - extra_flags=task_flags - + [ - "--n-layer", - "2", - "--offload-activations", - ], - task="truncated_bptt_lm", - run_validation=True, - extra_valid_flags=task_flags, - ) - - -class TestMaskedLanguageModel(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_legacy_masked_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_legacy_masked_language_model(data_dir, "masked_lm") - - def test_roberta_masked_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_masked_lm( - data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"] - ) - - def test_roberta_sentence_prediction(self): - num_classes = 3 - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: - create_dummy_roberta_head_data(data_dir, num_classes=num_classes) - preprocess_lm_data(os.path.join(data_dir, "input0")) - preprocess_lm_data(os.path.join(data_dir, "label")) - train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) - - def test_roberta_regression_single(self): - num_classes = 1 - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_roberta_regression_single" - ) as data_dir: - create_dummy_roberta_head_data( - data_dir, num_classes=num_classes, regression=True - ) - preprocess_lm_data(os.path.join(data_dir, "input0")) - train_roberta_head( - data_dir, - "roberta_base", - num_classes=num_classes, - extra_flags=["--regression-target"], - ) - - def test_roberta_regression_multiple(self): - num_classes = 3 - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_roberta_regression_multiple" - ) as data_dir: - create_dummy_roberta_head_data( - data_dir, num_classes=num_classes, regression=True - ) - preprocess_lm_data(os.path.join(data_dir, "input0")) - train_roberta_head( - data_dir, - "roberta_base", - num_classes=num_classes, - extra_flags=["--regression-target"], - ) - - def test_linformer_roberta_masked_lm(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_masked_lm( - data_dir, - "linformer_roberta_base", - extra_flags=[ - "--user-dir", - "examples/linformer/linformer_src", - "--encoder-layers", - "2", - ], - ) - - def test_linformer_roberta_sentence_prediction(self): - num_classes = 3 - with 
contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir: - create_dummy_roberta_head_data(data_dir, num_classes=num_classes) - preprocess_lm_data(os.path.join(data_dir, "input0")) - preprocess_lm_data(os.path.join(data_dir, "label")) - train_roberta_head( - data_dir, - "linformer_roberta_base", - num_classes=num_classes, - extra_flags=["--user-dir", "examples/linformer/linformer_src"], - ) - - def test_linformer_roberta_regression_single(self): - num_classes = 1 - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_linformer_roberta_regression_single" - ) as data_dir: - create_dummy_roberta_head_data( - data_dir, num_classes=num_classes, regression=True - ) - preprocess_lm_data(os.path.join(data_dir, "input0")) - train_roberta_head( - data_dir, - "linformer_roberta_base", - num_classes=num_classes, - extra_flags=[ - "--regression-target", - "--user-dir", - "examples/linformer/linformer_src", - ], - ) - - def test_linformer_roberta_regression_multiple(self): - num_classes = 3 - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory( - "test_linformer_roberta_regression_multiple" - ) as data_dir: - create_dummy_roberta_head_data( - data_dir, num_classes=num_classes, regression=True - ) - preprocess_lm_data(os.path.join(data_dir, "input0")) - train_roberta_head( - data_dir, - "linformer_roberta_base", - num_classes=num_classes, - extra_flags=[ - "--regression-target", - "--user-dir", - "examples/linformer/linformer_src", - ], - ) - - def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_mlm") as data_dir: - create_dummy_data(data_dir) - preprocess_lm_data(data_dir) - train_legacy_masked_language_model( - data_dir, - arch="masked_lm", - extra_args=("--encoder-learned-pos",) if learned_pos_emb else (), - ) - with tempfile.TemporaryDirectory( - "test_mlm_translation" - ) as translation_dir: - create_dummy_data(translation_dir) - preprocess_translation_data( - translation_dir, extra_flags=["--joined-dictionary"] - ) - # Train transformer with data_dir/checkpoint_last.pt - train_translation_model( - translation_dir, - arch="transformer_from_pretrained_xlm", - extra_flags=[ - "--decoder-layers", - "1", - "--decoder-embed-dim", - "32", - "--decoder-attention-heads", - "1", - "--decoder-ffn-embed-dim", - "32", - "--encoder-layers", - "1", - "--encoder-embed-dim", - "32", - "--encoder-attention-heads", - "1", - "--encoder-ffn-embed-dim", - "32", - "--pretrained-xlm-checkpoint", - "{}/checkpoint_last.pt".format(data_dir), - "--activation-fn", - "gelu", - "--max-source-positions", - "500", - "--max-target-positions", - "500", - ] - + ( - ["--encoder-learned-pos", "--decoder-learned-pos"] - if learned_pos_emb - else [] - ) - + (["--init-encoder-only"] if encoder_only else []), - task="translation_from_pretrained_xlm", - ) - - def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): - self._test_pretrained_masked_lm_for_translation(True, False) - - def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): - self._test_pretrained_masked_lm_for_translation(False, False) - - def test_pretrained_masked_lm_for_translation_encoder_only(self): - self._test_pretrained_masked_lm_for_translation(True, True) - - def test_r4f_roberta(self): - num_classes = 3 - with contextlib.redirect_stdout(StringIO()): - with 
tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir: - create_dummy_roberta_head_data(data_dir, num_classes=num_classes) - preprocess_lm_data(os.path.join(data_dir, "input0")) - preprocess_lm_data(os.path.join(data_dir, "label")) - train_roberta_head( - data_dir, - "roberta_base", - num_classes=num_classes, - extra_flags=[ - "--user-dir", - "examples/rxf/rxf_src", - "--criterion", - "sentence_prediction_r3f", - "--spectral-norm-classification-head", - ], - ) - - -def train_legacy_masked_language_model(data_dir, arch, extra_args=()): - train_parser = options.get_training_parser() - # TODO: langs should be in and out right? - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - "cross_lingual_lm", - data_dir, - "--arch", - arch, - # Optimizer args - "--optimizer", - "adam", - "--lr-scheduler", - "reduce_lr_on_plateau", - "--lr-shrink", - "0.5", - "--lr", - "0.0001", - "--stop-min-lr", - "1e-09", - # dropout, attention args - "--dropout", - "0.1", - "--attention-dropout", - "0.1", - # MLM args - "--criterion", - "legacy_masked_lm_loss", - "--masked-lm-only", - "--monolingual-langs", - "in,out", - "--num-segment", - "5", - # Transformer args: use a small transformer model for fast training - "--encoder-layers", - "1", - "--encoder-embed-dim", - "32", - "--encoder-attention-heads", - "1", - "--encoder-ffn-embed-dim", - "32", - # Other training args - "--max-tokens", - "500", - "--tokens-per-sample", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - "--no-progress-bar", - "--distributed-world-size", - "1", - "--dataset-impl", - "raw", - "--num-workers", - "0", - ] - + list(extra_args), - ) - train.main(train_args) - - -class TestOptimizers(unittest.TestCase): - def setUp(self): - logging.disable(logging.CRITICAL) - - def tearDown(self): - logging.disable(logging.NOTSET) - - def test_optimizers(self): - with contextlib.redirect_stdout(StringIO()): - with tempfile.TemporaryDirectory("test_optimizers") as data_dir: - # Use just a bit of data and tiny model to keep this test runtime reasonable - create_dummy_data(data_dir, num_examples=10, maxlen=5) - preprocess_translation_data(data_dir) - optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"] - last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt") - for optimizer in optimizers: - if os.path.exists(last_checkpoint): - os.remove(last_checkpoint) - train_translation_model( - data_dir, - "lstm", - [ - "--required-batch-size-multiple", - "1", - "--encoder-layers", - "1", - "--encoder-hidden-size", - "32", - "--decoder-layers", - "1", - "--optimizer", - optimizer, - ], - ) - generate_main(data_dir) - - -def read_last_log_entry( - logs: List[logging.LogRecord], logger_name: str -) -> Dict[str, float]: - for x in reversed(logs): - if x.name == logger_name: - return json.loads(x.message) - raise ValueError(f"No entries from {logger_name} found in captured logs") - - -class TestActivationCheckpointing(unittest.TestCase): - base_flags = [ - "--encoder-layers", - "2", - "--decoder-layers", - "2", - "--encoder-embed-dim", - "8", - "--decoder-embed-dim", - "8", - "--restore-file", - "x.pt", - "--log-format", - "json", - "--log-interval", - "1", - "--max-update", - "2", - ] - - def _train(self, data_dir, extra_flags): - with self.assertLogs() as logs: - train_translation_model( - data_dir, - "transformer_iwslt_de_en", - self.base_flags + extra_flags, - run_validation=True, - extra_valid_flags=["--log-format", "json"], - ) - return logs.records - - def 
test_activation_offloading_does_not_change_metrics(self): - """Neither ----checkpoint-activations nor --offload-activations should change loss""" - with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: - - with self.assertLogs(): - create_dummy_data(data_dir, num_examples=20) - preprocess_translation_data(data_dir) - offload_logs = self._train(data_dir, ["--offload-activations"]) - baseline_logs = self._train(data_dir, []) - - assert len(baseline_logs) == len(offload_logs) - - baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") - offload_valid_stats = read_last_log_entry(offload_logs, "valid") - baseline_train_stats = read_last_log_entry(baseline_logs, "train") - offload_train_stats = read_last_log_entry(offload_logs, "train") - - assert ( - baseline_train_stats["train_loss"] == offload_train_stats["train_loss"] - ) - assert ( - baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"] - ) - - def test_activation_checkpointing_does_not_change_metrics(self): - """--checkpoint-activations should not change loss""" - - with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: - with self.assertLogs(): - create_dummy_data(data_dir, num_examples=20) - preprocess_translation_data(data_dir) - ckpt_logs = self._train(data_dir, ["--checkpoint-activations"]) - baseline_logs = self._train(data_dir, []) - assert len(baseline_logs) == len(ckpt_logs) - - baseline_train_stats = read_last_log_entry(baseline_logs, "train") - ckpt_train_stats = read_last_log_entry(ckpt_logs, "train") - assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"] - - baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") - ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid") - assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"] - - -def create_dummy_roberta_head_data( - data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False -): - input_dir = "input0" - - def _create_dummy_data(filename): - random_data = torch.rand(num_examples * maxlen) - input_data = 97 + torch.floor(26 * random_data).int() - if regression: - output_data = torch.rand((num_examples, num_classes)) - else: - output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() - with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in: - label_filename = filename + ".label" if regression else filename + ".out" - with open(os.path.join(data_dir, "label", label_filename), "w") as f_out: - offset = 0 - for i in range(num_examples): - # write example input - ex_len = random.randint(1, maxlen) - ex_str = " ".join(map(chr, input_data[offset : offset + ex_len])) - print(ex_str, file=f_in) - # write example label - if regression: - class_str = " ".join(map(str, output_data[i].numpy())) - print(class_str, file=f_out) - else: - class_str = "class{}".format(output_data[i]) - print(class_str, file=f_out) - offset += ex_len - - os.mkdir(os.path.join(data_dir, input_dir)) - os.mkdir(os.path.join(data_dir, "label")) - _create_dummy_data("train") - _create_dummy_data("valid") - _create_dummy_data("test") - - -def train_masked_lm(data_dir, arch, extra_flags=None): - train_parser = options.get_training_parser() - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - "masked_lm", - data_dir, - "--arch", - arch, - "--optimizer", - "adam", - "--lr", - "0.0001", - "--criterion", - "masked_lm", - "--batch-size", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - 
"--no-progress-bar", - "--distributed-world-size", - "1", - "--ddp-backend", - "no_c10d", - "--num-workers", - "0", - ] - + (extra_flags or []), - ) - train.main(train_args) - - -def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None): - train_parser = options.get_training_parser() - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - "sentence_prediction", - data_dir, - "--arch", - arch, - "--encoder-layers", - "2", - "--num-classes", - str(num_classes), - "--optimizer", - "adam", - "--lr", - "0.0001", - "--criterion", - "sentence_prediction", - "--max-tokens", - "500", - "--max-positions", - "500", - "--batch-size", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - "--no-progress-bar", - "--distributed-world-size", - "1", - "--ddp-backend", - "no_c10d", - "--num-workers", - "0", - ] - + (extra_flags or []), - ) - train.main(train_args) - - -def eval_lm_main(data_dir, extra_flags=None): - eval_lm_parser = options.get_eval_lm_parser() - eval_lm_args = options.parse_args_and_arch( - eval_lm_parser, - [ - data_dir, - "--path", - os.path.join(data_dir, "checkpoint_last.pt"), - "--no-progress-bar", - "--num-workers", - "0", - ] - + (extra_flags or []), - ) - eval_lm.main(eval_lm_args) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/__init__.py deleted file mode 100644 index 7ac3b8dc69639c92cc129294356e9012745e3fb2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -import importlib -import os - - -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - task_name = file[: file.find(".py")] - importlib.import_module("examples.speech_recognition.tasks." + task_name) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/sentencepiece_bpe.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/sentencepiece_bpe.py deleted file mode 100644 index a76d46a2014e81eff72b19f6c13084a855fcd477..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/sentencepiece_bpe.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from dataclasses import dataclass, field - -from fairseq import file_utils -from fairseq.data.encoders import register_bpe -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class SentencepieceConfig(FairseqDataclass): - sentencepiece_model: str = field( - default="???", metadata={"help": "path to sentencepiece model"} - ) - - -@register_bpe("sentencepiece", dataclass=SentencepieceConfig) -class SentencepieceBPE(object): - def __init__(self, cfg): - sentencepiece_model = file_utils.cached_path(cfg.sentencepiece_model) - try: - import sentencepiece as spm - - self.sp = spm.SentencePieceProcessor() - self.sp.Load(sentencepiece_model) - except ImportError: - raise ImportError( - "Please install sentencepiece with: pip install sentencepiece" - ) - - def encode(self, x: str) -> str: - return " ".join(self.sp.EncodeAsPieces(x)) - - def decode(self, x: str) -> str: - return x.replace(" ", "").replace("\u2581", " ").strip() - - def is_beginning_of_word(self, x: str) -> bool: - if x in ["", "", "", ""]: - # special elements are always considered beginnings - # HACK: this logic is already present in fairseq/tasks/masked_lm.py - # but these special tokens are also contained in the sentencepiece - # vocabulary which causes duplicate special tokens. This hack makes - # sure that they are all taken into account. - return True - return x.startswith("\u2581") diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/distributed/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/distributed/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4.go deleted file mode 100644 index dc03e362b957083bfe3077cfb8fc26c82f083c9e..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-4.go and /dev/null differ diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/__init__.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/__init__.py deleted file mode 100644 index 965605587211b7bf0bd6bc3acdbb33dd49cab023..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/core/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .evaluation import * # noqa: F401, F403 -from .seg import * # noqa: F401, F403 -from .utils import * # noqa: F401, F403 diff --git a/spaces/PlanetHades361/Change-Your-Style/app.py b/spaces/PlanetHades361/Change-Your-Style/app.py deleted file mode 100644 index 01697bce5f73bef0cae6a1b3fc2e76f6877175b7..0000000000000000000000000000000000000000 --- a/spaces/PlanetHades361/Change-Your-Style/app.py +++ /dev/null @@ -1,15 +0,0 @@ -import gradio as gr -from utils import change_style - -def generate(Image, Style, Inference_Steps, Guidance, Start_Step): - if Inference_Steps > Start_Step: - return change_style(Image, Style, Inference_Steps, Guidance, Start_Step) - -style = gr.Radio(['GTA 5', 'Manga', 'Ghibli', 'Sims', 'Kaya Ghost Assasin', 'Arcane', 'Uzumaki']) -inf_steps = gr.Slider(minimum = 10, maximum = 100, value = 50, step = 1) -guidance = gr.Slider(minimum = 5, maximum = 50, value = 10, step = 1) -str_step = gr.Slider(minimum = 10, maximum = 100, value = 25, step = 1) - -io = gr.Interface(generate, ["image", style, inf_steps, guidance, str_step], gr.Image()) -io.launch() - diff --git 
a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/cppipc/ipc.cpp b/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/cppipc/ipc.cpp deleted file mode 100644 index c713b852ea5a51fbeb4729b64561da482caaf351..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/cppipc/ipc.cpp +++ /dev/null @@ -1,701 +0,0 @@ - -#include -#include -#include -#include // std::pair, std::move, std::forward -#include -#include // aligned_storage_t -#include -#include -#include -#include - -#include "libipc/ipc.h" -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/pool_alloc.h" -#include "libipc/queue.h" -#include "libipc/policy.h" -#include "libipc/rw_lock.h" -#include "libipc/waiter.h" - -#include "libipc/utility/log.h" -#include "libipc/utility/id_pool.h" -#include "libipc/utility/scope_guard.h" -#include "libipc/utility/utility.h" - -#include "libipc/memory/resource.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_array.h" - -namespace { - -using msg_id_t = std::uint32_t; -using acc_t = std::atomic; - -template -struct msg_t; - -template -struct msg_t<0, AlignSize> { - msg_id_t cc_id_; - msg_id_t id_; - std::int32_t remain_; - bool storage_; -}; - -template -struct msg_t : msg_t<0, AlignSize> { - std::aligned_storage_t data_ {}; - - msg_t() = default; - msg_t(msg_id_t cc_id, msg_id_t id, std::int32_t remain, void const * data, std::size_t size) - : msg_t<0, AlignSize> {cc_id, id, remain, (data == nullptr) || (size == 0)} { - if (this->storage_) { - if (data != nullptr) { - // copy storage-id - *reinterpret_cast(&data_) = - *static_cast(data); - } - } - else std::memcpy(&data_, data, size); - } -}; - -template -ipc::buff_t make_cache(T& data, std::size_t size) { - auto ptr = ipc::mem::alloc(size); - std::memcpy(ptr, &data, (ipc::detail::min)(sizeof(data), size)); - return { ptr, size, ipc::mem::free }; -} - -struct cache_t { - std::size_t fill_; - ipc::buff_t buff_; - - cache_t(std::size_t f, ipc::buff_t && b) - : fill_(f), buff_(std::move(b)) - {} - - void append(void const * data, std::size_t size) { - if (fill_ >= buff_.size() || data == nullptr || size == 0) return; - auto new_fill = (ipc::detail::min)(fill_ + size, buff_.size()); - std::memcpy(static_cast(buff_.data()) + fill_, data, new_fill - fill_); - fill_ = new_fill; - } -}; - -auto cc_acc() { - static ipc::shm::handle acc_h("__CA_CONN__", sizeof(acc_t)); - return static_cast(acc_h.get()); -} - -IPC_CONSTEXPR_ std::size_t align_chunk_size(std::size_t size) noexcept { - return (((size - 1) / ipc::large_msg_align) + 1) * ipc::large_msg_align; -} - -IPC_CONSTEXPR_ std::size_t calc_chunk_size(std::size_t size) noexcept { - return ipc::make_align(alignof(std::max_align_t), align_chunk_size( - ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic)) + size)); -} - -struct chunk_t { - std::atomic &conns() noexcept { - return *reinterpret_cast *>(this); - } - - void *data() noexcept { - return reinterpret_cast(this) - + ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic)); - } -}; - -struct chunk_info_t { - ipc::id_pool<> pool_; - ipc::spin_lock lock_; - - IPC_CONSTEXPR_ static std::size_t chunks_mem_size(std::size_t chunk_size) noexcept { - return ipc::id_pool<>::max_count * chunk_size; - } - - ipc::byte_t *chunks_mem() noexcept { - return reinterpret_cast(this + 1); - } - - chunk_t *at(std::size_t chunk_size, ipc::storage_id_t id) noexcept { - if (id < 0) return nullptr; - return reinterpret_cast(chunks_mem() + (chunk_size * id)); - } -}; - -auto& 
chunk_storages() { - class chunk_handle_t { - ipc::shm::handle handle_; - - public: - chunk_info_t *get_info(std::size_t chunk_size) { - if (!handle_.valid() && - !handle_.acquire( ("__CHUNK_INFO__" + ipc::to_string(chunk_size)).c_str(), - sizeof(chunk_info_t) + chunk_info_t::chunks_mem_size(chunk_size) )) { - ipc::error("[chunk_storages] chunk_shm.id_info_.acquire failed: chunk_size = %zd\n", chunk_size); - return nullptr; - } - auto info = static_cast(handle_.get()); - if (info == nullptr) { - ipc::error("[chunk_storages] chunk_shm.id_info_.get failed: chunk_size = %zd\n", chunk_size); - return nullptr; - } - return info; - } - }; - static ipc::map chunk_hs; - return chunk_hs; -} - -chunk_info_t *chunk_storage_info(std::size_t chunk_size) { - auto &storages = chunk_storages(); - std::decay_t::iterator it; - { - static ipc::rw_lock lock; - IPC_UNUSED_ std::shared_lock guard {lock}; - if ((it = storages.find(chunk_size)) == storages.end()) { - using chunk_handle_t = std::decay_t::value_type::second_type; - guard.unlock(); - IPC_UNUSED_ std::lock_guard guard {lock}; - it = storages.emplace(chunk_size, chunk_handle_t{}).first; - } - } - return it->second.get_info(chunk_size); -} - -std::pair acquire_storage(std::size_t size, ipc::circ::cc_t conns) { - std::size_t chunk_size = calc_chunk_size(size); - auto info = chunk_storage_info(chunk_size); - if (info == nullptr) return {}; - - info->lock_.lock(); - info->pool_.prepare(); - // got an unique id - auto id = info->pool_.acquire(); - info->lock_.unlock(); - - auto chunk = info->at(chunk_size, id); - if (chunk == nullptr) return {}; - chunk->conns().store(conns, std::memory_order_relaxed); - return { id, chunk->data() }; -} - -void *find_storage(ipc::storage_id_t id, std::size_t size) { - if (id < 0) { - ipc::error("[find_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size); - return nullptr; - } - std::size_t chunk_size = calc_chunk_size(size); - auto info = chunk_storage_info(chunk_size); - if (info == nullptr) return nullptr; - return info->at(chunk_size, id)->data(); -} - -void release_storage(ipc::storage_id_t id, std::size_t size) { - if (id < 0) { - ipc::error("[release_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size); - return; - } - std::size_t chunk_size = calc_chunk_size(size); - auto info = chunk_storage_info(chunk_size); - if (info == nullptr) return; - info->lock_.lock(); - info->pool_.release(id); - info->lock_.unlock(); -} - -template -bool sub_rc(ipc::wr, - std::atomic &/*conns*/, ipc::circ::cc_t /*curr_conns*/, ipc::circ::cc_t /*conn_id*/) noexcept { - return true; -} - -template -bool sub_rc(ipc::wr, - std::atomic &conns, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) noexcept { - auto last_conns = curr_conns & ~conn_id; - for (unsigned k = 0;;) { - auto chunk_conns = conns.load(std::memory_order_acquire); - if (conns.compare_exchange_weak(chunk_conns, chunk_conns & last_conns, std::memory_order_release)) { - return (chunk_conns & last_conns) == 0; - } - ipc::yield(k); - } -} - -template -void recycle_storage(ipc::storage_id_t id, std::size_t size, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) { - if (id < 0) { - ipc::error("[recycle_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size); - return; - } - std::size_t chunk_size = calc_chunk_size(size); - auto info = chunk_storage_info(chunk_size); - if (info == nullptr) return; - - auto chunk = info->at(chunk_size, id); - if (chunk == nullptr) return; - - if (!sub_rc(Flag{}, chunk->conns(), curr_conns, conn_id)) { - return; - 
} - info->lock_.lock(); - info->pool_.release(id); - info->lock_.unlock(); -} - -template -bool clear_message(void* p) { - auto msg = static_cast(p); - if (msg->storage_) { - std::int32_t r_size = static_cast(ipc::data_length) + msg->remain_; - if (r_size <= 0) { - ipc::error("[clear_message] invalid msg size: %d\n", (int)r_size); - return true; - } - release_storage( - *reinterpret_cast(&msg->data_), - static_cast(r_size)); - } - return true; -} - -struct conn_info_head { - - ipc::string name_; - msg_id_t cc_id_; // connection-info id - ipc::detail::waiter cc_waiter_, wt_waiter_, rd_waiter_; - ipc::shm::handle acc_h_; - - conn_info_head(char const * name) - : name_ {name} - , cc_id_ {(cc_acc() == nullptr) ? 0 : cc_acc()->fetch_add(1, std::memory_order_relaxed)} - , cc_waiter_{("__CC_CONN__" + name_).c_str()} - , wt_waiter_{("__WT_CONN__" + name_).c_str()} - , rd_waiter_{("__RD_CONN__" + name_).c_str()} - , acc_h_ {("__AC_CONN__" + name_).c_str(), sizeof(acc_t)} { - } - - void quit_waiting() { - cc_waiter_.quit_waiting(); - wt_waiter_.quit_waiting(); - rd_waiter_.quit_waiting(); - } - - auto acc() { - return static_cast(acc_h_.get()); - } - - auto& recv_cache() { - thread_local ipc::unordered_map tls; - return tls; - } -}; - -template -bool wait_for(W& waiter, F&& pred, std::uint64_t tm) { - if (tm == 0) return !pred(); - for (unsigned k = 0; pred();) { - bool ret = true; - ipc::sleep(k, [&k, &ret, &waiter, &pred, tm] { - ret = waiter.wait_if(std::forward(pred), tm); - k = 0; - }); - if (!ret) return false; // timeout or fail - if (k == 0) break; // k has been reset - } - return true; -} - -template -struct queue_generator { - - using queue_t = ipc::queue, Policy>; - - struct conn_info_t : conn_info_head { - queue_t que_; - - conn_info_t(char const * name) - : conn_info_head{name} - , que_{("__QU_CONN__" + - ipc::to_string(DataSize) + "__" + - ipc::to_string(AlignSize) + "__" + name).c_str()} { - } - - void disconnect_receiver() { - bool dis = que_.disconnect(); - this->quit_waiting(); - if (dis) { - this->recv_cache().clear(); - } - } - }; -}; - -template -struct detail_impl { - -using policy_t = Policy; -using flag_t = typename policy_t::flag_t; -using queue_t = typename queue_generator::queue_t; -using conn_info_t = typename queue_generator::conn_info_t; - -constexpr static conn_info_t* info_of(ipc::handle_t h) noexcept { - return static_cast(h); -} - -constexpr static queue_t* queue_of(ipc::handle_t h) noexcept { - return (info_of(h) == nullptr) ? 
nullptr : &(info_of(h)->que_); -} - -/* API implementations */ - -static void disconnect(ipc::handle_t h) { - auto que = queue_of(h); - if (que == nullptr) { - return; - } - que->shut_sending(); - assert(info_of(h) != nullptr); - info_of(h)->disconnect_receiver(); -} - -static bool reconnect(ipc::handle_t * ph, bool start_to_recv) { - assert(ph != nullptr); - assert(*ph != nullptr); - auto que = queue_of(*ph); - if (que == nullptr) { - return false; - } - if (start_to_recv) { - que->shut_sending(); - if (que->connect()) { // wouldn't connect twice - info_of(*ph)->cc_waiter_.broadcast(); - return true; - } - return false; - } - // start_to_recv == false - if (que->connected()) { - info_of(*ph)->disconnect_receiver(); - } - return que->ready_sending(); -} - -static bool connect(ipc::handle_t * ph, char const * name, bool start_to_recv) { - assert(ph != nullptr); - if (*ph == nullptr) { - *ph = ipc::mem::alloc(name); - } - return reconnect(ph, start_to_recv); -} - -static void destroy(ipc::handle_t h) { - disconnect(h); - ipc::mem::free(info_of(h)); -} - -static std::size_t recv_count(ipc::handle_t h) noexcept { - auto que = queue_of(h); - if (que == nullptr) { - return ipc::invalid_value; - } - return que->conn_count(); -} - -static bool wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) { - auto que = queue_of(h); - if (que == nullptr) { - return false; - } - return wait_for(info_of(h)->cc_waiter_, [que, r_count] { - return que->conn_count() < r_count; - }, tm); -} - -template -static bool send(F&& gen_push, ipc::handle_t h, void const * data, std::size_t size) { - if (data == nullptr || size == 0) { - ipc::error("fail: send(%p, %zd)\n", data, size); - return false; - } - auto que = queue_of(h); - if (que == nullptr) { - ipc::error("fail: send, queue_of(h) == nullptr\n"); - return false; - } - if (que->elems() == nullptr) { - ipc::error("fail: send, queue_of(h)->elems() == nullptr\n"); - return false; - } - if (!que->ready_sending()) { - ipc::error("fail: send, que->ready_sending() == false\n"); - return false; - } - ipc::circ::cc_t conns = que->elems()->connections(std::memory_order_relaxed); - if (conns == 0) { - ipc::error("fail: send, there is no receiver on this connection.\n"); - return false; - } - // calc a new message id - auto acc = info_of(h)->acc(); - if (acc == nullptr) { - ipc::error("fail: send, info_of(h)->acc() == nullptr\n"); - return false; - } - auto msg_id = acc->fetch_add(1, std::memory_order_relaxed); - auto try_push = std::forward(gen_push)(info_of(h), que, msg_id); - if (size > ipc::large_msg_limit) { - auto dat = acquire_storage(size, conns); - void * buf = dat.second; - if (buf != nullptr) { - std::memcpy(buf, data, size); - return try_push(static_cast(size) - - static_cast(ipc::data_length), &(dat.first), 0); - } - // try using message fragment - //ipc::log("fail: shm::handle for big message. 
msg_id: %zd, size: %zd\n", msg_id, size); - } - // push message fragment - std::int32_t offset = 0; - for (std::int32_t i = 0; i < static_cast(size / ipc::data_length); ++i, offset += ipc::data_length) { - if (!try_push(static_cast(size) - offset - static_cast(ipc::data_length), - static_cast(data) + offset, ipc::data_length)) { - return false; - } - } - // if remain > 0, this is the last message fragment - std::int32_t remain = static_cast(size) - offset; - if (remain > 0) { - if (!try_push(remain - static_cast(ipc::data_length), - static_cast(data) + offset, - static_cast(remain))) { - return false; - } - } - return true; -} - -static bool send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) { - return send([tm](auto info, auto que, auto msg_id) { - return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) { - if (!wait_for(info->wt_waiter_, [&] { - return !que->push( - [](void*) { return true; }, - info->cc_id_, msg_id, remain, data, size); - }, tm)) { - ipc::log("force_push: msg_id = %zd, remain = %d, size = %zd\n", msg_id, remain, size); - if (!que->force_push( - clear_message, - info->cc_id_, msg_id, remain, data, size)) { - return false; - } - } - info->rd_waiter_.broadcast(); - return true; - }; - }, h, data, size); -} - -static bool try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) { - return send([tm](auto info, auto que, auto msg_id) { - return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) { - if (!wait_for(info->wt_waiter_, [&] { - return !que->push( - [](void*) { return true; }, - info->cc_id_, msg_id, remain, data, size); - }, tm)) { - return false; - } - info->rd_waiter_.broadcast(); - return true; - }; - }, h, data, size); -} - -static ipc::buff_t recv(ipc::handle_t h, std::uint64_t tm) { - auto que = queue_of(h); - if (que == nullptr) { - ipc::error("fail: recv, queue_of(h) == nullptr\n"); - return {}; - } - if (!que->connected()) { - // hasn't connected yet, just return. - return {}; - } - auto& rc = info_of(h)->recv_cache(); - for (;;) { - // pop a new message - typename queue_t::value_t msg; - if (!wait_for(info_of(h)->rd_waiter_, [que, &msg] { - return !que->pop(msg); - }, tm)) { - // pop failed, just return. 
- return {}; - } - info_of(h)->wt_waiter_.broadcast(); - if ((info_of(h)->acc() != nullptr) && (msg.cc_id_ == info_of(h)->cc_id_)) { - continue; // ignore message to self - } - // msg.remain_ may minus & abs(msg.remain_) < data_length - std::int32_t r_size = static_cast(ipc::data_length) + msg.remain_; - if (r_size <= 0) { - ipc::error("fail: recv, r_size = %d\n", (int)r_size); - return {}; - } - std::size_t msg_size = static_cast(r_size); - // large message - if (msg.storage_) { - ipc::storage_id_t buf_id = *reinterpret_cast(&msg.data_); - void* buf = find_storage(buf_id, msg_size); - if (buf != nullptr) { - struct recycle_t { - ipc::storage_id_t storage_id; - ipc::circ::cc_t curr_conns; - ipc::circ::cc_t conn_id; - } *r_info = ipc::mem::alloc(recycle_t{ - buf_id, que->elems()->connections(std::memory_order_relaxed), que->connected_id() - }); - if (r_info == nullptr) { - ipc::log("fail: ipc::mem::alloc.\n"); - return ipc::buff_t{buf, msg_size}; // no recycle - } else { - return ipc::buff_t{buf, msg_size, [](void* p_info, std::size_t size) { - auto r_info = static_cast(p_info); - IPC_UNUSED_ auto finally = ipc::guard([r_info] { - ipc::mem::free(r_info); - }); - recycle_storage(r_info->storage_id, size, r_info->curr_conns, r_info->conn_id); - }, r_info}; - } - } else { - ipc::log("fail: shm::handle for large message. msg_id: %zd, buf_id: %zd, size: %zd\n", msg.id_, buf_id, msg_size); - continue; - } - } - // find cache with msg.id_ - auto cac_it = rc.find(msg.id_); - if (cac_it == rc.end()) { - if (msg_size <= ipc::data_length) { - return make_cache(msg.data_, msg_size); - } - // gc - if (rc.size() > 1024) { - std::vector need_del; - for (auto const & pair : rc) { - auto cmp = std::minmax(msg.id_, pair.first); - if (cmp.second - cmp.first > 8192) { - need_del.push_back(pair.first); - } - } - for (auto id : need_del) rc.erase(id); - } - // cache the first message fragment - rc.emplace(msg.id_, cache_t { ipc::data_length, make_cache(msg.data_, msg_size) }); - } - // has cached before this message - else { - auto& cac = cac_it->second; - // this is the last message fragment - if (msg.remain_ <= 0) { - cac.append(&(msg.data_), msg_size); - // finish this message, erase it from cache - auto buff = std::move(cac.buff_); - rc.erase(cac_it); - return buff; - } - // there are remain datas after this message - cac.append(&(msg.data_), ipc::data_length); - } - } -} - -static ipc::buff_t try_recv(ipc::handle_t h) { - return recv(h, 0); -} - -}; // detail_impl - -template -using policy_t = ipc::policy::choose; - -} // internal-linkage - -namespace ipc { - -template -ipc::handle_t chan_impl::inited() { - ipc::detail::waiter::init(); - return nullptr; -} - -template -bool chan_impl::connect(ipc::handle_t * ph, char const * name, unsigned mode) { - return detail_impl>::connect(ph, name, mode & receiver); -} - -template -bool chan_impl::reconnect(ipc::handle_t * ph, unsigned mode) { - return detail_impl>::reconnect(ph, mode & receiver); -} - -template -void chan_impl::disconnect(ipc::handle_t h) { - detail_impl>::disconnect(h); -} - -template -void chan_impl::destroy(ipc::handle_t h) { - detail_impl>::destroy(h); -} - -template -char const * chan_impl::name(ipc::handle_t h) { - auto info = detail_impl>::info_of(h); - return (info == nullptr) ? 
nullptr : info->name_.c_str(); -} - -template -std::size_t chan_impl::recv_count(ipc::handle_t h) { - return detail_impl>::recv_count(h); -} - -template -bool chan_impl::wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) { - return detail_impl>::wait_for_recv(h, r_count, tm); -} - -template -bool chan_impl::send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) { - return detail_impl>::send(h, data, size, tm); -} - -template -buff_t chan_impl::recv(ipc::handle_t h, std::uint64_t tm) { - return detail_impl>::recv(h, tm); -} - -template -bool chan_impl::try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) { - return detail_impl>::try_send(h, data, size, tm); -} - -template -buff_t chan_impl::try_recv(ipc::handle_t h) { - return detail_impl>::try_recv(h); -} - -template struct chan_impl>; -// template struct chan_impl>; // TBD -// template struct chan_impl>; // TBD -template struct chan_impl>; -template struct chan_impl>; - -} // namespace ipc diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/optimizers/__init__.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/optimizers/__init__.py deleted file mode 100644 index e4e36c22e00217deccacd589f8924b2f74589456..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/optimizers/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch -from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingLR, ExponentialLR - - -def build_optimizer(model, config): - name = config.TRAINER.OPTIMIZER - lr = config.TRAINER.TRUE_LR - - if name == "adam": - return torch.optim.Adam( - model.parameters(), lr=lr, weight_decay=config.TRAINER.ADAM_DECAY - ) - elif name == "adamw": - return torch.optim.AdamW( - model.parameters(), lr=lr, weight_decay=config.TRAINER.ADAMW_DECAY - ) - else: - raise ValueError(f"TRAINER.OPTIMIZER = {name} is not a valid optimizer!") - - -def build_scheduler(config, optimizer): - """ - Returns: - scheduler (dict):{ - 'scheduler': lr_scheduler, - 'interval': 'step', # or 'epoch' - 'monitor': 'val_f1', (optional) - 'frequency': x, (optional) - } - """ - scheduler = {"interval": config.TRAINER.SCHEDULER_INTERVAL} - name = config.TRAINER.SCHEDULER - - if name == "MultiStepLR": - scheduler.update( - { - "scheduler": MultiStepLR( - optimizer, - config.TRAINER.MSLR_MILESTONES, - gamma=config.TRAINER.MSLR_GAMMA, - ) - } - ) - elif name == "CosineAnnealing": - scheduler.update( - {"scheduler": CosineAnnealingLR(optimizer, config.TRAINER.COSA_TMAX)} - ) - elif name == "ExponentialLR": - scheduler.update( - {"scheduler": ExponentialLR(optimizer, config.TRAINER.ELR_GAMMA)} - ) - else: - raise NotImplementedError() - - return scheduler diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/train.sh b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/train.sh deleted file mode 100644 index c94626d01d4adb7b6a453b6f09fa2c9f6479f90d..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/train.sh +++ /dev/null @@ -1,16 +0,0 @@ -# python train.py --task=debug \ -# --data_path="./data/" \ -# --gamma \ -# --aug \ -# --camera="NIKON_D700" \ -# --out_path="./exps/" \ -# # --debug_mode - -python train.py --task=debug2 \ - --data_path="./data/" \ - --gamma \ - --aug \ - --camera="Canon_EOS_5D" \ - --out_path="./exps/" \ - --debug_mode - diff --git 
a/spaces/RedBaron5/PatentSolver/App/bin/SharpClassifier.py b/spaces/RedBaron5/PatentSolver/App/bin/SharpClassifier.py deleted file mode 100644 index b89c43b5c5ef4da47e4d065c560f8a142a458967..0000000000000000000000000000000000000000 --- a/spaces/RedBaron5/PatentSolver/App/bin/SharpClassifier.py +++ /dev/null @@ -1,54 +0,0 @@ -from nltk.classify import NaiveBayesClassifier -from nltk.corpus import subjectivity -from nltk.sentiment import SentimentAnalyzer -from nltk.sentiment.util import * -from nltk.sentiment.vader import SentimentIntensityAnalyzer - -class SharpClassifier(object): - def __init__(self, sentence): - self.sentence = sentence - print("Classification....") - - def classify(self): - sentence = self.sentence - n_instances = 100 - subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]] - obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] - len(subj_docs), len(obj_docs) - - train_subj_docs = subj_docs[:80] - test_subj_docs = subj_docs[80:100] - train_obj_docs = obj_docs[:80] - test_obj_docs = obj_docs[80:100] - training_docs = train_subj_docs + train_obj_docs - testing_docs = test_subj_docs + test_obj_docs - - sentim_analyzer = SentimentAnalyzer() - all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) - - unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) - - sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) - training_set = sentim_analyzer.apply_features(training_docs) - test_set = sentim_analyzer.apply_features(testing_docs) - - trainer = NaiveBayesClassifier.train - classifier = sentim_analyzer.train(trainer, training_set) - # for key,value in sorted(sentim_analyzer.evaluate(test_set).items()): - # print('{0}: {1}'.format(key, value)) - - sid = SentimentIntensityAnalyzer() - ss = sid.polarity_scores(sentence) - polarity = '' - if ss['neg'] < ss['pos']: - polarity = 'partialSolution' - elif ss['neg'] > ss['pos']: - polarity = 'problem' - else: - polarity ='neutre' - # for k in sorted(ss): - # print('{0}: {1}, '.format(k, ss[k]), end='') - return polarity - - - diff --git a/spaces/Redgon/bingo/src/components/external-link.tsx b/spaces/Redgon/bingo/src/components/external-link.tsx deleted file mode 100644 index 011265f364d5a64a770f4c7e9c65c5ade21d623a..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/src/components/external-link.tsx +++ /dev/null @@ -1,30 +0,0 @@ -export function ExternalLink({ - href, - children -}: { - href: string - children: React.ReactNode -}) { - return ( - - {children} - - - ) -} diff --git a/spaces/Ritori/TTS_Yui/hifi-gan/inference.py b/spaces/Ritori/TTS_Yui/hifi-gan/inference.py deleted file mode 100644 index 96ba10672fd808d9d5f5be673bd58f2db7878eda..0000000000000000000000000000000000000000 --- a/spaces/Ritori/TTS_Yui/hifi-gan/inference.py +++ /dev/null @@ -1,95 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - -import glob -import os -import argparse -import json -import torch -from scipy.io.wavfile import write -from env import AttrDict -from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav -from models import Generator - -h = None -device = None - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def get_mel(x): - return 
mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '*') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return '' - return sorted(cp_list)[-1] - - -def inference(a): - generator = Generator(h).to(device) - - state_dict_g = load_checkpoint(a.checkpoint_file, device) - generator.load_state_dict(state_dict_g['generator']) - - filelist = os.listdir(a.input_wavs_dir) - - os.makedirs(a.output_dir, exist_ok=True) - - generator.eval() - generator.remove_weight_norm() - with torch.no_grad(): - for i, filname in enumerate(filelist): - wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname)) - wav = wav / MAX_WAV_VALUE - wav = torch.FloatTensor(wav).to(device) - x = get_mel(wav.unsqueeze(0)) - y_g_hat = generator(x) - audio = y_g_hat.squeeze() - audio = audio * MAX_WAV_VALUE - audio = audio.cpu().numpy().astype('int16') - - output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav') - write(output_file, h.sampling_rate, audio) - print(output_file) - - -def main(): - print('Initializing Inference Process..') - - parser = argparse.ArgumentParser() - parser.add_argument('--input_wavs_dir', default='test_files') - parser.add_argument('--output_dir', default='generated_files') - parser.add_argument('--checkpoint_file', required=True) - a = parser.parse_args() - - config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - torch.manual_seed(h.seed) - global device - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - device = torch.device('cuda') - else: - device = torch.device('cpu') - - inference(a) - - -if __name__ == '__main__': - main() - diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/builder.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/builder.py deleted file mode 100644 index 7567316c566bd3aca6d8f65a84b00e9e890948a7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/builder.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..runner import Sequential -from ..utils import Registry, build_from_cfg - - -def build_model_from_cfg(cfg, registry, default_args=None): - """Build a PyTorch model from config dict(s). Different from - ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built. - - Args: - cfg (dict, list[dict]): The config of modules, is is either a config - dict or a list of config dicts. If cfg is a list, a - the built modules will be wrapped with ``nn.Sequential``. - registry (:obj:`Registry`): A registry the module belongs to. - default_args (dict, optional): Default arguments to build the module. - Defaults to None. - - Returns: - nn.Module: A built nn module. 
- """ - if isinstance(cfg, list): - modules = [ - build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg - ] - return Sequential(*modules) - else: - return build_from_cfg(cfg, registry, default_args) - - -MODELS = Registry('model', build_func=build_model_from_cfg) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py deleted file mode 100644 index be6772fa6c471a7a65b77f2f18dfd217f4bd3289..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +++ /dev/null @@ -1,377 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, ConvModule, build_upsample_layer -from mmcv.ops.carafe import CARAFEPack -from mmcv.runner import auto_fp16, force_fp32 -from torch.nn.modules.utils import _pair - -from mmdet.core import mask_target -from mmdet.models.builder import HEADS, build_loss - -BYTES_PER_FLOAT = 4 -# TODO: This memory limit may be too much or too little. It would be better to -# determine it based on available resources. -GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit - - -@HEADS.register_module() -class FCNMaskHead(nn.Module): - - def __init__(self, - num_convs=4, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - num_classes=80, - class_agnostic=False, - upsample_cfg=dict(type='deconv', scale_factor=2), - conv_cfg=None, - norm_cfg=None, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)): - super(FCNMaskHead, self).__init__() - self.upsample_cfg = upsample_cfg.copy() - if self.upsample_cfg['type'] not in [ - None, 'deconv', 'nearest', 'bilinear', 'carafe' - ]: - raise ValueError( - f'Invalid upsample method {self.upsample_cfg["type"]}, ' - 'accepted methods are "deconv", "nearest", "bilinear", ' - '"carafe"') - self.num_convs = num_convs - # WARN: roi_feat_size is reserved and not used - self.roi_feat_size = _pair(roi_feat_size) - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.conv_out_channels = conv_out_channels - self.upsample_method = self.upsample_cfg.get('type') - self.scale_factor = self.upsample_cfg.pop('scale_factor', None) - self.num_classes = num_classes - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - self.loss_mask = build_loss(loss_mask) - - self.convs = nn.ModuleList() - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - padding=padding, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - upsample_in_channels = ( - self.conv_out_channels if self.num_convs > 0 else in_channels) - upsample_cfg_ = self.upsample_cfg.copy() - if self.upsample_method is None: - self.upsample = None - elif self.upsample_method == 'deconv': - upsample_cfg_.update( - in_channels=upsample_in_channels, - out_channels=self.conv_out_channels, - kernel_size=self.scale_factor, - stride=self.scale_factor) - self.upsample = build_upsample_layer(upsample_cfg_) - elif self.upsample_method == 'carafe': - upsample_cfg_.update( - channels=upsample_in_channels, scale_factor=self.scale_factor) - self.upsample = 
build_upsample_layer(upsample_cfg_) - else: - # suppress warnings - align_corners = (None - if self.upsample_method == 'nearest' else False) - upsample_cfg_.update( - scale_factor=self.scale_factor, - mode=self.upsample_method, - align_corners=align_corners) - self.upsample = build_upsample_layer(upsample_cfg_) - - out_channels = 1 if self.class_agnostic else self.num_classes - logits_in_channel = ( - self.conv_out_channels - if self.upsample_method == 'deconv' else upsample_in_channels) - self.conv_logits = Conv2d(logits_in_channel, out_channels, 1) - self.relu = nn.ReLU(inplace=True) - self.debug_imgs = None - - def init_weights(self): - for m in [self.upsample, self.conv_logits]: - if m is None: - continue - elif isinstance(m, CARAFEPack): - m.init_weights() - else: - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu') - nn.init.constant_(m.bias, 0) - - @auto_fp16() - def forward(self, x): - for conv in self.convs: - x = conv(x) - if self.upsample is not None: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - return mask_pred - - def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, - gt_masks, rcnn_train_cfg) - return mask_targets - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, mask_targets, labels): - """ - Example: - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> # There are lots of variations depending on the configuration - >>> self = FCNMaskHead(num_classes=C, num_convs=1) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> sf = self.scale_factor - >>> labels = torch.randint(0, C, size=(N,)) - >>> # With the default properties the mask targets should indicate - >>> # a (potentially soft) single-class label - >>> mask_targets = torch.rand(N, H * sf, W * sf) - >>> loss = self.loss(mask_pred, mask_targets, labels) - >>> print('loss = {!r}'.format(loss)) - """ - loss = dict() - if mask_pred.size(0) == 0: - loss_mask = mask_pred.sum() - else: - if self.class_agnostic: - loss_mask = self.loss_mask(mask_pred, mask_targets, - torch.zeros_like(labels)) - else: - loss_mask = self.loss_mask(mask_pred, mask_targets, labels) - loss['loss_mask'] = loss_mask - return loss - - def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, - ori_shape, scale_factor, rescale): - """Get segmentation masks from mask_pred and bboxes. - - Args: - mask_pred (Tensor or ndarray): shape (n, #class, h, w). - For single-scale testing, mask_pred is the direct output of - model, whose type is Tensor, while for multi-scale testing, - it will be converted to numpy array outside of this method. - det_bboxes (Tensor): shape (n, 4/5) - det_labels (Tensor): shape (n, ) - rcnn_test_cfg (dict): rcnn testing config - ori_shape (Tuple): original image height and width, shape (2,) - scale_factor(float | Tensor): If ``rescale is True``, box - coordinates are divided by this scale factor to fit - ``ori_shape``. - rescale (bool): If True, the resulting masks will be rescaled to - ``ori_shape``. - - Returns: - list[list]: encoded masks. 
The c-th item in the outer list - corresponds to the c-th class. Given the c-th outer list, the - i-th item in that inner list is the mask for the i-th box with - class label c. - - Example: - >>> import mmcv - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> self = FCNMaskHead(num_classes=C, num_convs=0) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> # Each input is associated with some bounding box - >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) - >>> det_labels = torch.randint(0, C, size=(N,)) - >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, }) - >>> ori_shape = (H * 4, W * 4) - >>> scale_factor = torch.FloatTensor((1, 1)) - >>> rescale = False - >>> # Encoded masks are a list for each category. - >>> encoded_masks = self.get_seg_masks( - >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, - >>> scale_factor, rescale - >>> ) - >>> assert len(encoded_masks) == C - >>> assert sum(list(map(len, encoded_masks))) == N - """ - if isinstance(mask_pred, torch.Tensor): - mask_pred = mask_pred.sigmoid() - else: - mask_pred = det_bboxes.new_tensor(mask_pred) - - device = mask_pred.device - cls_segms = [[] for _ in range(self.num_classes) - ] # BG is not included in num_classes - bboxes = det_bboxes[:, :4] - labels = det_labels - - if rescale: - img_h, img_w = ori_shape[:2] - else: - if isinstance(scale_factor, float): - img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) - img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) - else: - w_scale, h_scale = scale_factor[0], scale_factor[1] - img_h = np.round(ori_shape[0] * h_scale.item()).astype( - np.int32) - img_w = np.round(ori_shape[1] * w_scale.item()).astype( - np.int32) - scale_factor = 1.0 - - if not isinstance(scale_factor, (float, torch.Tensor)): - scale_factor = bboxes.new_tensor(scale_factor) - bboxes = bboxes / scale_factor - - if torch.onnx.is_in_onnx_export(): - # TODO: Remove after F.grid_sample is supported. - from torchvision.models.detection.roi_heads \ - import paste_masks_in_image - masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) - thr = rcnn_test_cfg.get('mask_thr_binary', 0) - if thr > 0: - masks = masks >= thr - return masks - - N = len(mask_pred) - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == 'cpu': - # CPU is most efficient when they are pasted one by one with - # skip_empty=True, so that it performs minimal number of - # operations. 
- num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, - # but may have memory issue - num_chunks = int( - np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) - assert (num_chunks <= - N), 'Default GPU_MEM_LIMIT is too small; try increasing it' - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - threshold = rcnn_test_cfg.mask_thr_binary - im_mask = torch.zeros( - N, - img_h, - img_w, - device=device, - dtype=torch.bool if threshold >= 0 else torch.uint8) - - if not self.class_agnostic: - mask_pred = mask_pred[range(N), labels][:, None] - - for inds in chunks: - masks_chunk, spatial_inds = _do_paste_mask( - mask_pred[inds], - bboxes[inds], - img_h, - img_w, - skip_empty=device.type == 'cpu') - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - im_mask[(inds, ) + spatial_inds] = masks_chunk - - for i in range(N): - cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy()) - return cls_segms - - -def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): - """Paste instance masks according to boxes. - - This implementation is modified from - https://github.com/facebookresearch/detectron2/ - - Args: - masks (Tensor): N, 1, H, W - boxes (Tensor): N, 4 - img_h (int): Height of the image to be pasted. - img_w (int): Width of the image to be pasted. - skip_empty (bool): Only paste masks within the region that - tightly bound all boxes, and returns the results this region only. - An important optimization for CPU. - - Returns: - tuple: (Tensor, tuple). The first item is mask tensor, the second one - is the slice object. - If skip_empty == False, the whole image will be pasted. It will - return a mask of shape (N, img_h, img_w) and an empty tuple. - If skip_empty == True, only area around the mask will be pasted. - A mask of shape (N, h', w') and its start and end coordinates - in the original image will be returned. - """ - # On GPU, paste all masks together (up to chunk size) - # by using the entire image to sample the masks - # Compared to pasting them one by one, - # this has more operations but is faster on COCO-scale dataset. 
- device = masks.device - if skip_empty: - x0_int, y0_int = torch.clamp( - boxes.min(dim=0).values.floor()[:2] - 1, - min=0).to(dtype=torch.int32) - x1_int = torch.clamp( - boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) - y1_int = torch.clamp( - boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) - else: - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 - - N = masks.shape[0] - - img_y = torch.arange( - y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 - img_x = torch.arange( - x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - # img_x, img_y have shapes (N, w), (N, h) - if torch.isinf(img_x).any(): - inds = torch.where(torch.isinf(img_x)) - img_x[inds] = 0 - if torch.isinf(img_y).any(): - inds = torch.where(torch.isinf(img_y)) - img_y[inds] = 0 - - gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) - gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) - grid = torch.stack([gx, gy], dim=3) - - if torch.onnx.is_in_onnx_export(): - raise RuntimeError( - 'Exporting F.grid_sample from Pytorch to ONNX is not supported.') - img_masks = F.grid_sample( - masks.to(dtype=torch.float32), grid, align_corners=False) - - if skip_empty: - return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) - else: - return img_masks[:, 0], () diff --git a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_sde_ve.py b/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_sde_ve.py deleted file mode 100644 index e187f079688723c991b4b80fa1fd4f358896bb4f..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_sde_ve.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch - -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -@dataclass -class SdeVeOutput(BaseOutput): - """ - Output class for the ScoreSdeVeScheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps. 
- """ - - prev_sample: torch.FloatTensor - prev_sample_mean: torch.FloatTensor - - -class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): - """ - The variance exploding stochastic differential equation (SDE) scheduler. - - For more information, see the original paper: https://arxiv.org/abs/2011.13456 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and - [`~ConfigMixin.from_config`] functios. - - Args: - snr (`float`): - coefficient weighting the step from the model_output sample (from the network) to the random noise. - sigma_min (`float`): - initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the - distribution of the data. - sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model. - sampling_eps (`float`): the end value of sampling, where timesteps decrease progessively from 1 to - epsilon. - correct_steps (`int`): number of correction steps performed on a produced sample. - tensor_format (`str`): "np" or "pt" for the expected format of samples passed to the Scheduler. - """ - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 2000, - snr: float = 0.15, - sigma_min: float = 0.01, - sigma_max: float = 1348.0, - sampling_eps: float = 1e-5, - correct_steps: int = 1, - tensor_format: str = "pt", - ): - # setable values - self.timesteps = None - - self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) - - self.tensor_format = tensor_format - self.set_format(tensor_format=tensor_format) - - def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None): - """ - Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation). - - """ - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - self.timesteps = np.linspace(1, sampling_eps, num_inference_steps) - elif tensor_format == "pt": - self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps) - else: - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def set_sigmas( - self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None - ): - """ - Sets the noise scales used for the diffusion chain. Supporting function to be run before inference. - - The sigmas control the weight of the `drift` and `diffusion` components of sample update. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sigma_min (`float`, optional): - initial noise scale value (overrides value given at Scheduler instantiation). - sigma_max (`float`, optional): final noise scale value (overrides value given at Scheduler instantiation). - sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation). 
- - """ - sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min - sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - if self.timesteps is None: - self.set_timesteps(num_inference_steps, sampling_eps) - - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - self.discrete_sigmas = np.exp(np.linspace(np.log(sigma_min), np.log(sigma_max), num_inference_steps)) - self.sigmas = np.array([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) - elif tensor_format == "pt": - self.discrete_sigmas = torch.exp(torch.linspace(np.log(sigma_min), np.log(sigma_max), num_inference_steps)) - self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) - else: - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def get_adjacent_sigma(self, timesteps, t): - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - return np.where(timesteps == 0, np.zeros_like(t), self.discrete_sigmas[timesteps - 1]) - elif tensor_format == "pt": - return torch.where( - timesteps == 0, - torch.zeros_like(t.to(timesteps.device)), - self.discrete_sigmas[timesteps - 1].to(timesteps.device), - ) - - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def set_seed(self, seed): - warnings.warn( - "The method `set_seed` is deprecated and will be removed in version `0.4.0`. Please consider passing a" - " generator instead.", - DeprecationWarning, - ) - tensor_format = getattr(self, "tensor_format", "pt") - if tensor_format == "np": - np.random.seed(seed) - elif tensor_format == "pt": - torch.manual_seed(seed) - else: - raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.") - - def step_pred( - self, - model_output: Union[torch.FloatTensor, np.ndarray], - timestep: int, - sample: Union[torch.FloatTensor, np.ndarray], - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - **kwargs, - ) -> Union[SdeVeOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor` or `np.ndarray`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
- - """ - if "seed" in kwargs and kwargs["seed"] is not None: - self.set_seed(kwargs["seed"]) - - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - timestep = timestep * torch.ones( - sample.shape[0], device=sample.device - ) # torch.repeat_interleave(timestep, sample.shape[0]) - timesteps = (timestep * (len(self.timesteps) - 1)).long() - - # mps requires indices to be in the same device, so we use cpu as is the default with cuda - timesteps = timesteps.to(self.discrete_sigmas.device) - - sigma = self.discrete_sigmas[timesteps].to(sample.device) - adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) - drift = self.zeros_like(sample) - diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 - - # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) - # also equation 47 shows the analog from SDE models to ancestral sampling methods - drift = drift - diffusion[:, None, None, None] ** 2 * model_output - - # equation 6: sample noise for the diffusion term of - noise = self.randn_like(sample, generator=generator) - prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep - # TODO is the variable diffusion the correct scaling term for the noise? - prev_sample = prev_sample_mean + diffusion[:, None, None, None] * noise # add impact of diffusion field g - - if not return_dict: - return (prev_sample, prev_sample_mean) - - return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) - - def step_correct( - self, - model_output: Union[torch.FloatTensor, np.ndarray], - sample: Union[torch.FloatTensor, np.ndarray], - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - **kwargs, - ) -> Union[SchedulerOutput, Tuple]: - """ - Correct the predicted sample based on the output model_output of the network. This is often run repeatedly - after making the prediction for the previous timestep. - - Args: - model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. - sample (`torch.FloatTensor` or `np.ndarray`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if "seed" in kwargs and kwargs["seed"] is not None: - self.set_seed(kwargs["seed"]) - - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" - # sample noise for correction - noise = self.randn_like(sample, generator=generator) - - # compute step size from the model_output, the noise, and the snr - grad_norm = self.norm(model_output) - noise_norm = self.norm(noise) - step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 - step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) - # self.repeat_scalar(step_size, sample.shape[0]) - - # compute corrected sample: model_output term and noise term - prev_sample_mean = sample + step_size[:, None, None, None] * model_output - prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5)[:, None, None, None] * noise - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Samhita/geolocator/app.py b/spaces/Samhita/geolocator/app.py deleted file mode 100644 index 4896df615c1f413f666551cc80b8dc9830313a20..0000000000000000000000000000000000000000 --- a/spaces/Samhita/geolocator/app.py +++ /dev/null @@ -1,327 +0,0 @@ -import base64 -import json -import mimetypes - -# import mimetypes -import os -import sys -from io import BytesIO -from typing import Dict, Tuple, Union - -import banana_dev as banana -import geopy.distance -import gradio as gr -import pandas as pd -import plotly -import plotly.express as px - -# import requests -from dotenv import load_dotenv -from smart_open import open as smartopen - -sys.path.append("..") - -from gantry_callback.gantry_util import GantryImageToTextLogger # noqa: E402 -from gantry_callback.s3_util import ( # noqa: E402 - add_access_policy, - enable_bucket_versioning, - get_or_create_bucket, - get_uri_of, - make_key, - make_unique_bucket_name, -) -from gantry_callback.string_img_util import read_b64_string # noqa: E402 - -load_dotenv() - -URL = os.getenv("ENDPOINT") -GANTRY_APP_NAME = os.getenv("GANTRY_APP_NAME") -GANTRY_KEY = os.getenv("GANTRY_API_KEY") -MAPBOX_TOKEN = os.getenv("MAPBOX_TOKEN") -BANANA_API_KEY = os.getenv("BANANA_API_KEY") -BANANA_MODEL_KEY = os.getenv("BANANA_MODEL_KEY") - -examples = json.load(open("examples.json")) - - -def compute_distance(map_data: Dict[str, Dict[str, Union[str, float, None]]]): - hierarchy_lat, hierarchy_long = ( - map_data["hierarchy"]["latitude"], - map_data["hierarchy"]["longitude"], - ) - - coarse_lat, coarse_long = ( - map_data["coarse"]["latitude"], - map_data["coarse"]["longitude"], - ) - - fine_lat, fine_long = ( - map_data["fine"]["latitude"], - map_data["fine"]["longitude"], - ) - - hierarchy_to_coarse = geopy.distance.geodesic( - (hierarchy_lat, hierarchy_long), (coarse_lat, coarse_long) - ).miles - - hierarchy_to_fine = geopy.distance.geodesic( - (hierarchy_lat, hierarchy_long), (fine_lat, fine_long) - ).miles - - return hierarchy_to_coarse, hierarchy_to_fine - - -def get_plotly_graph( - map_data: Dict[str, Dict[str, Union[str, float, None]]] -) -> plotly.graph_objects.Figure: - - hierarchy_to_coarse, hierarchy_to_fine = compute_distance(map_data) - what_to_consider = {"hierarchy"} - if hierarchy_to_coarse > 5000: - what_to_consider.add("coarse") - if hierarchy_to_fine > 30: - what_to_consider.add("fine") - - size_map = {"hierarchy": 3, "fine": 1, "coarse": 1} - lat_long_data = [] - for subdivision, location_data in map_data.items(): - if subdivision in what_to_consider: - lat_long_data.append( - [ - subdivision, - float(location_data["latitude"]), - float(location_data["longitude"]), - location_data["location"], - size_map[subdivision], - ] - ) - - map_df 
= pd.DataFrame( - lat_long_data, - columns=["subdivision", "latitude", "longitude", "location", "size"], - ) - - px.set_mapbox_access_token(MAPBOX_TOKEN) - fig = px.scatter_mapbox( - map_df, - lat="latitude", - lon="longitude", - hover_name="location", - hover_data=["latitude", "longitude", "subdivision"], - color="subdivision", - color_discrete_map={ - "hierarchy": "fuchsia", - "coarse": "blue", - "fine": "yellow", - }, - zoom=2, - height=500, - size="size", - ) - - fig.update_layout(mapbox_style="dark") - fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0}) - return fig - - -def gradio_error(): - raise gr.Error("Unable to detect the location!") - - -def get_outputs( - data: Dict[str, Dict[str, Union[str, float, None]]] -) -> Tuple[str, str, plotly.graph_objects.Figure]: - if data is None: - gradio_error() - - location, latitude, longitude = ( - data["hierarchy"]["location"], - data["hierarchy"]["latitude"], - data["hierarchy"]["longitude"], - ) - if location is None: - gradio_error() - - return ( - location, - f"{latitude},{longitude}", - get_plotly_graph(map_data=data), - ) - - -def image_gradio(img_file: str) -> Tuple[str, str, plotly.graph_objects.Figure]: - # data = json.loads( - # requests.post( - # f"{URL}predict-image", - # files={ - # "image": ( - # img_file, - # open(img_file, "rb"), - # mimetypes.guess_type(img_file)[0], - # ) - # }, - # ).text - # ) - with open(img_file, "rb") as image_file: - image_bytes = BytesIO(image_file.read()) - - data = banana.run( - BANANA_API_KEY, - BANANA_MODEL_KEY, - { - "image": base64.b64encode(image_bytes.getvalue()).decode("utf-8"), - "filename": os.path.basename(img_file), - }, - )["modelOutputs"][0] - - return get_outputs(data=data) - - -def _upload_video_to_s3(video_b64_string): - bucket = get_or_create_bucket( - make_unique_bucket_name(prefix="geolocator-app", seed="420") - ) - enable_bucket_versioning(bucket) - add_access_policy(bucket) - - data_type, video_buffer = read_b64_string(video_b64_string, return_data_type=True) - video_bytes = video_buffer.read() - key = make_key(video_bytes, filetype=data_type) - - s3_uri = get_uri_of(bucket, key) - - with smartopen(s3_uri, "wb") as s3_object: - s3_object.write(video_bytes) - - return s3_uri - - -def video_gradio(video_file: str) -> Tuple[str, str, plotly.graph_objects.Figure]: - # data = json.loads( - # requests.post( - # f"{URL}predict-video", - # files={ - # "video": ( - # video_file, - # open(video_file, "rb"), - # "application/octet-stream", - # ) - # }, - # ).text - # ) - - with open(video_file, "rb") as video_file: - video_b64_string = base64.b64encode( - BytesIO(video_file.read()).getvalue() - ).decode("utf8") - - video_mime = mimetypes.guess_type(video_file)[0] - - s3_uri = _upload_video_to_s3(f"data:{video_mime};base64," + video_b64_string) - - data = banana.run( - BANANA_API_KEY, - BANANA_MODEL_KEY, - { - "video": s3_uri, - "filename": os.path.basename(video_file), - }, - )["modelOutputs"][0] - - return get_outputs(data=data) - - -def url_gradio(url: str) -> Tuple[str, str, plotly.graph_objects.Figure]: - # data = json.loads( - # requests.post( - # f"{URL}predict-url", - # headers={"content-type": "text/plain"}, - # data=url, - # ).text - # ) - data = banana.run(BANANA_API_KEY, BANANA_MODEL_KEY, {"url": url},)[ - "modelOutputs" - ][0] - - return get_outputs(data=data) - - -with gr.Blocks() as demo: - gr.Markdown("# GeoLocator") - gr.Markdown( - "### An app that guesses the location of an image 🌌 or a YouTube video link 🔗." 
- ) - with gr.Tab("Image"): - with gr.Row(): - img_input = gr.Image(type="filepath", label="Image") - with gr.Column(): - img_text_output = gr.Textbox(label="Location") - img_coordinates = gr.Textbox(label="Coordinates") - img_plot = gr.Plot() - img_text_button = gr.Button("Go locate!") - with gr.Row(): - # Flag button - img_flag_button = gr.Button("Flag this output") - gr.Examples(examples["images"], inputs=[img_input]) - # with gr.Tab("Video"): - # with gr.Row(): - # video_input = gr.Video(type="filepath", label="Video") - # with gr.Column(): - # video_text_output = gr.Textbox(label="Location") - # video_coordinates = gr.Textbox(label="Coordinates") - # video_plot = gr.Plot() - # video_text_button = gr.Button("Go locate!") - # gr.Examples(examples["videos"], inputs=[video_input]) - with gr.Tab("YouTube Link"): - with gr.Row(): - url_input = gr.Textbox(label="Link") - with gr.Column(): - url_text_output = gr.Textbox(label="Location") - url_coordinates = gr.Textbox(label="Coordinates") - url_plot = gr.Plot() - url_text_button = gr.Button("Go locate!") - gr.Examples(examples["video_urls"], inputs=[url_input]) - - # Gantry flagging for image # - callback = GantryImageToTextLogger(application=GANTRY_APP_NAME, api_key=GANTRY_KEY) - - callback.setup( - components=[img_input, img_text_output], - flagging_dir=make_unique_bucket_name(prefix=GANTRY_APP_NAME, seed="420"), - ) - - img_flag_button.click( - fn=lambda *args: callback.flag(args), - inputs=[img_input, img_text_output, img_coordinates], - outputs=None, - preprocess=False, - ) - ################### - - img_text_button.click( - image_gradio, - inputs=img_input, - outputs=[img_text_output, img_coordinates, img_plot], - ) - # video_text_button.click( - # video_gradio, - # inputs=video_input, - # outputs=[video_text_output, video_coordinates, video_plot], - # ) - url_text_button.click( - url_gradio, - inputs=url_input, - outputs=[url_text_output, url_coordinates, url_plot], - ) - - gr.Markdown( - "Check out the [GitHub repository](https://github.com/samhita-alla/geolocator) that this demo is based off of." - ) - gr.Markdown( - "#### To understand what subdivision means, refer to the [Geolocation paper](https://openaccess.thecvf.com/content_ECCV_2018/papers/Eric_Muller-Budack_Geolocation_Estimation_of_ECCV_2018_paper.pdf)." - ) - gr.Markdown( - "#### TL;DR Fine and Coarse are spatial resolutions and Hierarchy generates predictions at fine scale but incorporates knowledge from coarse and middle partitionings." 
- ) - -demo.launch() diff --git a/spaces/SatwikKambham/Image-Classifier/app.py b/spaces/SatwikKambham/Image-Classifier/app.py deleted file mode 100644 index a6a9b0ac7923f050a3f9831691979a19221d9143..0000000000000000000000000000000000000000 --- a/spaces/SatwikKambham/Image-Classifier/app.py +++ /dev/null @@ -1,211 +0,0 @@ -import gradio as gr -from huggingface_hub import hf_hub_download - -import torch -import torch.nn as nn -from torchvision import transforms - - -class SimpleResidualBlock(nn.Module): - def __init__(self, in_channels, out_channels, set_stride=False): - super().__init__() - stride = 2 if in_channels != out_channels and set_stride else 1 - - self.conv1 = nn.LazyConv2d( - out_channels, - kernel_size=3, - padding="same" if stride == 1 else 1, - stride=stride, - ) - self.conv2 = nn.LazyConv2d(out_channels, kernel_size=3, padding="same") - - self.bn1 = nn.LazyBatchNorm2d() - self.bn2 = nn.LazyBatchNorm2d() - - self.relu = nn.ReLU() - - if in_channels != out_channels: - self.residual = nn.Sequential( - nn.LazyConv2d(out_channels, kernel_size=1, stride=stride), - nn.LazyBatchNorm2d(), - ) - else: - self.residual = nn.Identity() - - def forward(self, x): - out = self.relu(self.bn1(self.conv1(x))) - out = self.bn2(self.conv2(out)) - out += self.residual(x) - out = self.relu(out) - return out - - -class BottleneckResidualBlock(nn.Module): - def __init__( - self, in_channels, out_channels, identity_mapping=False, set_stride=False - ): - super().__init__() - stride = 2 if in_channels != out_channels and set_stride else 1 - - self.conv1 = nn.LazyConv2d( - out_channels, - kernel_size=1, - padding="same" if stride == 1 else 0, - stride=stride, - ) - self.conv2 = nn.LazyConv2d(out_channels, kernel_size=3, padding="same") - self.conv3 = nn.LazyConv2d(out_channels * 4, kernel_size=1, padding="same") - - self.bn1 = nn.LazyBatchNorm2d() - self.bn2 = nn.LazyBatchNorm2d() - self.bn3 = nn.LazyBatchNorm2d() - - self.relu = nn.ReLU() - - if in_channels != out_channels or not identity_mapping: - self.residual = nn.Sequential( - nn.LazyConv2d(out_channels * 4, kernel_size=1, stride=stride), - nn.LazyBatchNorm2d(), - ) - else: - self.residual = nn.Identity() - - def forward(self, x): - out = self.relu(self.bn1(self.conv1(x))) - out = self.relu(self.bn2(self.conv2(out))) - out = self.bn3(self.conv3(out)) - out += self.residual(x) - out = self.relu(out) - return out - - -RESNET_18 = [2, 2, 2, 2] -RESNET_34 = [3, 4, 6, 3] -RESNET_50 = [3, 4, 6, 3] -RESNET_101 = [3, 4, 23, 3] -RESNET_152 = [3, 8, 36, 3] - - -class ResNet(nn.Module): - def __init__(self, arch=RESNET_18, block="simple", num_classes=256): - super().__init__() - self.conv1 = nn.Sequential( - nn.LazyConv2d(64, kernel_size=7, stride=2, padding=3), - nn.LazyBatchNorm2d(), - nn.ReLU(), - ) - self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) - self.conv2 = self._make_layer(64, 64, arch[0], set_stride=False, block=block) - self.conv3 = self._make_layer(64, 128, arch[1], block=block) - self.conv4 = self._make_layer(128, 256, arch[2], block=block) - self.conv5 = self._make_layer(256, 512, arch[3], block=block) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.flatten = nn.Flatten() - self.fc = nn.LazyLinear(num_classes) - - def _make_layer( - self, in_channels, out_channels, num_blocks, set_stride=True, block="simple" - ): - """Block is either 'simple' or 'bottleneck'""" - layers = [] - for i in range(num_blocks): - layers.append( - SimpleResidualBlock(in_channels, out_channels, set_stride=set_stride) - if block == "simple" - else 
BottleneckResidualBlock( - in_channels if i == 0 else out_channels * 4, - out_channels, - set_stride=set_stride, - ) - ) - set_stride = False - return nn.Sequential(*layers) - - def forward(self, x): - out = self.conv1(x) - out = self.maxpool(self.conv2(out)) - out = self.conv3(out) - out = self.conv4(out) - out = self.conv5(out) - out = self.avgpool(out) - out = self.flatten(out) - out = self.fc(out) - return out - - def _init_weights(module): - # Initlize weights with glorot uniform - if isinstance(module, nn.Conv2d): - nn.init.xavier_uniform_(module.weight) - nn.init.zeros_(module.bias) - elif isinstance(module, nn.Linear): - nn.init.xavier_uniform_(module.weight) - nn.init.zeros_(module.bias) - - -class ImageClassifier: - def __init__(self, checkpoint_path): - self.checkpoint_path = checkpoint_path - self.model = self.load_model(checkpoint_path) - self.transform = self.get_transform((244, 244)) - self.labels = [ - "airplane", - "automobile", - "bird", - "cat", - "deer", - "dog", - "frog", - "horse", - "ship", - "truck", - ] - - def load_model(self, checkpoint_path): - classifier = ResNet( - arch=RESNET_18, - block="simple", - num_classes=10, - ) - classifier.load_state_dict(torch.load(checkpoint_path)) - classifier = classifier.cpu() - classifier.eval() - return classifier - - def get_transform(self, img_shape): - preprocess_transform = transforms.Compose( - [ - transforms.Resize(img_shape), - transforms.ToTensor(), - ] - ) - return preprocess_transform - - def predict(self, image): - image_tensor = self.transform(image).unsqueeze(0) - with torch.no_grad(): - logits = self.model(image_tensor) - probs = logits.softmax(dim=1)[0] - return {label: prob.item() for label, prob in zip(self.labels, probs)} - - def classify(self, input_image): - return self.predict(input_image) - - -def classify(input_image): - return classifier.classify(input_image) - - -checkpoint_path = hf_hub_download( - repo_id="SatwikKambham/resnet18-cifar10", - filename="model.pt", -) -classifier = ImageClassifier(checkpoint_path) -iface = gr.Interface( - classify, - inputs=[ - gr.Image(label="Input Image", type="pil"), - ], - outputs=gr.Label(num_top_classes=3), -) - -iface.launch() diff --git a/spaces/SeViLA/SeViLA/lavis/models/albef_models/albef_classification.py b/spaces/SeViLA/SeViLA/lavis/models/albef_models/albef_classification.py deleted file mode 100644 index 2d82de9a40ab53f443bf67fc6cfe24c6b6ed81cd..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/albef_models/albef_classification.py +++ /dev/null @@ -1,182 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import warnings -from copy import deepcopy - -import torch -import torch.nn.functional as F -from lavis.common.registry import registry -from lavis.models.albef_models import AlbefBase -from lavis.models.albef_models.albef_outputs import ( - AlbefIntermediateOutput, - AlbefOutputWithLogits, -) -from lavis.models.base_model import MomentumDistilationMixin -from lavis.models.med import XBertEncoder -from lavis.models.vit import VisionTransformerEncoder -from torch import nn - - -@registry.register_model("albef_classification") -class AlbefClassification(AlbefBase, MomentumDistilationMixin): - PRETRAINED_MODEL_CONFIG_DICT = { - "ve": "configs/models/albef_classification_ve.yaml", - } - - def __init__( - self, - image_encoder, - text_encoder, - num_classes, - momentum=0.995, - alpha=0.4, - use_distill=True, - max_txt_len=40, - ): - super().__init__() - - self.tokenizer = self.init_tokenizer() - self.max_txt_len = max_txt_len - - self.use_distill = use_distill - - self.visual_encoder = image_encoder - self.text_encoder = text_encoder - - hidden_size = text_encoder.config.hidden_size - - if num_classes > 0: - self.cls_head = nn.Sequential( - nn.Linear(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, num_classes), - ) - else: - warnings.warn( - f"Found num_classes=0, initializing {type(self)} without classifier." - ) - - if self.use_distill: - self.visual_encoder_m = deepcopy(self.visual_encoder) - self.text_encoder_m = deepcopy(self.text_encoder) - self.cls_head_m = deepcopy(self.cls_head) - - self.momentum = momentum - self.alpha = alpha - - self.model_pairs = [ - [self.visual_encoder, self.visual_encoder_m], - [self.text_encoder, self.text_encoder_m], - [self.cls_head, self.cls_head_m], - ] - - self.copy_params() - - def _rampup_factor(self, epoch, iters, num_iters_per_epoch): - return min(1, (epoch * num_iters_per_epoch + iters) / num_iters_per_epoch) - - def forward(self, samples, is_train=True): - sentences = samples["text_input"] - sentences = self.tokenizer( - sentences, - padding="longest", - truncation=True, - max_length=self.max_txt_len, - return_tensors="pt", - ).to(self.device) - samples.update({"tokenized_text": sentences}) - - targets = samples["label"] - - image_embeds = self.visual_encoder.forward_features(samples["image"]) - encoder_output = self.text_encoder.forward_automask( - samples["tokenized_text"], image_embeds - ) - - prediction = self.cls_head(encoder_output.last_hidden_state[:, 0, :]) - - if is_train: - if self.use_distill: - with torch.no_grad(): - self._momentum_update() - - image_embeds_m = self.visual_encoder_m(samples["image"]) - encoder_output_m = self.text_encoder_m.forward_automask( - samples["tokenized_text"], image_embeds_m - ) - - prediction_m = self.cls_head_m( - encoder_output_m.last_hidden_state[:, 0, :] - ) - - alpha = self.alpha * self._rampup_factor( - epoch=samples["epoch"], - iters=samples["iters"], - num_iters_per_epoch=samples["num_iters_per_epoch"], - ) - - loss = (1 - alpha) * F.cross_entropy( - prediction, targets - ) - alpha * torch.sum( - F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1), - dim=1, - ).mean() - else: - loss = F.cross_entropy(prediction, targets) - - image_embeds_m, encoder_output_m, prediction_m = None, None, None - - # return {"loss": loss} - return AlbefOutputWithLogits( - loss=loss, - intermediate_output=AlbefIntermediateOutput( - 
image_embeds=image_embeds, - image_embeds_m=image_embeds_m, - encoder_output=encoder_output, - encoder_output_m=encoder_output_m, - ), - logits=prediction, - logits_m=prediction_m, - ) - else: - return {"predictions": prediction, "targets": targets} - - def predict(self, samples): - output = self.forward(samples, is_train=False) - return output - - @classmethod - def from_config(cls, cfg=None): - image_encoder = VisionTransformerEncoder.from_config(cfg) - - # text encoder + multimodal encoder - text_encoder = XBertEncoder.from_config(cfg) - - alpha = cfg.get("alpha", 0.4) - momentum = cfg.get("momentum", 0.995) - use_distill = cfg.get("use_distill", True) - num_classes = cfg.get("num_classes", -1) - max_txt_len = cfg.get("max_txt_len", 40) - - assert num_classes > 1, "Invalid number of classes provided, found {}".format( - num_classes - ) - - model = cls( - image_encoder=image_encoder, - text_encoder=text_encoder, - use_distill=use_distill, - alpha=alpha, - num_classes=num_classes, - momentum=momentum, - max_txt_len=max_txt_len, - ) - - model.load_checkpoint_from_config(cfg) - - return model diff --git a/spaces/Sense-X/uniformer_video_demo/app.py b/spaces/Sense-X/uniformer_video_demo/app.py deleted file mode 100644 index f278eda9865ec2f407c37322b4b8b5fb1c4d17ae..0000000000000000000000000000000000000000 --- a/spaces/Sense-X/uniformer_video_demo/app.py +++ /dev/null @@ -1,103 +0,0 @@ -import os - -import torch -import numpy as np -import torch.nn.functional as F -import torchvision.transforms as T -from PIL import Image -from decord import VideoReader -from decord import cpu -from uniformer import uniformer_small -from kinetics_class_index import kinetics_classnames -from transforms import ( - GroupNormalize, GroupScale, GroupCenterCrop, - Stack, ToTorchFormatTensor -) - -import gradio as gr -from huggingface_hub import hf_hub_download - - -def get_index(num_frames, num_segments=16, dense_sample_rate=8): - sample_range = num_segments * dense_sample_rate - sample_pos = max(1, 1 + num_frames - sample_range) - t_stride = dense_sample_rate - start_idx = 0 if sample_pos == 1 else sample_pos // 2 - offsets = np.array([ - (idx * t_stride + start_idx) % - num_frames for idx in range(num_segments) - ]) - return offsets + 1 - - -def load_video(video_path): - vr = VideoReader(video_path, ctx=cpu(0)) - num_frames = len(vr) - frame_indices = get_index(num_frames, 16, 16) - - # transform - crop_size = 224 - scale_size = 256 - input_mean = [0.485, 0.456, 0.406] - input_std = [0.229, 0.224, 0.225] - - transform = T.Compose([ - GroupScale(int(scale_size)), - GroupCenterCrop(crop_size), - Stack(), - ToTorchFormatTensor(), - GroupNormalize(input_mean, input_std) - ]) - - images_group = list() - for frame_index in frame_indices: - img = Image.fromarray(vr[frame_index].asnumpy()) - images_group.append(img) - torch_imgs = transform(images_group) - - # The model expects inputs of shape: B x C x T x H x W - TC, H, W = torch_imgs.shape - torch_imgs = torch_imgs.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4) - - return torch_imgs - - -def inference(video): - vid = load_video(video) - - prediction = model(vid) - prediction = F.softmax(prediction, dim=1).flatten() - - return {kinetics_id_to_classname[str(i)]: float(prediction[i]) for i in range(400)} - - -# Device on which to run the model -# Set to cuda to load on GPU -device = "cpu" -model_path = hf_hub_download(repo_id="Sense-X/uniformer_video", filename="uniformer_small_k400_16x8.pth") -# Pick a pretrained model -model = uniformer_small() -state_dict = 
torch.load(model_path, map_location='cpu') -model.load_state_dict(state_dict) - -# Set to eval mode and move to desired device -model = model.to(device) -model = model.eval() - -# Create an id to label name mapping -kinetics_id_to_classname = {} -for k, v in kinetics_classnames.items(): - kinetics_id_to_classname[k] = v - -inputs = gr.inputs.Video() -label = gr.outputs.Label(num_top_classes=5) - -title = "UniFormer-S" -description = "Gradio demo for UniFormer: To use it, simply upload your video, or click one of the examples to load them. Read more at the links below." -article = "
    [ICLR2022] UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning | Github Repo
    " - -gr.Interface( - inference, inputs, outputs=label, - title=title, description=description, article=article, - examples=[['hitting_baseball.mp4'], ['hoverboarding.mp4'], ['yoga.mp4']] - ).launch(enable_queue=True, cache_examples=True) diff --git a/spaces/SoulAbi/text-prompt-to-audio-generation/share_btn.py b/spaces/SoulAbi/text-prompt-to-audio-generation/share_btn.py deleted file mode 100644 index 6d99209630e3bef3f7aab2758eb06098447cd4b8..0000000000000000000000000000000000000000 --- a/spaces/SoulAbi/text-prompt-to-audio-generation/share_btn.py +++ /dev/null @@ -1,73 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputVideoFile(videoEl){ - const res = await fetch(videoEl.src); - const blob = await res.blob(); - const videoId = Date.now() % 200; - const fileName = `sd-perception-${{videoId}}.mp4`; - return new File([blob], fileName, { type: 'video/mp4' }); - } - - async function audioToBase64(audioFile) { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.readAsDataURL(audioFile); - reader.onload = () => resolve(reader.result); - reader.onerror = error => reject(error); - - }); - } - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const inputPromptEl = gradioEl.querySelector('#prompt-in input').value; - const outputVideoEl = gradioEl.querySelector('#output-video video'); - - let titleTxt = `Text-to-Audio: ${inputPromptEl}`; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputVideoEl){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const outputVideo = await getInputVideoFile(outputVideoEl); - const urlOutputVideo = await uploadFile(outputVideo); - - const descriptionMd = ` -##### ${inputPromptEl} - -${urlOutputVideo} -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/__init__.py deleted file mode 100644 index 16e1274c1e3e2db483f3b05a9fe4d6fbb80dd533..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from docarray.array.any_array import AnyDocArray -from docarray.array.doc_list.doc_list import DocList -from docarray.array.doc_vec.doc_vec import DocVec - -__all__ = ['DocList', 'DocVec', 'AnyDocArray'] diff --git 
a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/memory.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/memory.py deleted file mode 100644 index bd494780b9dbbd1571688cd270bb9b53d113c13e..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/utils/memory.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -from contextlib import contextmanager -from functools import wraps -import torch - -__all__ = ["retry_if_cuda_oom"] - - -@contextmanager -def _ignore_torch_cuda_oom(): - """ - A context which ignores CUDA OOM exception from pytorch. - """ - try: - yield - except RuntimeError as e: - # NOTE: the string may change? - if "CUDA out of memory. " in str(e): - pass - else: - raise - - -def retry_if_cuda_oom(func): - """ - Makes a function retry itself after encountering - pytorch's CUDA OOM error. - It will first retry after calling `torch.cuda.empty_cache()`. - - If that still fails, it will then retry by trying to convert inputs to CPUs. - In this case, it expects the function to dispatch to CPU implementation. - The return values may become CPU tensors as well and it's user's - responsibility to convert it back to CUDA tensor if needed. - - Args: - func: a stateless callable that takes tensor-like objects as arguments - - Returns: - a callable which retries `func` if OOM is encountered. - - Examples: - :: - output = retry_if_cuda_oom(some_torch_function)(input1, input2) - # output may be on CPU even if inputs are on GPU - - Note: - 1. When converting inputs to CPU, it will only look at each argument and check - if it has `.device` and `.to` for conversion. Nested structures of tensors - are not supported. - - 2. Since the function might be called more than once, it has to be - stateless. - """ - - def maybe_to_cpu(x): - try: - like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") - except AttributeError: - like_gpu_tensor = False - if like_gpu_tensor: - return x.to(device="cpu") - else: - return x - - @wraps(func) - def wrapped(*args, **kwargs): - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Clear cache and retry - torch.cuda.empty_cache() - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Try on CPU. This slows down the code significantly, therefore print a notice. 
- logger = logging.getLogger(__name__) - logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) - new_args = (maybe_to_cpu(x) for x in args) - new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} - return func(*new_args, **new_kwargs) - - return wrapped diff --git a/spaces/TH5314/newbing/src/lib/bots/bing/index.ts b/spaces/TH5314/newbing/src/lib/bots/bing/index.ts deleted file mode 100644 index 6fd51ba48cbb1148f13d29e76960c092b807cfae..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/lib/bots/bing/index.ts +++ /dev/null @@ -1,426 +0,0 @@ -import { fetch, WebSocket, debug } from '@/lib/isomorphic' -import WebSocketAsPromised from 'websocket-as-promised' -import { - SendMessageParams, - BingConversationStyle, - ConversationResponse, - ChatResponseMessage, - ConversationInfo, - InvocationEventType, - ChatError, - ErrorCode, - ChatUpdateCompleteResponse, - ImageInfo, - KBlobResponse -} from './types' - -import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils' -import { WatchDog, createChunkDecoder } from '@/lib/utils' - -type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }> - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - const { cookie, ua, conversationContext } = opts - this.cookie = cookie?.includes(';') ? 
cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'ActionRequest', - 'Chat', - 'Context', - 'InternalSearchQuery', - 'InternalSearchResult', - 'Disengaged', - 'InternalLoaderMessage', - 'Progress', - 'RenderCardRequest', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('你的 VPS 或代理可能被封禁,如有疑问,请前往 https://github.com/weaigc/bingo 咨询', ErrorCode.UNKOWN_ERROR) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: 
error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) - }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' 
+ query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? 
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/TNR-5/netlist.v1/index.html b/spaces/TNR-5/netlist.v1/index.html deleted file mode 100644 index 15388ffe25e26693f2232ba80adc6f0d2caa5700..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/netlist.v1/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - NetList - - - - - - - - - \ No newline at end of file diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_stack.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_stack.py deleted file mode 100644 index 194564e761ddae165b39ef6598877e2e3820af0a..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_stack.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import List, TypeVar - -T = TypeVar("T") - - -class Stack(List[T]): - """A small shim over builtin list.""" - - @property - def top(self) -> T: - """Get top of stack.""" - return self[-1] - - def push(self, item: T) -> None: - """Push an item on to the stack (append in stack nomenclature).""" - self.append(item) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/bcppcompiler.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/bcppcompiler.py deleted file mode 100644 index ba45ea2b9500e62b8cf6786432336f5b1ddddec1..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/bcppcompiler.py +++ /dev/null @@ -1,401 +0,0 @@ -"""distutils.bcppcompiler - -Contains BorlandCCompiler, an implementation of the abstract CCompiler class -for the Borland C++ compiler. -""" - -# This implementation by Lyle Johnson, based on the original msvccompiler.py -# module and using the directions originally published by Gordon Williams. - -# XXX looks like there's a LOT of overlap between these two classes: -# someone should sit down and factor out the common code as -# WindowsCCompiler! 
--GPW - - -import os -import warnings - -from .errors import ( - DistutilsExecError, - CompileError, - LibError, - LinkError, - UnknownFileError, -) -from .ccompiler import CCompiler, gen_preprocess_options -from .file_util import write_file -from .dep_util import newer -from ._log import log - - -warnings.warn( - "bcppcompiler is deprecated and slated to be removed " - "in the future. Please discontinue use or file an issue " - "with pypa/distutils describing your use case.", - DeprecationWarning, -) - - -class BCPPCompiler(CCompiler): - """Concrete class that implements an interface to the Borland C/C++ - compiler, as defined by the CCompiler abstract class. - """ - - compiler_type = 'bcpp' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - - # These executables are assumed to all be in the path. - # Borland doesn't seem to use any special registry settings to - # indicate their installation locations. - - self.cc = "bcc32.exe" - self.linker = "ilink32.exe" - self.lib = "tlib.exe" - - self.preprocess_options = None - self.compile_options = ['/tWM', '/O2', '/q', '/g0'] - self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0'] - - self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_static = [] - self.ldflags_exe = ['/Gn', '/q', '/x'] - self.ldflags_exe_debug = ['/Gn', '/q', '/x', '/r'] - - # -- Worker methods ------------------------------------------------ - - def compile( # noqa: C901 - self, - sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): - macros, objects, extra_postargs, pp_opts, build = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) - compile_opts = extra_preargs or [] - compile_opts.append('-c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - # XXX why do the normpath here? - src = os.path.normpath(src) - obj = os.path.normpath(obj) - # XXX _setup_compile() did a mkpath() too but before the normpath. - # Is it possible to skip the normpath? - self.mkpath(os.path.dirname(obj)) - - if ext == '.res': - # This is already a binary file -- skip it. - continue # the 'for' loop - if ext == '.rc': - # This needs to be compiled to a .res file -- do it now. - try: - self.spawn(["brcc32", "-fo", obj, src]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue # the 'for' loop - - # The next two are both for the real compiler. 
- if ext in self._c_extensions: - input_opt = "" - elif ext in self._cpp_extensions: - input_opt = "-P" - else: - # Unknown file type -- no extra options. The compiler - # will probably fail, but let it just in case this is a - # file the compiler recognizes even if we don't. - input_opt = "" - - output_opt = "-o" + obj - - # Compiler command line syntax is: "bcc32 [options] file(s)". - # Note that the source file names must appear at the end of - # the command line. - try: - self.spawn( - [self.cc] - + compile_opts - + pp_opts - + [input_opt, output_opt] - + extra_postargs - + [src] - ) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - # compile () - - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): - (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = [output_filename, '/u'] + objects - if debug: - pass # XXX what goes here? - try: - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # create_static_lib () - - def link( # noqa: C901 - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): - # XXX this ignores 'build_temp'! should follow the lead of - # msvccompiler.py - - (objects, output_dir) = self._fix_object_args(objects, output_dir) - (libraries, library_dirs, runtime_library_dirs) = self._fix_lib_args( - libraries, library_dirs, runtime_library_dirs - ) - - if runtime_library_dirs: - log.warning( - "I don't know what to do with 'runtime_library_dirs': %s", - str(runtime_library_dirs), - ) - - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - # Figure out linker args based on type of target. 
- if target_desc == CCompiler.EXECUTABLE: - startup_obj = 'c0w32' - if debug: - ld_args = self.ldflags_exe_debug[:] - else: - ld_args = self.ldflags_exe[:] - else: - startup_obj = 'c0d32' - if debug: - ld_args = self.ldflags_shared_debug[:] - else: - ld_args = self.ldflags_shared[:] - - # Create a temporary exports file for use by the linker - if export_symbols is None: - def_file = '' - else: - head, tail = os.path.split(output_filename) - modname, ext = os.path.splitext(tail) - temp_dir = os.path.dirname(objects[0]) # preserve tree structure - def_file = os.path.join(temp_dir, '%s.def' % modname) - contents = ['EXPORTS'] - for sym in export_symbols or []: - contents.append(' {}=_{}'.format(sym, sym)) - self.execute(write_file, (def_file, contents), "writing %s" % def_file) - - # Borland C++ has problems with '/' in paths - objects2 = map(os.path.normpath, objects) - # split objects in .obj and .res files - # Borland C++ needs them at different positions in the command line - objects = [startup_obj] - resources = [] - for file in objects2: - (base, ext) = os.path.splitext(os.path.normcase(file)) - if ext == '.res': - resources.append(file) - else: - objects.append(file) - - for ell in library_dirs: - ld_args.append("/L%s" % os.path.normpath(ell)) - ld_args.append("/L.") # we sometimes use relative paths - - # list of object files - ld_args.extend(objects) - - # XXX the command-line syntax for Borland C++ is a bit wonky; - # certain filenames are jammed together in one big string, but - # comma-delimited. This doesn't mesh too well with the - # Unix-centric attitude (with a DOS/Windows quoting hack) of - # 'spawn()', so constructing the argument list is a bit - # awkward. Note that doing the obvious thing and jamming all - # the filenames and commas into one argument would be wrong, - # because 'spawn()' would quote any filenames with spaces in - # them. Arghghh!. Apparently it works fine as coded... - - # name of dll/exe file - ld_args.extend([',', output_filename]) - # no map file and start libraries - ld_args.append(',,') - - for lib in libraries: - # see if we find it and if there is a bcpp specific lib - # (xxx_bcpp.lib) - libfile = self.find_library_file(library_dirs, lib, debug) - if libfile is None: - ld_args.append(lib) - # probably a BCPP internal library -- don't warn - else: - # full name which prefers bcpp_xxx.lib over xxx.lib - ld_args.append(libfile) - - # some default libraries - ld_args.extend(('import32', 'cw32mt')) - - # def file for export symbols - ld_args.extend([',', def_file]) - # add resource files - ld_args.append(',') - ld_args.extend(resources) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath(os.path.dirname(output_filename)) - try: - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # link () - - # -- Miscellaneous methods ----------------------------------------- - - def find_library_file(self, dirs, lib, debug=0): - # List of effective library names to try, in order of preference: - # xxx_bcpp.lib is better than xxx.lib - # and xxx_d.lib is better than xxx.lib if debug is set - # - # The "_bcpp" suffix is to handle a Python installation for people - # with multiple compilers (primarily Distutils hackers, I suspect - # ;-). The idea is they'd have one static library for each - # compiler they care about, since (almost?) 
every Windows compiler - # seems to have a different format for static libraries. - if debug: - dlib = lib + "_d" - try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib) - else: - try_names = (lib + "_bcpp", lib) - - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # overwrite the one from CCompiler to support rc and res-files - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext(os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '{}' (from '{}')".format(ext, src_name) - ) - if strip_dir: - base = os.path.basename(base) - if ext == '.res': - # these can go unchanged - obj_names.append(os.path.join(output_dir, base + ext)) - elif ext == '.rc': - # these need to be compiled to .res-files - obj_names.append(os.path.join(output_dir, base + '.res')) - else: - obj_names.append(os.path.join(output_dir, base + self.obj_extension)) - return obj_names - - # object_filenames () - - def preprocess( - self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None, - ): - (_, macros, include_dirs) = self._fix_compile_args(None, macros, include_dirs) - pp_opts = gen_preprocess_options(macros, include_dirs) - pp_args = ['cpp32.exe'] + pp_opts - if output_file is not None: - pp_args.append('-o' + output_file) - if extra_preargs: - pp_args[:0] = extra_preargs - if extra_postargs: - pp_args.extend(extra_postargs) - pp_args.append(source) - - # We need to preprocess: either we're being forced to, or the - # source file is newer than the target (or the target doesn't - # exist). - if self.force or output_file is None or newer(source, output_file): - if output_file: - self.mkpath(os.path.dirname(output_file)) - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - print(msg) - raise CompileError(msg) - - # preprocess() diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_functools.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_functools.py deleted file mode 100644 index 71f66bd03cb713a2190853bdf7170c4ea80d2425..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_functools.py +++ /dev/null @@ -1,104 +0,0 @@ -import types -import functools - - -# from jaraco.functools 3.3 -def method_cache(method, cache_wrapper=None): - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... 
res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. - - >>> b = MyClass() - >>> for x in range(35): - ... res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. - """ - cache_wrapper = cache_wrapper or functools.lru_cache() - - def wrapper(self, *args, **kwargs): - # it's the first call, replace the method with a cached, bound method - bound_method = types.MethodType(method, self) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None - - return wrapper - - -# From jaraco.functools 3.3 -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/flatten.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/flatten.py deleted file mode 100644 index f5ba4297567d650f147eebeed361e9d62fab899d..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/export/flatten.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import collections -from dataclasses import dataclass -from typing import Callable, List, Optional, Tuple -import torch -from torch import nn - -from detectron2.structures import Boxes, Instances, ROIMasks -from detectron2.utils.registry import _convert_target_to_string, locate - -from .torchscript_patch import patch_builtin_len - - -@dataclass -class Schema: - """ - A Schema defines how to flatten a possibly hierarchical object into tuple of - primitive objects, so it can be used as inputs/outputs of PyTorch's tracing. - - PyTorch does not support tracing a function that produces rich output - structures (e.g. dict, Instances, Boxes). To trace such a function, we - flatten the rich object into tuple of tensors, and return this tuple of tensors - instead. Meanwhile, we also need to know how to "rebuild" the original object - from the flattened results, so we can evaluate the flattened results. 
- A Schema defines how to flatten an object, and while flattening it, it records - necessary schemas so that the object can be rebuilt using the flattened outputs. - - The flattened object and the schema object is returned by ``.flatten`` classmethod. - Then the original object can be rebuilt with the ``__call__`` method of schema. - - A Schema is a dataclass that can be serialized easily. - """ - - # inspired by FetchMapper in tensorflow/python/client/session.py - - @classmethod - def flatten(cls, obj): - raise NotImplementedError - - def __call__(self, values): - raise NotImplementedError - - @staticmethod - def _concat(values): - ret = () - sizes = [] - for v in values: - assert isinstance(v, tuple), "Flattened results must be a tuple" - ret = ret + v - sizes.append(len(v)) - return ret, sizes - - @staticmethod - def _split(values, sizes): - if len(sizes): - expected_len = sum(sizes) - assert ( - len(values) == expected_len - ), f"Values has length {len(values)} but expect length {expected_len}." - ret = [] - for k in range(len(sizes)): - begin, end = sum(sizes[:k]), sum(sizes[: k + 1]) - ret.append(values[begin:end]) - return ret - - -@dataclass -class ListSchema(Schema): - schemas: List[Schema] # the schemas that define how to flatten each element in the list - sizes: List[int] # the flattened length of each element - - def __call__(self, values): - values = self._split(values, self.sizes) - if len(values) != len(self.schemas): - raise ValueError( - f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!" - ) - values = [m(v) for m, v in zip(self.schemas, values)] - return list(values) - - @classmethod - def flatten(cls, obj): - res = [flatten_to_tuple(k) for k in obj] - values, sizes = cls._concat([k[0] for k in res]) - return values, cls([k[1] for k in res], sizes) - - -@dataclass -class TupleSchema(ListSchema): - def __call__(self, values): - return tuple(super().__call__(values)) - - -@dataclass -class IdentitySchema(Schema): - def __call__(self, values): - return values[0] - - @classmethod - def flatten(cls, obj): - return (obj,), cls() - - -@dataclass -class DictSchema(ListSchema): - keys: List[str] - - def __call__(self, values): - values = super().__call__(values) - return dict(zip(self.keys, values)) - - @classmethod - def flatten(cls, obj): - for k in obj.keys(): - if not isinstance(k, str): - raise KeyError("Only support flattening dictionaries if keys are str.") - keys = sorted(obj.keys()) - values = [obj[k] for k in keys] - ret, schema = ListSchema.flatten(values) - return ret, cls(schema.schemas, schema.sizes, keys) - - -@dataclass -class InstancesSchema(DictSchema): - def __call__(self, values): - image_size, fields = values[-1], values[:-1] - fields = super().__call__(fields) - return Instances(image_size, **fields) - - @classmethod - def flatten(cls, obj): - ret, schema = super().flatten(obj.get_fields()) - size = obj.image_size - if not isinstance(size, torch.Tensor): - size = torch.tensor(size) - return ret + (size,), schema - - -@dataclass -class TensorWrapSchema(Schema): - """ - For classes that are simple wrapper of tensors, e.g. 
- Boxes, RotatedBoxes, BitMasks - """ - - class_name: str - - def __call__(self, values): - return locate(self.class_name)(values[0]) - - @classmethod - def flatten(cls, obj): - return (obj.tensor,), cls(_convert_target_to_string(type(obj))) - - -# if more custom structures needed in the future, can allow -# passing in extra schemas for custom types -def flatten_to_tuple(obj): - """ - Flatten an object so it can be used for PyTorch tracing. - Also returns how to rebuild the original object from the flattened outputs. - - Returns: - res (tuple): the flattened results that can be used as tracing outputs - schema: an object with a ``__call__`` method such that ``schema(res) == obj``. - It is a pure dataclass that can be serialized. - """ - schemas = [ - ((str, bytes), IdentitySchema), - (list, ListSchema), - (tuple, TupleSchema), - (collections.abc.Mapping, DictSchema), - (Instances, InstancesSchema), - ((Boxes, ROIMasks), TensorWrapSchema), - ] - for klass, schema in schemas: - if isinstance(obj, klass): - F = schema - break - else: - F = IdentitySchema - - return F.flatten(obj) - - -class TracingAdapter(nn.Module): - """ - A model may take rich input/output format (e.g. dict or custom classes), - but `torch.jit.trace` requires tuple of tensors as input/output. - This adapter flattens input/output format of a model so it becomes traceable. - - It also records the necessary schema to rebuild model's inputs/outputs from flattened - inputs/outputs. - - Example: - :: - outputs = model(inputs) # inputs/outputs may be rich structure - adapter = TracingAdapter(model, inputs) - - # can now trace the model, with adapter.flattened_inputs, or another - # tuple of tensors with the same length and meaning - traced = torch.jit.trace(adapter, adapter.flattened_inputs) - - # traced model can only produce flattened outputs (tuple of tensors) - flattened_outputs = traced(*adapter.flattened_inputs) - # adapter knows the schema to convert it back (new_outputs == outputs) - new_outputs = adapter.outputs_schema(flattened_outputs) - """ - - flattened_inputs: Tuple[torch.Tensor] = None - """ - Flattened version of inputs given to this class's constructor. - """ - - inputs_schema: Schema = None - """ - Schema of the inputs given to this class's constructor. - """ - - outputs_schema: Schema = None - """ - Schema of the output produced by calling the given model with inputs. - """ - - def __init__( - self, - model: nn.Module, - inputs, - inference_func: Optional[Callable] = None, - allow_non_tensor: bool = False, - ): - """ - Args: - model: an nn.Module - inputs: An input argument or a tuple of input arguments used to call model. - After flattening, it has to only consist of tensors. - inference_func: a callable that takes (model, *inputs), calls the - model with inputs, and return outputs. By default it - is ``lambda model, *inputs: model(*inputs)``. Can be override - if you need to call the model differently. - allow_non_tensor: allow inputs/outputs to contain non-tensor objects. - This option will filter out non-tensor objects to make the - model traceable, but ``inputs_schema``/``outputs_schema`` cannot be - used anymore because inputs/outputs cannot be rebuilt from pure tensors. - This is useful when you're only interested in the single trace of - execution (e.g. for flop count), but not interested in - generalizing the traced graph to new inputs. 
- """ - super().__init__() - if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): - model = model.module - self.model = model - if not isinstance(inputs, tuple): - inputs = (inputs,) - self.inputs = inputs - self.allow_non_tensor = allow_non_tensor - - if inference_func is None: - inference_func = lambda model, *inputs: model(*inputs) # noqa - self.inference_func = inference_func - - self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs) - - if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs): - return - if self.allow_non_tensor: - self.flattened_inputs = tuple( - [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)] - ) - self.inputs_schema = None - else: - for input in self.flattened_inputs: - if not isinstance(input, torch.Tensor): - raise ValueError( - "Inputs for tracing must only contain tensors. " - f"Got a {type(input)} instead." - ) - - def forward(self, *args: torch.Tensor): - with torch.no_grad(), patch_builtin_len(): - if self.inputs_schema is not None: - inputs_orig_format = self.inputs_schema(args) - else: - if len(args) != len(self.flattened_inputs) or any( - x is not y for x, y in zip(args, self.flattened_inputs) - ): - raise ValueError( - "TracingAdapter does not contain valid inputs_schema." - " So it cannot generalize to other inputs and must be" - " traced with `.flattened_inputs`." - ) - inputs_orig_format = self.inputs - - outputs = self.inference_func(self.model, *inputs_orig_format) - flattened_outputs, schema = flatten_to_tuple(outputs) - - flattened_output_tensors = tuple( - [x for x in flattened_outputs if isinstance(x, torch.Tensor)] - ) - if len(flattened_output_tensors) < len(flattened_outputs): - if self.allow_non_tensor: - flattened_outputs = flattened_output_tensors - self.outputs_schema = None - else: - raise ValueError( - "Model cannot be traced because some model outputs " - "cannot flatten to tensors." - ) - else: # schema is valid - if self.outputs_schema is None: - self.outputs_schema = schema - else: - assert self.outputs_schema == schema, ( - "Model should always return outputs with the same " - "structure so it can be traced!" - ) - return flattened_outputs - - def _create_wrapper(self, traced_model): - """ - Return a function that has an input/output interface the same as the - original model, but it calls the given traced model under the hood. 
- """ - - def forward(*args): - flattened_inputs, _ = flatten_to_tuple(args) - flattened_outputs = traced_model(*flattened_inputs) - return self.outputs_schema(flattened_outputs) - - return forward diff --git a/spaces/Treav/DICOMDeidentify2/README.md b/spaces/Treav/DICOMDeidentify2/README.md deleted file mode 100644 index 69e8de996d84a4e9c857cd4913d9d53868aec423..0000000000000000000000000000000000000000 --- a/spaces/Treav/DICOMDeidentify2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DICOMDeidentify -emoji: 💩 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -duplicated_from: Treav/DICOMDeidentify ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/inference.py b/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/inference.py deleted file mode 100644 index 66d18915d746c32aeae6cf9643cd186bb950d23d..0000000000000000000000000000000000000000 --- a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/inference.py +++ /dev/null @@ -1,104 +0,0 @@ -from __future__ import annotations - -import gc -import pathlib -import sys -import tempfile - -import gradio as gr -import imageio -import PIL.Image -import torch -from diffusers.utils.import_utils import is_xformers_available -from einops import rearrange -from huggingface_hub import ModelCard - -sys.path.append("Tune-A-Video") - -from tuneavideo.models.unet import UNet3DConditionModel -from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline - - -class InferencePipeline: - def __init__(self, hf_token: str | None = None): - self.hf_token = hf_token - self.pipe = None - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model_id = None - - def clear(self) -> None: - self.model_id = None - del self.pipe - self.pipe = None - torch.cuda.empty_cache() - gc.collect() - - @staticmethod - def check_if_model_is_local(model_id: str) -> bool: - return pathlib.Path(model_id).exists() - - @staticmethod - def get_model_card(model_id: str, hf_token: str | None = None) -> ModelCard: - if InferencePipeline.check_if_model_is_local(model_id): - card_path = (pathlib.Path(model_id) / "README.md").as_posix() - else: - card_path = model_id - return ModelCard.load(card_path, token=hf_token) - - @staticmethod - def get_base_model_info(model_id: str, hf_token: str | None = None) -> str: - card = InferencePipeline.get_model_card(model_id, hf_token) - return card.data.base_model - - def load_pipe(self, model_id: str) -> None: - if model_id == self.model_id: - return - base_model_id = self.get_base_model_info(model_id, self.hf_token) - unet = UNet3DConditionModel.from_pretrained( - model_id, subfolder="unet", torch_dtype=torch.float16, use_auth_token=self.hf_token - ) - pipe = TuneAVideoPipeline.from_pretrained( - base_model_id, unet=unet, torch_dtype=torch.float16, use_auth_token=self.hf_token - ) - pipe = pipe.to(self.device) - if is_xformers_available(): - pipe.unet.enable_xformers_memory_efficient_attention() - self.pipe = pipe - self.model_id = model_id # type: ignore - - def run( - self, - model_id: str, - prompt: str, - video_length: int, - fps: int, - seed: int, - n_steps: int, - guidance_scale: float, - ) -> PIL.Image.Image: - if not torch.cuda.is_available(): - raise gr.Error("CUDA is not available.") - - self.load_pipe(model_id) - - generator = torch.Generator(device=self.device).manual_seed(seed) - out = self.pipe( - prompt, - 
video_length=video_length, - width=512, - height=512, - num_inference_steps=n_steps, - guidance_scale=guidance_scale, - generator=generator, - ) # type: ignore - - frames = rearrange(out.videos[0], "c t h w -> t h w c") - frames = (frames * 255).to(torch.uint8).numpy() - - out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) - writer = imageio.get_writer(out_file.name, fps=fps) - for frame in frames: - writer.append_data(frame) - writer.close() - - return out_file.name diff --git a/spaces/VaneM/text-to-image-es/README.md b/spaces/VaneM/text-to-image-es/README.md deleted file mode 100644 index 6575f34a39850bf1fceaaf8a4966ea89f4395314..0000000000000000000000000000000000000000 --- a/spaces/VaneM/text-to-image-es/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text To Image Es -emoji: 🐠 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Violetmae14/Text-to-AnimeStudioVideo/README.md b/spaces/Violetmae14/Text-to-AnimeStudioVideo/README.md deleted file mode 100644 index a9082eef7dfee96fa1ef24e69f283d440327f765..0000000000000000000000000000000000000000 --- a/spaces/Violetmae14/Text-to-AnimeStudioVideo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text To AnimeStudioVideo -emoji: 🏃 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Voicemod/Speech-to-Speech/README.md b/spaces/Voicemod/Speech-to-Speech/README.md deleted file mode 100644 index 9165d9cc49fbe5f384f0a5c2bac387536368ef17..0000000000000000000000000000000000000000 --- a/spaces/Voicemod/Speech-to-Speech/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Voicemod's Speech to Speech -emoji: 🧌 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Vynock/rvc-wefu/infer_pack/transforms.py b/spaces/Vynock/rvc-wefu/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/Vynock/rvc-wefu/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def 
searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, 
bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/attack.py b/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/attack.py deleted file mode 100644 index 5bd6216bbb632e2f00b6cf2b84dad2ee28b834e0..0000000000000000000000000000000000000000 --- a/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/attack.py +++ /dev/null @@ -1,59 +0,0 @@ -import torch -from torchvision import transforms - - -class Attack: - def __init__(self, pipe, classifer, device="cpu"): - self.device = device - self.pipe = pipe - self.generator = torch.Generator(device=self.device).manual_seed(1024) - self.classifer = classifer - - def __call__( - self, prompt, negative_prompt="", size=512, guidance_scale=8, epsilon=0 - ): - pipe_output = self.pipe( - prompt=prompt, # What to generate - negative_prompt=negative_prompt, # What NOT to generate - height=size, - width=size, # Specify the image size - guidance_scale=guidance_scale, # How strongly to follow the prompt - num_inference_steps=30, # How many steps to take - generator=self.generator, # Fixed random seed - ) - - # Resulting image: - init_image = pipe_output.images[0] - image = self.transform(init_image) - - image.requires_grad = True - - outputs = self.classifer(image).to(self.device) - - target = torch.tensor([0]).to(self.device) - - return ( - init_image, - self.untargeted_attack(image, outputs, target, epsilon), - ) - - def transform(self, image): - img_tfms = transforms.Compose( - [transforms.Resize(32), transforms.ToTensor()] - ) - image = img_tfms(image) - image = 
torch.unsqueeze(image, dim=0) - return image - - def untargeted_attack(self, image, pred, target, epsilon): - loss = torch.nn.functional.nll_loss(pred, target) - - self.classifer.zero_grad() - - loss.backward() - - gradient_sign = image.grad.data.sign() - perturbed_image = image + epsilon * gradient_sign - perturbed_image = torch.clamp(perturbed_image, 0, 1) - - return perturbed_image diff --git a/spaces/XzJosh/Eileen-Bert-VITS2/app.py b/spaces/XzJosh/Eileen-Bert-VITS2/app.py deleted file mode 100644 index e1b32f9d4b49c5624c6096755f01393ee226c70c..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Eileen-Bert-VITS2/app.py +++ /dev/null @@ -1,155 +0,0 @@ -import sys, os - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s") - -logger = logging.getLogger(__name__) - -import torch -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser - - -net_g = None - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid): - global net_g - bert, phones, tones, lang_ids = get_text(text, "ZH", hps) - with torch.no_grad(): - x_tst=phones.to(device).unsqueeze(0) - tones=tones.to(device).unsqueeze(0) - lang_ids=lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - return audio - -def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale): - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker) - return "Success", (hps.data.sampling_rate, audio) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model_dir", default="./logs/Eileen/G_4100.pth", help="path of your model") - parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file") - parser.add_argument("--share", default=False, help="make link public") - parser.add_argument("-d", 
"--debug", action="store_true", help="enable DEBUG-LEVEL log") - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - hps = utils.get_hparams_from_file(args.config_dir) - device = "cuda:0" if torch.cuda.is_available() else "cpu" - ''' - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - ''' - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True) - - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - with gr.Blocks() as app: - with gr.Row(): - with gr.Column(): - gr.Markdown(value=""" - 【AI乃琳】在线语音合成(Bert-Vits2)\n - 作者:Xz乔希 https://space.bilibili.com/5859321\n - 声音归属:乃琳Queen https://space.bilibili.com/672342685\n - Bert-VITS2项目:https://github.com/Stardust-minus/Bert-VITS2\n - 使用本模型请严格遵守法律法规!\n - 发布二创作品请标注本项目作者及链接、作品使用Bert-VITS2 AI生成!\n - """) - text = gr.TextArea(label="Text", placeholder="Input Text Here", - value="大家好,我是诶嗖的乃琳艾琳哟") - speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker') - sdp_ratio = gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.01, label='SDP/DP混合比') - noise_scale = gr.Slider(minimum=0.1, maximum=1, value=0.5, step=0.01, label='感情调节') - noise_scale_w = gr.Slider(minimum=0.1, maximum=1, value=0.9, step=0.01, label='音素长度') - length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成长度') - btn = gr.Button("点击生成", variant="primary") - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio") - gr.Markdown(value=""" - 【AI塔菲】https://huggingface.co/spaces/XzJosh/Taffy-Bert-VITS2\n - 【AI东雪莲】https://huggingface.co/spaces/XzJosh/Azuma-Bert-VITS2\n - 【AI奶绿】https://huggingface.co/spaces/XzJosh/LAPLACE-Bert-VITS2\n - 【AI尼奈】https://huggingface.co/spaces/XzJosh/nine1-Bert-VITS2\n - 【AI珈乐】https://huggingface.co/spaces/XzJosh/Carol-Bert-VITS2\n - 【AI电棍】https://huggingface.co/spaces/XzJosh/otto-Bert-VITS2\n - 【AI七海】https://huggingface.co/spaces/XzJosh/Nana7mi-Bert-VITS2\n - 【AI阿梓】https://huggingface.co/spaces/XzJosh/Azusa-Bert-VITS2\n - 【AI星瞳】https://huggingface.co/spaces/XzJosh/XingTong-Bert-VITS2\n - 【AI向晚】https://huggingface.co/spaces/XzJosh/Ava-Bert-VITS2\n - 【AI嘉然】https://huggingface.co/spaces/XzJosh/Diana-Bert-VITS2\n - 【AI剑魔】https://huggingface.co/spaces/XzJosh/Aatrox-Bert-VITS2\n - """) - btn.click(tts_fn, - inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale], - outputs=[text_output, audio_output]) - -# webbrowser.open("http://127.0.0.1:6006") -# app.launch(server_port=6006, show_error=True) - - app.launch(show_error=True) diff --git a/spaces/XzJosh/LAPLACE-Bert-VITS2/attentions.py b/spaces/XzJosh/LAPLACE-Bert-VITS2/attentions.py deleted file mode 100644 index 1192dd7268c20c11010e73a6017ed09549695afe..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/LAPLACE-Bert-VITS2/attentions.py +++ /dev/null @@ -1,344 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import logging - -logger = logging.getLogger(__name__) - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - 
- self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - #if isflow: - # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - # self.cond_layer = weight_norm(cond_layer, name='weight') - # self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - logging.debug(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = 
nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/segment/metrics.py b/spaces/YONG627/456123/yolov5-code-main/utils/segment/metrics.py deleted file mode 100644 index c9f137e38ead20b9e44dbe58d3e69cf616b99c9e..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/segment/metrics.py +++ /dev/null @@ -1,210 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Model validation metrics -""" - -import numpy as np - -from ..metrics import ap_per_class - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] - return (x[:, :8] * w).sum(1) - - -def ap_per_class_box_and_mask( - tp_m, - tp_b, - conf, - pred_cls, - target_cls, - plot=False, - save_dir='.', - names=(), -): - """ - Args: - tp_b: tp of boxes. - tp_m: tp of masks. - other arguments see `func: ap_per_class`. 
- """ - results_boxes = ap_per_class(tp_b, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix='Box')[2:] - results_masks = ap_per_class(tp_m, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix='Mask')[2:] - - results = { - 'boxes': { - 'p': results_boxes[0], - 'r': results_boxes[1], - 'ap': results_boxes[3], - 'f1': results_boxes[2], - 'ap_class': results_boxes[4]}, - 'masks': { - 'p': results_masks[0], - 'r': results_masks[1], - 'ap': results_masks[3], - 'f1': results_masks[2], - 'ap_class': results_masks[4]}} - return results - - -class Metric: - - def __init__(self) -> None: - self.p = [] # (nc, ) - self.r = [] # (nc, ) - self.f1 = [] # (nc, ) - self.all_ap = [] # (nc, 10) - self.ap_class_index = [] # (nc, ) - - @property - def ap50(self): - """AP@0.5 of all classes. - Return: - (nc, ) or []. - """ - return self.all_ap[:, 0] if len(self.all_ap) else [] - - @property - def ap(self): - """AP@0.5:0.95 - Return: - (nc, ) or []. - """ - return self.all_ap.mean(1) if len(self.all_ap) else [] - - @property - def mp(self): - """mean precision of all classes. - Return: - float. - """ - return self.p.mean() if len(self.p) else 0.0 - - @property - def mr(self): - """mean recall of all classes. - Return: - float. - """ - return self.r.mean() if len(self.r) else 0.0 - - @property - def map50(self): - """Mean AP@0.5 of all classes. - Return: - float. - """ - return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 - - @property - def map(self): - """Mean AP@0.5:0.95 of all classes. - Return: - float. - """ - return self.all_ap.mean() if len(self.all_ap) else 0.0 - - def mean_results(self): - """Mean of results, return mp, mr, map50, map""" - return (self.mp, self.mr, self.map50, self.map) - - def class_result(self, i): - """class-aware result, return p[i], r[i], ap50[i], ap[i]""" - return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) - - def get_maps(self, nc): - maps = np.zeros(nc) + self.map - for i, c in enumerate(self.ap_class_index): - maps[c] = self.ap[i] - return maps - - def update(self, results): - """ - Args: - results: tuple(p, r, ap, f1, ap_class) - """ - p, r, all_ap, f1, ap_class_index = results - self.p = p - self.r = r - self.all_ap = all_ap - self.f1 = f1 - self.ap_class_index = ap_class_index - - -class Metrics: - """Metric for boxes and masks.""" - - def __init__(self) -> None: - self.metric_box = Metric() - self.metric_mask = Metric() - - def update(self, results): - """ - Args: - results: Dict{'boxes': Dict{}, 'masks': Dict{}} - """ - self.metric_box.update(list(results['boxes'].values())) - self.metric_mask.update(list(results['masks'].values())) - - def mean_results(self): - return self.metric_box.mean_results() + self.metric_mask.mean_results() - - def class_result(self, i): - return self.metric_box.class_result(i) + self.metric_mask.class_result(i) - - def get_maps(self, nc): - return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) - - @property - def ap_class_index(self): - # boxes and masks have the same ap_class_index - return self.metric_box.ap_class_index - - -KEYS = [ - 'train/box_loss', - 'train/seg_loss', # train loss - 'train/obj_loss', - 'train/cls_loss', - 'metrics/precision(B)', - 'metrics/recall(B)', - 'metrics/mAP_0.5(B)', - 'metrics/mAP_0.5:0.95(B)', # metrics - 'metrics/precision(M)', - 'metrics/recall(M)', - 'metrics/mAP_0.5(M)', - 'metrics/mAP_0.5:0.95(M)', # metrics - 'val/box_loss', - 'val/seg_loss', # val loss - 'val/obj_loss', - 'val/cls_loss', - 
'x/lr0', - 'x/lr1', - 'x/lr2',] - -BEST_KEYS = [ - 'best/epoch', - 'best/precision(B)', - 'best/recall(B)', - 'best/mAP_0.5(B)', - 'best/mAP_0.5:0.95(B)', - 'best/precision(M)', - 'best/recall(M)', - 'best/mAP_0.5(M)', - 'best/mAP_0.5:0.95(M)',] diff --git a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/version.py b/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/version.py deleted file mode 100644 index b794fd409a5e3b3b65ad76a43d6a01a318877640..0000000000000000000000000000000000000000 --- a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.1.0' diff --git a/spaces/YuAnthony/Audio-Caption/eval_metrics.py b/spaces/YuAnthony/Audio-Caption/eval_metrics.py deleted file mode 100644 index ea7dd5235d0e797c2964f51e343e384125c761f2..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/eval_metrics.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/env python - -from pathlib import Path -import json -import csv -from typing import Dict, List, Union, Tuple, Any - -from coco_caption.pycocotools.coco import COCO -from coco_caption.pycocoevalcap.eval import COCOEvalCap - -__author__ = 'Samuel Lipping -- Tampere University' -__docformat__ = 'reStructuredText' -__all__ = ['evaluate_metrics'] - - -def write_json(data: Union[List[Dict[str, Any]], Dict[str, Any]], - path: Path) \ - -> None: - """ Write a dict or a list of dicts into a JSON file - - :param data: Data to write - :type data: list[dict[str, any]] | dict[str, any] - :param path: Path to the output file - :type path: Path - """ - with path.open("w") as f: - json.dump(data, f) - - -def reformat_to_coco(predictions: List[str], - ground_truths: List[List[str]], - ids: Union[List[int], None] = None) \ - -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: - """ Reformat annotations to the MSCOCO format - - :param predictions: List of predicted captions - :type predictions: list[str] - :param ground_truths: List of lists of reference captions - :type ground_truths: list[list[str]] - :param ids: List of file IDs. If not given, a running integer\ - is used - :type ids: list[int] | None - :return: Predictions and reference captions in the MSCOCO format - :rtype: list[dict[str, any]] - """ - # Running number as ids for files if not given - if ids is None: - ids = range(len(predictions)) - - # Captions need to be in format - # [{ - # "audio_id": : int, - # "caption" : str - # ]}, - # as per the COCO results format. - pred = [] - ref = { - 'info': {'description': 'Clotho reference captions (2019)'}, - 'audio samples': [], - 'licenses': [ - {'id': 1}, - {'id': 2}, - {'id': 3} - ], - 'type': 'captions', - 'annotations': [] - } - cap_id = 0 - for audio_id, p, gt in zip(ids, predictions, ground_truths): - p = p[0] if isinstance(p, list) else p - pred.append({ - 'audio_id': audio_id, - 'caption': p - }) - - ref['audio samples'].append({ - 'id': audio_id - }) - - for cap in gt: - ref['annotations'].append({ - 'audio_id': audio_id, - 'id': cap_id, - 'caption': cap - }) - cap_id += 1 - - return pred, ref - - -def evaluate_metrics_from_files(pred_file: Union[Path, str], - ref_file: Union[Path, str]) \ - -> Tuple[Dict[str, float], Dict[int, Dict[str, float]]]: - """ Evaluate the translation metrics from annotation files with the coco lib - Follows the example in the repo. 
- - :param pred_file: File with predicted captions - :type pred_file: Path | str - :param ref_file: File with reference captions - :type ref_file: Path | str - :return: Tuple with metrics for the whole dataset and per-file metrics - :rtype: tuple[dict[str, float], dict[int, dict[str, float]]] - """ - # Load annotations from files - coco = COCO(str(ref_file)) - cocoRes = coco.loadRes(str(pred_file)) - - # Create evaluation object and evaluate metrics - cocoEval = COCOEvalCap(coco, cocoRes) - cocoEval.params['audio_id'] = cocoRes.getAudioIds() - cocoEval.evaluate() - - # Make dict from metrics - metrics = dict( - (m, s) for m, s in cocoEval.eval.items() - ) - return metrics, cocoEval.audioToEval - - -def evaluate_metrics_from_lists(predictions: List[str], - ground_truths: List[List[str]], - ids: Union[List[int], None] = None) \ - -> Tuple[Dict[str, float], Dict[int, Dict[str, float]]]: - """Evaluate metrics from lists of predictions and ground truths - - :param predictions: List of prediction captions - :type predictions: list[str] - :param ground_truths: List of lists of reference captions \ - (one five-caption list per file) - :type ground_truths: list[list[str]] - :param ids: Ids for the audio files. If not given, a running \ - integer is used - :type ids: list[int] | None - :return: Tuple with metrics for the whole dataset and per-file \ - metrics - :rtype: tuple[dict[str, float], dict[int, dict[str, float]]] - """ - assert(len(predictions) == len(ground_truths)) - assert(all([len(i) == 5 for i in ground_truths])) - - # Running int for id if not given - if ids is None: - ids = range(len(predictions)) - - # Captions need to be in format - # [{ - # "audio_id": : int, - # "caption" : str - # ]}, - # as per the COCO results format. - pred, ref = reformat_to_coco(predictions, ground_truths, ids) - - # Write temporary files for the metric evaluation - tmp_dir = Path('tmp') - - if not tmp_dir.is_dir(): - tmp_dir.mkdir() - - ref_file = tmp_dir.joinpath('ref.json') - pred_file = tmp_dir.joinpath('pred.json') - - write_json(ref, ref_file) - write_json(pred, pred_file) - - metrics, per_file_metrics = evaluate_metrics_from_files( - pred_file, ref_file) - - # Delete temporary files - ref_file.unlink() - pred_file.unlink() - - return metrics, per_file_metrics - - -def check_and_read_csv(path: Union[str, Path, List[Dict[str, str]]]) \ - -> List[Dict[str, str]]: - """ If input is a file path, returns the data as a list of dicts \ - (as returned by DictReader) Otherwise just returns the input - - :param path: Input file or its contents (as given by DictReader). - :type path: Path | str | list[dict[str, str]] - :return: File contents. 
- :rtype: list[dict[str, str]] - """ - if not isinstance(path, list): - if isinstance(path, str): - path = Path(path) - - with path.open('r') as f: - reader = csv.DictReader(f, dialect='unix') - - result = [row for row in reader] - else: - result = path - - return result - - -def combine_single_and_per_file_metrics(single_metrics: Dict[str, float], - per_file_metrics: Dict[int, Dict[str, float]], - file_names: List[str]) \ - -> Dict[str, Dict[str, Any]]: - """ Reformat single (one for whole dataset) and per-file metrics into - { - :{ - 'score': , - 'scores': { - : - } - } - } - - :param single_metrics: Evaluated single metrics - :type single_metrics: dict[str, float] - :param per_file_metrics: Evaluated per-file metrics - :type per_file_metrics: dict[int, dict[str, float]] - :param file_names: List of file names in the order they were given \ - to the metric evaluator - :type file_names: list[str] - :return: Evaluated metrics in one data structure - :rtype: dict[str, dict[str, any]] - """ - total_metrics = {} - for metric, score in single_metrics.items(): - total_metrics[metric] = { - 'score': score, - 'scores': {} - } - for file_idx, metric_dict in per_file_metrics.items(): - file_name = file_names[file_idx] - for metric in total_metrics.keys(): - if metric == 'SPICE': - value = metric_dict[metric]['All']['f'] - else: - value = metric_dict[metric] - total_metrics[metric]['scores'][file_name] = value - - return total_metrics - - -def evaluate_metrics(prediction_file: Union[str, Path, List[Dict[str, str]]], - reference_file: Union[str, Path, List[Dict[str, str]]], - nb_reference_captions: int = 5) \ - -> Dict[str, Dict[str, Union[float, Dict[str, float]]]]: - """ Evaluates metrics from the predictions and reference captions. - - Evaluates BLEU1-4, CIDEr, METEOR, ROUGE_L, SPICE, and SPIDEr using - code from https://github.com/tylin/coco-caption - - :param prediction_file: Input file (or file contents, as given by DictReader) \ - with predicted captions - :type prediction_file: Path | str | list[dict[str, str]] - :param reference_file: Input file (or file contents, as given by DictReader) \ - with reference captions - :type reference_file: Path | str | list[dict[str, str]] - :param nb_reference_captions: Number of reference captions - :type nb_reference_captions: int - :return: A dict with keys the names of the metrics. Each metric\ - has as value a dict, with keys `score` and `scores`. The\ - `score` key, has as a value the score of the corresponding\ - metric, for the whole set of files. The `scores` keys, has\ - as a value, a dict with keys the file names of the files, and\ - values the value of the score for the corresponding file. 
- :rtype: dict[str, dict[str, float|dict[str, float]] - """ - prediction_file = check_and_read_csv(prediction_file) - reference_file = check_and_read_csv(reference_file) - - prediction_file.sort(key=lambda row: row['file_name']) - reference_file.sort(key=lambda row: row['file_name']) - - # Make reference file contents indexable by file name - reference_dict = {} - for row in reference_file: - reference_dict[row['file_name']] = row - - # Make sure that all the files in the prediction file exist also in the reference file - file_names = [row['file_name'] for row in prediction_file] - assert( - all( - file_name in reference_dict for file_name in file_names - ) - ) - - predictions = [] - ground_truths = [] - for row in prediction_file: - file_name = row['file_name'] - predictions.append(row['caption_predicted']) - - cap_names = ['caption_reference_{:02d}'.format(i) for i in range(1, nb_reference_captions+1)] - - ground_truths.append([reference_dict[file_name][cap] for cap in cap_names]) - - metrics, per_file_metrics = evaluate_metrics_from_lists(predictions, ground_truths) - - total_metrics = combine_single_and_per_file_metrics( - metrics, per_file_metrics, file_names - ) - - return { - key.lower(): value for key, value in total_metrics.items() - } - -# EOF diff --git a/spaces/a-v-bely/russian-task-generator/utilities_cookies/encrypted_cookie_manager.py b/spaces/a-v-bely/russian-task-generator/utilities_cookies/encrypted_cookie_manager.py deleted file mode 100644 index b7688afce32eaeb7b1145b40472ba9bde0213811..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/russian-task-generator/utilities_cookies/encrypted_cookie_manager.py +++ /dev/null @@ -1,115 +0,0 @@ -import os -import base64 -import streamlit as st -from typing import Tuple -from typing import Optional -from cryptography import fernet -from typing import MutableMapping -from cryptography.fernet import Fernet -from cryptography.hazmat.primitives import hashes -from utilities_cookies.cookie_manager import CookieManager -from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC - - -@st.cache_data -def key_from_parameters(salt: bytes, iterations: int, password: str): - kdf = PBKDF2HMAC( - algorithm=hashes.SHA256(), - length=32, - salt=salt, - iterations=iterations, - ) - - return base64.urlsafe_b64encode(kdf.derive(password.encode('utf-8'))) - - -class EncryptedCookieManager(MutableMapping[str, str]): - def __init__( - self, *, - password: str, - path: str = None, - prefix: str = "", - key_params_cookie="EncryptedCookieManager.key_params", - ignore_broken=True, - ): - self._cookie_manager = CookieManager(path=path, prefix=prefix) - self._fernet: Optional[Fernet] = None - self._key_params_cookie = key_params_cookie - self._password = password - self._ignore_broken = ignore_broken - - def ready(self): - return self._cookie_manager.ready() - - def save(self): - return self._cookie_manager.save() - - def _encrypt(self, value): - self._setup_fernet() - return self._fernet.encrypt(value) - - def _decrypt(self, value): - self._setup_fernet() - return self._fernet.decrypt(value) - - def _setup_fernet(self): - if self._fernet is not None: - return - key_params = self._get_key_params() - if not key_params: - key_params = self._initialize_new_key_params() - salt, iterations, magic = key_params - key = key_from_parameters( - salt=salt, - iterations=iterations, - password=self._password - ) - - self._fernet = Fernet(key) - - def _get_key_params(self) -> Optional[Tuple[bytes, int, bytes]]: - raw_key_params = 
self._cookie_manager.get(self._key_params_cookie) - if not raw_key_params: - return - try: - raw_salt, raw_iterations, raw_magic = raw_key_params.split(':') - return base64.b64decode(raw_salt), int(raw_iterations), base64.b64decode(raw_magic) - except (ValueError, TypeError): - print(f"Failed to parse key parameters from cookie {raw_key_params}") - return - - def _initialize_new_key_params(self) -> Tuple[bytes, int, bytes]: - salt = os.urandom(16) - iterations = 390000 - magic = os.urandom(16) - self._cookie_manager[self._key_params_cookie] = b':'.join([ - base64.b64encode(salt), - str(iterations).encode('ascii'), - base64.b64encode(magic) - ]).decode('ascii') - return salt, iterations, magic - - def __repr__(self): - if self.ready(): - return f'' - return '' - - def __getitem__(self, k: str) -> str or None: - try: - return self._decrypt(self._cookie_manager[k].encode('utf-8')).decode('utf-8') - except fernet.InvalidToken: - if self._ignore_broken: - return - raise - - def __iter__(self): - return iter(self._cookie_manager) - - def __len__(self): - return len(self._cookie_manager) - - def __setitem__(self, key: str, value: str) -> None: - self._cookie_manager[key] = self._encrypt(value.encode('utf-8')).decode('utf-8') - - def __delitem__(self, key: str) -> None: - del self._cookie_manager[key] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/base_module.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/base_module.py deleted file mode 100644 index 617fad9bb89f10a9a0911d962dfb3bc8f3a3628c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/runner/base_module.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings -from abc import ABCMeta -from collections import defaultdict -from logging import FileHandler - -import torch.nn as nn - -from annotator.uniformer.mmcv.runner.dist_utils import master_only -from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log - - -class BaseModule(nn.Module, metaclass=ABCMeta): - """Base module for all modules in openmmlab. - - ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional - functionality of parameter initialization. Compared with - ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - - - ``init_cfg``: the config to control the initialization. - - ``init_weights``: The function of parameter - initialization and recording initialization - information. - - ``_params_init_info``: Used to track the parameter - initialization information. This attribute only - exists during executing the ``init_weights``. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, init_cfg=None): - """Initialize BaseModule, inherited from `torch.nn.Module`""" - - # NOTE init_cfg can be defined in different levels, but init_cfg - # in low levels has a higher priority. 
- - super(BaseModule, self).__init__() - # define default value of init_cfg instead of hard code - # in init_weights() function - self._is_init = False - - self.init_cfg = copy.deepcopy(init_cfg) - - # Backward compatibility in derived classes - # if pretrained is not None: - # warnings.warn('DeprecationWarning: pretrained is a deprecated \ - # key, please consider using init_cfg') - # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - - @property - def is_init(self): - return self._is_init - - def init_weights(self): - """Initialize the weights.""" - - is_top_level_module = False - # check if it is top-level module - if not hasattr(self, '_params_init_info'): - # The `_params_init_info` is used to record the initialization - # information of the parameters - # the key should be the obj:`nn.Parameter` of model and the value - # should be a dict containing - # - init_info (str): The string that describes the initialization. - # - tmp_mean_value (FloatTensor): The mean of the parameter, - # which indicates whether the parameter has been modified. - # this attribute would be deleted after all parameters - # is initialized. - self._params_init_info = defaultdict(dict) - is_top_level_module = True - - # Initialize the `_params_init_info`, - # When detecting the `tmp_mean_value` of - # the corresponding parameter is changed, update related - # initialization information - for name, param in self.named_parameters(): - self._params_init_info[param][ - 'init_info'] = f'The value is the same before and ' \ - f'after calling `init_weights` ' \ - f'of {self.__class__.__name__} ' - self._params_init_info[param][ - 'tmp_mean_value'] = param.data.mean() - - # pass `params_init_info` to all submodules - # All submodules share the same `params_init_info`, - # so it will be updated when parameters are - # modified at any level of the model. - for sub_module in self.modules(): - sub_module._params_init_info = self._params_init_info - - # Get the initialized logger, if not exist, - # create a logger named `mmcv` - logger_names = list(logger_initialized.keys()) - logger_name = logger_names[0] if logger_names else 'mmcv' - - from ..cnn import initialize - from ..cnn.utils.weight_init import update_init_info - module_name = self.__class__.__name__ - if not self._is_init: - if self.init_cfg: - print_log( - f'initialize {module_name} with init_cfg {self.init_cfg}', - logger=logger_name) - initialize(self, self.init_cfg) - if isinstance(self.init_cfg, dict): - # prevent the parameters of - # the pre-trained model - # from being overwritten by - # the `init_weights` - if self.init_cfg['type'] == 'Pretrained': - return - - for m in self.children(): - if hasattr(m, 'init_weights'): - m.init_weights() - # users may overload the `init_weights` - update_init_info( - m, - init_info=f'Initialized by ' - f'user-defined `init_weights`' - f' in {m.__class__.__name__} ') - - self._is_init = True - else: - warnings.warn(f'init_weights of {self.__class__.__name__} has ' - f'been called more than once.') - - if is_top_level_module: - self._dump_init_info(logger_name) - - for sub_module in self.modules(): - del sub_module._params_init_info - - @master_only - def _dump_init_info(self, logger_name): - """Dump the initialization information to a file named - `initialization.log.json` in workdir. - - Args: - logger_name (str): The name of logger. 
- """ - - logger = get_logger(logger_name) - - with_file_handler = False - # dump the information to the logger file if there is a `FileHandler` - for handler in logger.handlers: - if isinstance(handler, FileHandler): - handler.stream.write( - 'Name of parameter - Initialization information\n') - for name, param in self.named_parameters(): - handler.stream.write( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n") - handler.stream.flush() - with_file_handler = True - if not with_file_handler: - for name, param in self.named_parameters(): - print_log( - f'\n{name} - {param.shape}: ' - f"\n{self._params_init_info[param]['init_info']} \n ", - logger=logger_name) - - def __repr__(self): - s = super().__repr__() - if self.init_cfg: - s += f'\ninit_cfg={self.init_cfg}' - return s - - -class Sequential(BaseModule, nn.Sequential): - """Sequential module in openmmlab. - - Args: - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, *args, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.Sequential.__init__(self, *args) - - -class ModuleList(BaseModule, nn.ModuleList): - """ModuleList in openmmlab. - - Args: - modules (iterable, optional): an iterable of modules to add. - init_cfg (dict, optional): Initialization config dict. - """ - - def __init__(self, modules=None, init_cfg=None): - BaseModule.__init__(self, init_cfg) - nn.ModuleList.__init__(self, modules) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/gather_points.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/gather_points.py deleted file mode 100644 index f52f1677d8ea0facafc56a3672d37adb44677ff3..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/gather_points.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['gather_points_forward', 'gather_points_backward']) - - -class GatherPoints(Function): - """Gather points with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) features to gather. - indices (Tensor): (B, M) where M is the number of points. - - Returns: - Tensor: (B, C, M) where M is the number of points. 
- """ - assert features.is_contiguous() - assert indices.is_contiguous() - - B, npoint = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, npoint) - - ext_module.gather_points_forward( - features, indices, output, b=B, c=C, n=N, npoints=npoint) - - ctx.for_backwards = (indices, C, N) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(indices) - return output - - @staticmethod - def backward(ctx, grad_out): - idx, C, N = ctx.for_backwards - B, npoint = idx.size() - - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - grad_out_data = grad_out.data.contiguous() - ext_module.gather_points_backward( - grad_out_data, - idx, - grad_features.data, - b=B, - c=C, - n=N, - npoints=npoint) - return grad_features, None - - -gather_points = GatherPoints.apply diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/group_points.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. - - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. - """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. 
- - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. - """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' - new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. - """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. 
- """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. - """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/ahmedJaafari/AnnarabicRecord/app.py b/spaces/ahmedJaafari/AnnarabicRecord/app.py deleted file mode 100644 index 6a7e844fc53a37a89f129c4acbc546f00cad4dbc..0000000000000000000000000000000000000000 --- a/spaces/ahmedJaafari/AnnarabicRecord/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import gradio as gr -import streamlit as st -import numpy as np -from transformers.file_utils import cached_path, hf_bucket_url -import os -from transformers import Wav2Vec2ProcessorWithLM, AutoModelForCTC -from datasets import load_dataset -import torch -import kenlm -import torchaudio - -cache_dir = './cache/' -processor = Wav2Vec2ProcessorWithLM.from_pretrained("ahmedJaafari/Annarabic3.2", cache_dir=cache_dir, use_auth_token=st.secrets["AnnarabicToken"]) -model = AutoModelForCTC.from_pretrained("ahmedJaafari/Annarabic3.2", cache_dir=cache_dir, use_auth_token=st.secrets["AnnarabicToken"]) - -# define function to read in sound file -def speech_file_to_array_fn(path, max_seconds=60): - batch = {"file": path} - speech_array, sampling_rate = torchaudio.load(batch["file"]) - if sampling_rate != 16000: - transform = torchaudio.transforms.Resample(orig_freq=sampling_rate, - new_freq=16000) - speech_array = transform(speech_array) - speech_array = speech_array[0] - if max_seconds > 0: - speech_array = speech_array[:max_seconds*16000] - batch["speech"] = speech_array.numpy() - batch["sampling_rate"] = 16000 - return batch - -# tokenize -def inference(audio): - # read in sound file - # load dummy dataset and read soundfiles - ds = speech_file_to_array_fn(audio.name) - # infer model - input_values = processor( - ds["speech"], - sampling_rate=ds["sampling_rate"], - return_tensors="pt" - ).input_values - # decode ctc output - with torch.no_grad(): - logits = model(input_values).logits - - output = processor.decode(logits.numpy()[0]).text - - print(output) - return output - -inputs = gr.inputs.Audio(label="Record Audio", source="microphone", type='file') -outputs = gr.outputs.Textbox(label="Output Text") -title = "Annarabic Speech Recognition System" -description = 'Demo for Annarabic ASR. To use it, simply upload your audio, or click on one of the examples to load them. Only the 10 first seconds of the audio will be transcribed and GPU runtime is not used. For more information, contact Ahmed Jaafari via email: a.jaafari@aui.ma or phone: +212658537105.' -#examples=[['Aya.mp3'], ['Loubna.mp3'], ['Omar.wav'], ['Yassir.wav']] -#article="* The ASR never trained on the given examples." 
-gr.Interface(inference, inputs, outputs, title=title, description=description).launch() \ No newline at end of file diff --git a/spaces/aipicasso/emi-latest-demo/README.md b/spaces/aipicasso/emi-latest-demo/README.md deleted file mode 100644 index 78e439fa7f43206c29e89df7a66e021d73cb63cd..0000000000000000000000000000000000000000 --- a/spaces/aipicasso/emi-latest-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Emi -emoji: 😊 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/ArcaneGAN/app.py b/spaces/akhaliq/ArcaneGAN/app.py deleted file mode 100644 index 581bc5198cd1e1cf21913aa71784cb17ee0044d5..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/ArcaneGAN/app.py +++ /dev/null @@ -1,152 +0,0 @@ -import os -from huggingface_hub import hf_hub_download -os.system("pip -qq install facenet_pytorch") -from facenet_pytorch import MTCNN -from torchvision import transforms -import torch, PIL -from tqdm.notebook import tqdm -import gradio as gr -import torch - -modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit") -modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit") -modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit") - - -mtcnn = MTCNN(image_size=256, margin=80) - -# simplest ye olde trustworthy MTCNN for face detection with landmarks -def detect(img): - - # Detect faces - batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True) - # Select faces - if not mtcnn.keep_all: - batch_boxes, batch_probs, batch_points = mtcnn.select_boxes( - batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method - ) - - return batch_boxes, batch_points - -# my version of isOdd, should make a separate repo for it :D -def makeEven(_x): - return _x if (_x % 2 == 0) else _x+1 - -# the actual scaler function -def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False): - - x, y = _img.size - - ratio = 2 #initial ratio - - #scale to desired face size - if (boxes is not None): - if len(boxes)>0: - ratio = target_face/max(boxes[0][2:]-boxes[0][:2]); - ratio = min(ratio, max_upscale) - if VERBOSE: print('up by', ratio) - - if fixed_ratio>0: - if VERBOSE: print('fixed ratio') - ratio = fixed_ratio - - x*=ratio - y*=ratio - - #downscale to fit into max res - res = x*y - if res > max_res: - ratio = pow(res/max_res,1/2); - if VERBOSE: print(ratio) - x=int(x/ratio) - y=int(y/ratio) - - #make dimensions even, because usually NNs fail on uneven dimensions due skip connection size mismatch - x = makeEven(int(x)) - y = makeEven(int(y)) - - size = (x, y) - - return _img.resize(size) - -""" - A useful scaler algorithm, based on face detection. - Takes PIL.Image, returns a uniformly scaled PIL.Image - boxes: a list of detected bboxes - _img: PIL.Image - max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU. - target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension. - fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit. - max_upscale: maximum upscale ratio. Prevents from scaling images with tiny faces to a blurry mess. 
-""" - -def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False): - boxes = None - boxes, _ = detect(_img) - if VERBOSE: print('boxes',boxes) - img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE) - return img_resized - - -size = 256 - -means = [0.485, 0.456, 0.406] -stds = [0.229, 0.224, 0.225] - -t_stds = torch.tensor(stds).cuda().half()[:,None,None] -t_means = torch.tensor(means).cuda().half()[:,None,None] - -def makeEven(_x): - return int(_x) if (_x % 2 == 0) else int(_x+1) - -img_transforms = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(means,stds)]) - -def tensor2im(var): - return var.mul(t_stds).add(t_means).mul(255.).clamp(0,255).permute(1,2,0) - -def proc_pil_img(input_image, model): - transformed_image = img_transforms(input_image)[None,...].cuda().half() - - with torch.no_grad(): - result_image = model(transformed_image)[0] - output_image = tensor2im(result_image) - output_image = output_image.detach().cpu().numpy().astype('uint8') - output_image = PIL.Image.fromarray(output_image) - return output_image - - - -modelv4 = torch.jit.load(modelarcanev4).eval().cuda().half() -modelv3 = torch.jit.load(modelarcanev3).eval().cuda().half() -modelv2 = torch.jit.load(modelarcanev2).eval().cuda().half() - -def process(im, version): - if version == 'version 0.4': - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv4) - elif version == 'version 0.3': - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv3) - else: - im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1) - res = proc_pil_img(im, modelv2) - return res - -title = "ArcaneGAN" -description = "Gradio demo for ArcaneGAN, portrait to Arcane style. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." -article = "
    ArcaneGan by Alexander S | Github Repo |
    visitor badge
    " - -gr.Interface( - process, - [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(choices=['version 0.2','version 0.3','version 0.4'], type="value", default='version 0.4', label='version') -], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[['bill.png','version 0.3'],['keanu.png','version 0.4'],['will.jpeg','version 0.4']], - allow_flagging=False, - allow_screenshot=False - ).launch() diff --git a/spaces/akhaliq/Mask2Former/mask2former/evaluation/__init__.py b/spaces/akhaliq/Mask2Former/mask2former/evaluation/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/Music_Source_Separation/README.md b/spaces/akhaliq/Music_Source_Separation/README.md deleted file mode 100644 index e8112f530f97d392653d9ef7a70aa319c9ab98ac..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Music_Source_Separation -emoji: ⚡ -colorFrom: green -colorTo: yellow -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/instruments_callbacks.py b/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/instruments_callbacks.py deleted file mode 100644 index dc8a1d133ac4a9253c207cb2d6607fb96d392607..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/instruments_callbacks.py +++ /dev/null @@ -1,200 +0,0 @@ -import logging -import os -import time -from typing import List, NoReturn - -import librosa -import numpy as np -import pytorch_lightning as pl -import torch.nn as nn -from pytorch_lightning.utilities import rank_zero_only - -from bytesep.callbacks.base_callbacks import SaveCheckpointsCallback -from bytesep.inference import Separator -from bytesep.utils import StatisticsContainer, calculate_sdr, read_yaml - - -def get_instruments_callbacks( - config_yaml: str, - workspace: str, - checkpoints_dir: str, - statistics_path: str, - logger: pl.loggers.TensorBoardLogger, - model: nn.Module, - evaluate_device: str, -) -> List[pl.Callback]: - """Get Voicebank-Demand callbacks of a config yaml. 
- - Args: - config_yaml: str - workspace: str - checkpoints_dir: str, directory to save checkpoints - statistics_path: str, path to save statistics - logger: pl.loggers.TensorBoardLogger - model: nn.Module - evaluate_device: str - - Return: - callbacks: List[pl.Callback] - """ - configs = read_yaml(config_yaml) - task_name = configs['task_name'] - target_source_types = configs['train']['target_source_types'] - input_channels = configs['train']['channels'] - mono = True if input_channels == 1 else False - test_audios_dir = os.path.join(workspace, "evaluation_audios", task_name, "test") - sample_rate = configs['train']['sample_rate'] - evaluate_step_frequency = configs['train']['evaluate_step_frequency'] - save_step_frequency = configs['train']['save_step_frequency'] - test_batch_size = configs['evaluate']['batch_size'] - test_segment_seconds = configs['evaluate']['segment_seconds'] - - test_segment_samples = int(test_segment_seconds * sample_rate) - assert len(target_source_types) == 1 - target_source_type = target_source_types[0] - - # save checkpoint callback - save_checkpoints_callback = SaveCheckpointsCallback( - model=model, - checkpoints_dir=checkpoints_dir, - save_step_frequency=save_step_frequency, - ) - - # statistics container - statistics_container = StatisticsContainer(statistics_path) - - # evaluation callback - evaluate_test_callback = EvaluationCallback( - model=model, - target_source_type=target_source_type, - input_channels=input_channels, - sample_rate=sample_rate, - mono=mono, - evaluation_audios_dir=test_audios_dir, - segment_samples=test_segment_samples, - batch_size=test_batch_size, - device=evaluate_device, - evaluate_step_frequency=evaluate_step_frequency, - logger=logger, - statistics_container=statistics_container, - ) - - callbacks = [save_checkpoints_callback, evaluate_test_callback] - # callbacks = [save_checkpoints_callback] - - return callbacks - - -class EvaluationCallback(pl.Callback): - def __init__( - self, - model: nn.Module, - input_channels: int, - evaluation_audios_dir: str, - target_source_type: str, - sample_rate: int, - mono: bool, - segment_samples: int, - batch_size: int, - device: str, - evaluate_step_frequency: int, - logger: pl.loggers.TensorBoardLogger, - statistics_container: StatisticsContainer, - ): - r"""Callback to evaluate every #evaluate_step_frequency steps. - - Args: - model: nn.Module - input_channels: int - evaluation_audios_dir: str, directory containing audios for evaluation - target_source_type: str, e.g., 'violin' - sample_rate: int - mono: bool - segment_samples: int, length of segments to be input to a model, e.g., 44100*30 - batch_size: int, e.g., 12 - device: str, e.g., 'cuda' - evaluate_step_frequency: int, evaluate every #evaluate_step_frequency steps - logger: pl.loggers.TensorBoardLogger - statistics_container: StatisticsContainer - """ - self.model = model - self.target_source_type = target_source_type - self.sample_rate = sample_rate - self.mono = mono - self.segment_samples = segment_samples - self.evaluate_step_frequency = evaluate_step_frequency - self.logger = logger - self.statistics_container = statistics_container - - self.evaluation_audios_dir = evaluation_audios_dir - - # separator - self.separator = Separator(model, self.segment_samples, batch_size, device) - - @rank_zero_only - def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn: - r"""Evaluate separation SDR on the evaluation audios every - #evaluate_step_frequency steps; the scores are used for monitoring training only. 
- """ - - global_step = trainer.global_step - - if global_step % self.evaluate_step_frequency == 0: - - mixture_audios_dir = os.path.join(self.evaluation_audios_dir, 'mixture') - clean_audios_dir = os.path.join( - self.evaluation_audios_dir, self.target_source_type - ) - - audio_names = sorted(os.listdir(mixture_audios_dir)) - - error_str = "Directory {} does not contain audios for evaluation!".format( - self.evaluation_audios_dir - ) - assert len(audio_names) > 0, error_str - - logging.info("--- Step {} ---".format(global_step)) - logging.info("Total {} pieces for evaluation:".format(len(audio_names))) - - eval_time = time.time() - - sdrs = [] - - for n, audio_name in enumerate(audio_names): - - # Load audio. - mixture_path = os.path.join(mixture_audios_dir, audio_name) - clean_path = os.path.join(clean_audios_dir, audio_name) - - mixture, origin_fs = librosa.core.load( - mixture_path, sr=self.sample_rate, mono=self.mono - ) - - # Target - clean, origin_fs = librosa.core.load( - clean_path, sr=self.sample_rate, mono=self.mono - ) - - if mixture.ndim == 1: - mixture = mixture[None, :] - # (channels_num, audio_length) - - input_dict = {'waveform': mixture} - - # separate - sep_wav = self.separator.separate(input_dict) - # (channels_num, audio_length) - - sdr = calculate_sdr(ref=clean, est=sep_wav) - - print("{} SDR: {:.3f}".format(audio_name, sdr)) - sdrs.append(sdr) - - logging.info("-----------------------------") - logging.info('Avg SDR: {:.3f}'.format(np.mean(sdrs))) - - logging.info("Evlauation time: {:.3f}".format(time.time() - eval_time)) - - statistics = {"sdr": np.mean(sdrs)} - self.statistics_container.append(global_step, statistics, 'test') - self.statistics_container.dump() diff --git a/spaces/akhaliq/deeplab2/compile.sh b/spaces/akhaliq/deeplab2/compile.sh deleted file mode 100644 index 2afdcf2afc04835e81bc57f877a65bc6903d1ba1..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/compile.sh +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Quick start command line to setup deeplab2 (Linux only). -# Example command to run: -# deeplab2/compile.sh ${PATH_TO_PROTOC} -# -# This script assumes that the following folder structure: -# -# + root -# + deeplab2 -# + models -# + orbit -# + cocoapi -# + PythonAPI -# -# Besides, the script also assumes that `protoc` can be accessed from command -# line. - -#!/bin/bash - -set -e - -# cpu or gpu -CONFIG="cpu" - -function tolower() { - echo "${1,,}" -} - -if [[ ! -z "$1" ]] -then - echo "Setting configuration from argument($1)..." - CONFIG=$(tolower "$1") - if [ "$CONFIG" != "cpu" ] && [ "$CONFIG" != "gpu" ] - then - echo "Configuration must be either \"cpu\" or \"gpu\", exiting..." - exit 1 - fi -fi - -echo "Running configuration with $CONFIG." - -# Protobuf compilation -# Replace `protoc` with `${PATH_TO_PROTOC}` if protobuf compilier is downloaded -# from web. 
-echo "-----------------------------------------------------------------------" -echo "Compiling protobuf..." -echo "-----------------------------------------------------------------------" -protoc deeplab2/*.proto --python_out=. - -# Compile custom ops -# See details in https://www.tensorflow.org/guide/create_op#compile_the_op_using_your_system_compiler_tensorflow_binary_installation -TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') ) -OP_NAME='deeplab2/tensorflow_ops/kernels/merge_semantic_and_instance_maps_op' - -if [ "$CONFIG" == "cpu" ] -then - # CPU - echo "-----------------------------------------------------------------------" - echo "Compiling the custom cc op: merge_semantic_and_instance_maps_op (CPU)..." - echo "-----------------------------------------------------------------------" - g++ -std=c++14 -shared \ - ${OP_NAME}.cc ${OP_NAME}_kernel.cc -o ${OP_NAME}.so -fPIC ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -O2 -else - # GPU - # (https://www.tensorflow.org/guide/create_op#compiling_the_kernel_for_the_gpu_device) - echo "-----------------------------------------------------------------------" - echo "Compiling the custom cc op: merge_semantic_and_instance_maps_op (GPU)..." - echo "-----------------------------------------------------------------------" - nvcc -std=c++14 -c -o ${OP_NAME}_kernel.cu.o \ - ${OP_NAME}_kernel.cu.cc \ - ${TF_CFLAGS[@]} -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC --expt-relaxed-constexpr - - g++ -std=c++14 -shared -o ${OP_NAME}.so ${OP_NAME}.cc ${OP_NAME}_kernel.cc \ - ${OP_NAME}_kernel.cu.o ${TF_CFLAGS[@]} -fPIC -lcudart ${TF_LFLAGS[@]} -fi - -# PYTHONPATH -export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/models:`pwd`/cocoapi/PythonAPI - -# Runing test -echo "-----------------------------------------------------------------------" -echo "Running tests for merge_semantic_and_instance_maps_op..." -echo "-----------------------------------------------------------------------" -python deeplab2/tensorflow_ops/python/kernel_tests/merge_semantic_and_instance_maps_op_test.py - -# End-to-end tests -echo "-----------------------------------------------------------------------" -echo "Running end-to-end tests..." -echo "-----------------------------------------------------------------------" - -# Model training test (test for custom ops, protobug) -python deeplab2/model/deeplab_test.py - -# Model evaluation test (test for other packages such as orbit, cocoapi, etc) -python deeplab2/trainer/evaluator_test.py - -echo "------------------------" -echo "Done with configuration!" 
-echo "------------------------" - diff --git a/spaces/akhaliq/scikit-learn-tabular-playground/README.md b/spaces/akhaliq/scikit-learn-tabular-playground/README.md deleted file mode 100644 index 087b9b3b15e0c4568d959bf8598eb7ac340daf88..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/scikit-learn-tabular-playground/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Scikit Learn Tabular Playground -emoji: 📉 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_extension.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_extension.py deleted file mode 100644 index cbd6da9be4956ce8558304ed72ffbe88ccd22ba5..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_extension.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import Any - - -def load_ipython_extension(ip: Any) -> None: # pragma: no cover - # prevent circular import - from pip._vendor.rich.pretty import install - from pip._vendor.rich.traceback import install as tr_install - - install() - tr_install() diff --git a/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/analysis_multiwoz.py b/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/analysis_multiwoz.py deleted file mode 100644 index 33b7ccbf4def4767cad329a4d6917882b5c00616..0000000000000000000000000000000000000000 --- a/spaces/alistairmcleay/cambridge-masters-project/src/crazyneuraluser/user_model_code/analysis_multiwoz.py +++ /dev/null @@ -1,119 +0,0 @@ -import json -import os - -DATA_SPLIT = ["train", "dev", "test"] - - -def _check_n_turns(data, data_act): - for split in DATA_SPLIT: - for dial_id, meta in data[split].items(): - n_in_meta = len(meta["turns"]) - - assert dial_id in data_act - n_in_act = len(data_act[dial_id]) - assert n_in_meta == n_in_act - - -def collect_data(data_path, remove_dial_switch=False): - # load act - act_file = os.path.join(data_path, "dialog_acts.json") - with open(act_file) as f: - data_act = json.load(f) - print("Load {} dialogues in act file".format(len(data_act))) - - # load data - data = {} - for split in DATA_SPLIT: - data[split] = iter_data_folder(data_path, split, remove_dial_switch, data_act) - - _check_n_turns(data, data_act) - return data, data_act - - -def remove_dial(dial_id, dial, dial_act): - # check services - services = dial["services"] - if "police" in services or "bus" in services or "hospital" in services: - return True - - # check act - domains = set() - for turn_id, turn_act in dial_act.items(): - dialogue_act = turn_act["dialog_act"] - for dact in dialogue_act: - assert "-" in dact - domain, act = dact.split("-") - domains.add(domain) - if "Police" in domains or "Bus" in domains or "Hospital" in domains: - return True - return False - - -def iter_data_folder(data_path, split, remove_dial_switch, data_act): - """Iterate data folder""" - split_dir = os.path.join(data_path, split) - data_split = {} - remove_dial_ids = [] - total_dial_ids = [] - for f in os.listdir(split_dir): - if not f.startswith("dialogues"): # skip schema.json - continue - file_path = os.path.join(data_path, split, f) - iter_file( - file_path, - data_split, - remove_dial_ids, - total_dial_ids, - remove_dial_switch, - data_act, - ) - 
print( - "Done collecting {} | total {} dialogues | load {} dialogues | remove {} dialogues".format( - split, len(total_dial_ids), len(data_split), len(remove_dial_ids) - ) - ) - return data_split - - -def iter_file( - file_path, data_split, remove_dial_ids, total_dial_ids, remove_dial_switch, data_act -): - with open(file_path) as f: - data_in = json.load(f) # list of dialouges in a json file - - for dial in data_in: - dial_id = dial["dialogue_id"] - total_dial_ids.append(dial_id) - dial_act = data_act[dial_id] - - if remove_dial_switch and remove_dial(dial_id, dial, dial_act): - remove_dial_ids.append(dial_id) - else: - data_split[dial_id] = dial - - -def show_dial(dial_id, data, data_act): - def simple_linearise_act(dialouge_act): - linear_act = "" - for domain_act, slot_value_list in dialouge_act.items(): - linear_act += domain_act + " " - for slot_value in slot_value_list: - slot, value = slot_value[0], slot_value[1] - linear_act += slot + " " - linear_act += value + " " - return linear_act - - split = None - for data_split in DATA_SPLIT: - if dial_id in data[data_split]: - split = data_split - break - - print("dial_id: {}".format(dial_id)) - for turn_id, turn in enumerate(data[split][dial_id]["turns"]): - dialouge_act = data_act[dial_id][str(turn_id)]["dialog_act"] - linear_act = simple_linearise_act(dialouge_act) - print("-----" * 15) - print("turn_id: {}, spk: {}".format(turn_id, turn["speaker"])) - print("act: |{}|".format(linear_act)) - print("utt: |{}|".format(turn["utterance"])) diff --git a/spaces/allknowingroger/Image-Models-Test134/app.py b/spaces/allknowingroger/Image-Models-Test134/app.py deleted file mode 100644 index a97dc9195c0b490a7d4ca9ff717382350ff205c2..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test134/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Varun29/my-ai-project", - "anshulpatidar01/my-horse", - "Yntec/526", - "Dime96/sd-pokemon-model-lora-sdxl", - "samkit123/my-pet-dog-xzp", - "rjaiswal/sdxl-bulgari-model-lora", - "Nil7Rudra/peacocks-xmj", - "Ashutosh94/my-pet-character", - "NickKolok/meryl-stryfe-20230408-17-adm-7k-1200-steps", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return 
tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/text-generation-webui-space-1/extensions/gallery/script.py b/spaces/allknowingroger/text-generation-webui-space-1/extensions/gallery/script.py deleted file mode 100644 index 8a2d7cf988734a7ab0966d047ff3d31ba58324b7..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/text-generation-webui-space-1/extensions/gallery/script.py +++ /dev/null @@ -1,82 +0,0 @@ -from pathlib import Path - -import gradio as gr - -from modules.html_generator import get_image_cache - - -def generate_html(): - css = """ - .character-gallery { - margin: 1rem 0; - display: grid; - grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); - grid-column-gap: 0.4rem; - grid-row-gap: 1.2rem; - } - - .character-container { - cursor: pointer; - text-align: center; - position: relative; - opacity: 0.85; - } - - .character-container:hover { - opacity: 1; - } - - .character-container .placeholder, .character-container img { - width: 150px; - height: 200px; - background-color: gray; - object-fit: cover; - margin: 0 auto; - border-radius: 1rem; - border: 3px solid white; - box-shadow: 3px 3px 6px 0px rgb(0 0 0 / 50%); - } - - .character-name { - margin-top: 0.3rem; - display: block; - font-size: 1.2rem; - font-weight: 600; - overflow-wrap: anywhere; - } - """ - - container_html = f'" - return container_html - -def ui(): - with gr.Accordion("Character gallery"): - update = gr.Button("Refresh") - gallery = gr.HTML(value=generate_html()) - update.click(generate_html, [], gallery) diff --git 
a/spaces/alsalemi/pv-segment-01/xml_to_mask.py b/spaces/alsalemi/pv-segment-01/xml_to_mask.py deleted file mode 100644 index cb54f11f525240710fe62580d82428c054282698..0000000000000000000000000000000000000000 --- a/spaces/alsalemi/pv-segment-01/xml_to_mask.py +++ /dev/null @@ -1,239 +0,0 @@ -import numpy as np -import sys -import lxml.etree as ET -import cv2 -import time -import os - -def xml_to_mask(xml_path, location, size, tree=None, downsample=1, verbose=0): - - # parse xml and get root - if tree == None: tree = ET.parse(xml_path) - root = tree.getroot() - - # calculate region bounds - bounds = {'x_min' : location[0], 'y_min' : location[1], 'x_max' : location[0] + size[0]*downsample, 'y_max' : location[1] + size[1]*downsample} - IDs = regions_in_mask(xml_path=xml_path, root=root, tree=tree, bounds=bounds, verbose=verbose) - - if verbose != 0: - print('\nFOUND: ' + str(len(IDs)) + ' regions') - - # find regions in bounds - Regions = get_vertex_points(root=root, IDs=IDs, verbose=verbose) - # print('Regions:', Regions) - - # fill regions and create mask - mask = Regions_to_mask(Regions=Regions, bounds=bounds, IDs=IDs, downsample=downsample, verbose=verbose) - if verbose != 0: - print('done...\n') - - return mask - -def xml_to_masks(xml_path, location, size, tree=None, downsample=1, verbose=0, linecolors=[]): - # parse xml and get root - if tree == None: tree = ET.parse(xml_path) - root = tree.getroot() - - # calculate region bounds - bounds = { - 'x_min' : location[0], - 'y_min' : location[1], - 'x_max' : location[0] + size[0]*downsample, - 'y_max' : location[1] + size[1]*downsample} - # bounds_array = np.array([bounds['x_min'], bounds['y_min'], bounds['x_max'], bounds['y_max']]) - - IDs = labels_in_mask(xml_path=xml_path, root=root, tree=tree, bounds=bounds, verbose=verbose, linecolors=linecolors) - - if verbose != 0: - print('\nFOUND: ' + str(len(IDs)) + ' regions') - - # find regions in bounds - # print('Regions:', Regions) - - # fill regions and create mask - masks = [] - for id in IDs: - Regions = get_vertex_points(root=root, IDs=[id], verbose=verbose) - for Region in Regions: - mask = Regions_to_mask(Regions=[Region], bounds=bounds, IDs=[id], downsample=downsample, verbose=verbose) - masks.append(mask) - - if verbose != 0: - print('done...\n') - - return masks - -def labels_in_mask(xml_path, root, tree, bounds, verbose=1, linecolors=[]): - # find regions to save - colors = [] - IDs = [] - mtime = os.path.getmtime(xml_path) - - write_minmax_to_xml(xml_path, tree) - - for Annotation in root.findall("./Annotation"): # for all annotations - annotationID = Annotation.attrib['Id'] - LineColor = Annotation.attrib['LineColor'] - colors.append(LineColor) - - colors = np.unique(colors) - # Encode LineColor to 0,1,2,3 - if linecolors != []: - for id in colors: - IDs.append({'label_id' : linecolors.index(id) + 1, 'LineColor' : id}) - - return IDs - -def get_vertex_points(root, IDs, verbose=1): - Regions = [] - for ID in IDs: # for all IDs - # get all vertex attributes (points) - for Region in root.findall("./Annotation[@LineColor='" + ID['LineColor'] + "']/*/Region"): - Vertices = [] - # Ignore all regions with area 0 - if Region.attrib['Area'] != '0.0': - for Vertex in Region.findall("./Vertices/Vertex"): - # make array of points - Vertices.append([int(float(Vertex.attrib['X'])), int(float(Vertex.attrib['Y']))]) - Regions.append(np.array(Vertices)) - return Regions - -def regions_in_mask(xml_path, root, tree, bounds, verbose=1, linecolors=[]): - # find regions to save - IDs = [] - mtime = 
os.path.getmtime(xml_path) - - write_minmax_to_xml(xml_path, tree) - - for Annotation in root.findall("./Annotation"): # for all annotations - annotationID = Annotation.attrib['Id'] - LineColor = Annotation.attrib['LineColor'] - for Region in Annotation.findall("./*/Region"): # iterate on all regions - # Check if region's area's is more than 0 - print('Region area:', Region.attrib['Area']) - if Region.attrib['Area'] != '0.0': - for Vert in Region.findall("./Vertices"): # iterate on all vertex in region - # get minmax points - Xmin = np.int32(Vert.attrib['Xmin']) - Ymin = np.int32(Vert.attrib['Ymin']) - Xmax = np.int32(Vert.attrib['Xmax']) - Ymax = np.int32(Vert.attrib['Ymax']) - # test minmax points in region bounds - if bounds['x_min'] <= Xmax and bounds['x_max'] >= Xmin and bounds['y_min'] <= Ymax and bounds['y_max'] >= Ymin: - if verbose: print('Saving region IDs...') - IDs.append({'annotationID' : annotationID, 'LineColor' : LineColor}) - break - - # Encode LineColor to 0,1,2,3 - if linecolors != []: - for id in IDs: - id['LineColor'] = linecolors.index(id['LineColor']) + 1 - - return IDs - -def Regions_to_mask(Regions, bounds, IDs, downsample, verbose=1): - # downsample = int(np.round(downsample_factor**(.5))) - - if verbose !=0: - print('MAKING MASK for:', IDs) - - if len(Regions) != 0: # regions present - # get min/max sizes - min_sizes = np.empty(shape=[2,0], dtype=np.int32) - max_sizes = np.empty(shape=[2,0], dtype=np.int32) - for Region in Regions: # fill all regions - min_bounds = np.reshape((np.amin(Region, axis=0)), (2,1)) - max_bounds = np.reshape((np.amax(Region, axis=0)), (2,1)) - min_sizes = np.append(min_sizes, min_bounds, axis=1) - max_sizes = np.append(max_sizes, max_bounds, axis=1) - min_size = np.amin(min_sizes, axis=1) - max_size = np.amax(max_sizes, axis=1) - - # add to old bounds - bounds['x_min_pad'] = min(min_size[1], bounds['x_min']) - bounds['y_min_pad'] = min(min_size[0], bounds['y_min']) - bounds['x_max_pad'] = max(max_size[1], bounds['x_max']) - bounds['y_max_pad'] = max(max_size[0], bounds['y_max']) - - # make blank mask - mask = np.zeros([ int(np.round((bounds['y_max_pad'] - bounds['y_min_pad']) / downsample)), int(np.round((bounds['x_max_pad'] - bounds['x_min_pad']) / downsample)) ], dtype=np.uint8) - - # fill mask polygons - for Region in Regions: - # reformat Regions - Region[:,1] = np.int32(np.round((Region[:,1] - bounds['y_min_pad']) / downsample)) - Region[:,0] = np.int32(np.round((Region[:,0] - bounds['x_min_pad']) / downsample)) - # get annotation ID for mask color - # print('IDs inside:', IDs) - ID = IDs[0] - cv2.fillPoly(img=mask, pts=[Region], color=int(ID['label_id'])) - - - # reshape mask - x_start = np.int32(np.round((bounds['x_min'] - bounds['x_min_pad']) / downsample)) - y_start = np.int32(np.round((bounds['y_min'] - bounds['y_min_pad']) / downsample)) - x_stop = np.int32(np.round((bounds['x_max'] - bounds['x_min_pad']) / downsample)) - y_stop = np.int32(np.round((bounds['y_max'] - bounds['y_min_pad']) / downsample)) - # pull center mask region - mask = mask[ y_start:y_stop, x_start:x_stop ] - - else: # no Regions - mask = np.zeros([ int(np.round((bounds['y_max'] - bounds['y_min']) / downsample)), int(np.round((bounds['x_max'] - bounds['x_min']) / downsample)) ]) - - return mask - -def write_minmax_to_xml(xml_path, tree=None, time_buffer=10): - # function to write min and max verticies to each region - - # parse xml and get root - if tree == None: tree = ET.parse(xml_path) - root = tree.getroot() - - try: - # has the xml been modified to 
include minmax - modtime = np.float64(root.attrib['modtime']) - # has the minmax modified xml been changed? - assert os.path.getmtime(xml_path) < modtime + time_buffer - - except: - - for Annotation in root.findall("./Annotation"): # for all annotations - annotationID = Annotation.attrib['Id'] - - for Region in Annotation.findall("./*/Region"): # iterate on all region - - for Vert in Region.findall("./Vertices"): # iterate on all vertex in region - Xs = [] - Ys = [] - for Vertex in Vert.findall("./Vertex"): # iterate on all vertex in region - # get points - Xs.append(np.int32(np.float64(Vertex.attrib['X']))) - Ys.append(np.int32(np.float64(Vertex.attrib['Y']))) - - # find min and max points - Xs = np.array(Xs) - Ys = np.array(Ys) - - # modify the xml - Vert.set("Xmin", "{}".format(np.min(Xs))) - Vert.set("Xmax", "{}".format(np.max(Xs))) - Vert.set("Ymin", "{}".format(np.min(Ys))) - Vert.set("Ymax", "{}".format(np.max(Ys))) - - root.set("modtime", "{}".format(time.time())) - xml_data = ET.tostring(tree, pretty_print=True) - #xml_data = Annotations.toprettyxml() - f = open(xml_path, 'w') - f.write(xml_data.decode()) - f.close() - -def get_num_classes(xml_path): - # parse xml and get root - tree = ET.parse(xml_path) - root = tree.getroot() - - annotation_num = 0 - for Annotation in root.findall("./Annotation"): # for all annotations - annotation_num += 1 - - return annotation_num + 1 diff --git a/spaces/altryne/vidtranslator/utils/utils.py b/spaces/altryne/vidtranslator/utils/utils.py deleted file mode 100644 index 292df1a7e66af9c99cd2a9c454dae05072099dc2..0000000000000000000000000000000000000000 --- a/spaces/altryne/vidtranslator/utils/utils.py +++ /dev/null @@ -1,25 +0,0 @@ -import whisper -import argparse -import os - -def str2bool(string): - str2val = {"True": True, "False": False} - if string in str2val: - return str2val[string] - else: - raise ValueError( - f"Expected one of {set(str2val.keys())}, got {string}") - -def get_args(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--public", type=str2bool, default=False, - help="Wether to share with gradio public or not") - parser.add_argument("--preload", type=str2bool, default=True, - help="Should the model be preloaded on script launch. Disable for faster debug") - parser.add_argument("--model", default=os.environ.get('MODEL_SIZE', "medium"), - choices=whisper.available_models(), help="name of the Whisper model to use") - - args = parser.parse_args().__dict__ - return args - diff --git a/spaces/aodianyun/panoptic-segment-anything/segment_anything/linter.sh b/spaces/aodianyun/panoptic-segment-anything/segment_anything/linter.sh deleted file mode 100644 index df2e17436d30e89ff1728109301599f425f1ad6b..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/panoptic-segment-anything/segment_anything/linter.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. - -{ - black --version | grep -E "23\." > /dev/null -} || { - echo "Linter requires 'black==23.*' !" - exit 1 -} - -ISORT_VERSION=$(isort --version-number) -if [[ "$ISORT_VERSION" != 5.12* ]]; then - echo "Linter requires isort==5.12.0 !" - exit 1 -fi - -echo "Running isort ..." -isort . --atomic - -echo "Running black ..." -black -l 100 . - -echo "Running flake8 ..." -if [ -x "$(command -v flake8)" ]; then - flake8 . -else - python3 -m flake8 . -fi - -echo "Running mypy..." - -mypy --exclude 'setup.py|notebooks' . 
diff --git a/spaces/aodianyun/stable-diffusion-webui/javascript/ui.js b/spaces/aodianyun/stable-diffusion-webui/javascript/ui.js deleted file mode 100644 index b7a8268a8fcdf9821cb3af31efea9e0283da1bfe..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/javascript/ui.js +++ /dev/null @@ -1,338 +0,0 @@ -// various functions for interaction with ui.py not large enough to warrant putting them in separate files - -function set_theme(theme){ - gradioURL = window.location.href - if (!gradioURL.includes('?__theme=')) { - window.location.replace(gradioURL + '?__theme=' + theme); - } -} - -function selected_gallery_index(){ - var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item') - var button = gradioApp().querySelector('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item.\\!ring-2') - - var result = -1 - buttons.forEach(function(v, i){ if(v==button) { result = i } }) - - return result -} - -function extract_image_from_gallery(gallery){ - if(gallery.length == 1){ - return [gallery[0]] - } - - index = selected_gallery_index() - - if (index < 0 || index >= gallery.length){ - return [null] - } - - return [gallery[index]]; -} - -function args_to_array(args){ - res = [] - for(var i=0;i label > textarea"); - - if(counter.parentElement == prompt.parentElement){ - return - } - - prompt.parentElement.insertBefore(counter, prompt) - counter.classList.add("token-counter") - prompt.parentElement.style.position = "relative" - - promptTokecountUpdateFuncs[id] = function(){ update_token_counter(id_button); } - textarea.addEventListener("input", promptTokecountUpdateFuncs[id]); - } - - registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button') - registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button') - registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button') - registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button') - - show_all_pages = gradioApp().getElementById('settings_show_all_pages') - settings_tabs = gradioApp().querySelector('#settings div') - if(show_all_pages && settings_tabs){ - settings_tabs.appendChild(show_all_pages) - show_all_pages.onclick = function(){ - gradioApp().querySelectorAll('#settings > div').forEach(function(elem){ - elem.style.display = "block"; - }) - } - } -}) - -onOptionsChanged(function(){ - elem = gradioApp().getElementById('sd_checkpoint_hash') - sd_checkpoint_hash = opts.sd_checkpoint_hash || "" - shorthash = sd_checkpoint_hash.substr(0,10) - - if(elem && elem.textContent != shorthash){ - elem.textContent = shorthash - elem.title = sd_checkpoint_hash - elem.href = "https://google.com/search?q=" + sd_checkpoint_hash - } -}) - -let txt2img_textarea, img2img_textarea = undefined; -let wait_time = 800 -let token_timeouts = {}; - -function update_txt2img_tokens(...args) { - update_token_counter("txt2img_token_button") - if (args.length == 2) - return args[0] - return args; -} - -function update_img2img_tokens(...args) { - update_token_counter("img2img_token_button") - if (args.length == 2) - return args[0] - return args; -} - -function update_token_counter(button_id) { - if (token_timeouts[button_id]) - clearTimeout(token_timeouts[button_id]); - token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time); -} - -function restart_reload(){ - document.body.innerHTML='
<h1 style="font-family:monospace; margin-top:20%; color:lightgray; text-align:center;">Reloading...</h1>
    '; - setTimeout(function(){location.reload()},2000) - - return [] -} - -// Simulate an `input` DOM event for Gradio Textbox component. Needed after you edit its contents in javascript, otherwise your edits -// will only visible on web page and not sent to python. -function updateInput(target){ - let e = new Event("input", { bubbles: true }) - Object.defineProperty(e, "target", {value: target}) - target.dispatchEvent(e); -} - - -var desiredCheckpointName = null; -function selectCheckpoint(name){ - desiredCheckpointName = name; - gradioApp().getElementById('change_checkpoint').click() -} diff --git a/spaces/arch-123/bingo/tests/parse.ts b/spaces/arch-123/bingo/tests/parse.ts deleted file mode 100644 index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/tests/parse.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { promises as fs } from 'fs' -import { join } from 'path' -import { parseHeadersFromCurl } from '@/lib/utils' - -(async () => { - const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8') - const headers = parseHeadersFromCurl(content) - console.log(headers) - - const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8') - const cmdHeaders = parseHeadersFromCurl(cmdContent) - console.log(cmdHeaders) -})() diff --git a/spaces/arshy/medicalspecialty/app.py b/spaces/arshy/medicalspecialty/app.py deleted file mode 100644 index 1d7abe13958d896951419c74eef9b878420fca51..0000000000000000000000000000000000000000 --- a/spaces/arshy/medicalspecialty/app.py +++ /dev/null @@ -1,27 +0,0 @@ -from fastai.text.all import * -import gradio as gr -from blurr.text.modeling.all import * - -learn = load_learner("model.pkl") - -def predict(inp:str): - preds = learn.blurr_predict([inp])[0] - - preds_dict = dict(zip(preds['class_labels'], preds['probs'])) - preds_dict = sorted(preds_dict.items(), key=operator.itemgetter(1), reverse=True)[:5] - - preds_df = pd.DataFrame(preds_dict, columns=['Specialty', 'Probability']) - preds_df['Probability'] = preds_df['Probability'].apply(lambda x: f"{x*100:.4f}%") - - return preds_df - -intf = gr.Interface(fn=predict, - inputs=gr.inputs.Textbox(label="What are the symptoms?"), - outputs=gr.outputs.Dataframe(), - title="Medical Specialty Classification from Symptoms", - description="Given a descriptive prompt of symptoms, the model classifies which medical specialty might the symptoms be related too.", - examples=["I have been having a headache for two weeks", - "I have rashes on my skin", - "I have been coughing for more than a month"] -) -intf.launch() diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/tacotron/common_layers.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/tacotron/common_layers.py deleted file mode 100644 index f78ff1e75f6c23eb1a0fe827247a1127bc8f9958..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/tacotron/common_layers.py +++ /dev/null @@ -1,119 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F - - -class Linear(nn.Module): - """Linear layer with a specific initialization. - - Args: - in_features (int): number of channels in the input tensor. - out_features (int): number of channels in the output tensor. - bias (bool, optional): enable/disable bias in the layer. Defaults to True. - init_gain (str, optional): method to compute the gain in the weight initializtion based on the nonlinear activation used afterwards. 
Defaults to 'linear'. - """ - - def __init__(self, in_features, out_features, bias=True, init_gain="linear"): - super().__init__() - self.linear_layer = torch.nn.Linear(in_features, out_features, bias=bias) - self._init_w(init_gain) - - def _init_w(self, init_gain): - torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=torch.nn.init.calculate_gain(init_gain)) - - def forward(self, x): - return self.linear_layer(x) - - -class LinearBN(nn.Module): - """Linear layer with Batch Normalization. - - x -> linear -> BN -> o - - Args: - in_features (int): number of channels in the input tensor. - out_features (int ): number of channels in the output tensor. - bias (bool, optional): enable/disable bias in the linear layer. Defaults to True. - init_gain (str, optional): method to set the gain for weight initialization. Defaults to 'linear'. - """ - - def __init__(self, in_features, out_features, bias=True, init_gain="linear"): - super().__init__() - self.linear_layer = torch.nn.Linear(in_features, out_features, bias=bias) - self.batch_normalization = nn.BatchNorm1d(out_features, momentum=0.1, eps=1e-5) - self._init_w(init_gain) - - def _init_w(self, init_gain): - torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=torch.nn.init.calculate_gain(init_gain)) - - def forward(self, x): - """ - Shapes: - x: [T, B, C] or [B, C] - """ - out = self.linear_layer(x) - if len(out.shape) == 3: - out = out.permute(1, 2, 0) - out = self.batch_normalization(out) - if len(out.shape) == 3: - out = out.permute(2, 0, 1) - return out - - -class Prenet(nn.Module): - """Tacotron specific Prenet with an optional Batch Normalization. - - Note: - Prenet with BN improves the model performance significantly especially - if it is enabled after learning a diagonal attention alignment with the original - prenet. However, if the target dataset is high quality then it also works from - the start. It is also suggested to disable dropout if BN is in use. - - prenet_type == "original" - x -> [linear -> ReLU -> Dropout]xN -> o - - prenet_type == "bn" - x -> [linear -> BN -> ReLU -> Dropout]xN -> o - - Args: - in_features (int): number of channels in the input tensor and the inner layers. - prenet_type (str, optional): prenet type "original" or "bn". Defaults to "original". - prenet_dropout (bool, optional): dropout rate. Defaults to True. - dropout_at_inference (bool, optional): use dropout at inference. It leads to a better quality for some models. - out_features (list, optional): List of output channels for each prenet block. - It also defines number of the prenet blocks based on the length of argument list. - Defaults to [256, 256]. - bias (bool, optional): enable/disable bias in prenet linear layers. Defaults to True. 
- """ - - # pylint: disable=dangerous-default-value - def __init__( - self, - in_features, - prenet_type="original", - prenet_dropout=True, - dropout_at_inference=False, - out_features=[256, 256], - bias=True, - ): - super().__init__() - self.prenet_type = prenet_type - self.prenet_dropout = prenet_dropout - self.dropout_at_inference = dropout_at_inference - in_features = [in_features] + out_features[:-1] - if prenet_type == "bn": - self.linear_layers = nn.ModuleList( - [LinearBN(in_size, out_size, bias=bias) for (in_size, out_size) in zip(in_features, out_features)] - ) - elif prenet_type == "original": - self.linear_layers = nn.ModuleList( - [Linear(in_size, out_size, bias=bias) for (in_size, out_size) in zip(in_features, out_features)] - ) - - def forward(self, x): - for linear in self.linear_layers: - if self.prenet_dropout: - x = F.dropout(F.relu(linear(x)), p=0.5, training=self.training or self.dropout_at_inference) - else: - x = F.relu(linear(x)) - return x diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/xtts/perceiver_encoder.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/xtts/perceiver_encoder.py deleted file mode 100644 index 7b7ee79b5018c80ad04c5766e7cd446862097c09..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/xtts/perceiver_encoder.py +++ /dev/null @@ -1,319 +0,0 @@ -# Adapted from https://github.com/lucidrains/naturalspeech2-pytorch/blob/659bec7f7543e7747e809e950cc2f84242fbeec7/naturalspeech2_pytorch/naturalspeech2_pytorch.py#L532 - -from collections import namedtuple -from functools import wraps - -import torch -import torch.nn.functional as F -from einops import rearrange, repeat -from einops.layers.torch import Rearrange -from packaging import version -from torch import einsum, nn - - -def exists(val): - return val is not None - - -def once(fn): - called = False - - @wraps(fn) - def inner(x): - nonlocal called - if called: - return - called = True - return fn(x) - - return inner - - -print_once = once(print) - -# main class - - -class Attend(nn.Module): - def __init__(self, dropout=0.0, causal=False, use_flash=False): - super().__init__() - self.dropout = dropout - self.attn_dropout = nn.Dropout(dropout) - - self.causal = causal - self.register_buffer("mask", None, persistent=False) - - self.use_flash = use_flash - assert not ( - use_flash and version.parse(torch.__version__) < version.parse("2.0.0") - ), "in order to use flash attention, you must be using pytorch 2.0 or above" - - # determine efficient attention configs for cuda and cpu - self.config = namedtuple("EfficientAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"]) - self.cpu_config = self.config(True, True, True) - self.cuda_config = None - - if not torch.cuda.is_available() or not use_flash: - return - - device_properties = torch.cuda.get_device_properties(torch.device("cuda")) - - if device_properties.major == 8 and device_properties.minor == 0: - print_once("A100 GPU detected, using flash attention if input tensor is on cuda") - self.cuda_config = self.config(True, False, False) - else: - print_once("Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda") - self.cuda_config = self.config(False, True, True) - - def get_mask(self, n, device): - if exists(self.mask) and self.mask.shape[-1] >= n: - return self.mask[:n, :n] - - mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1) - self.register_buffer("mask", mask, persistent=False) - return mask - - 
def flash_attn(self, q, k, v, mask=None): - _, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda - - # Recommended for multi-query single-key-value attention by Tri Dao - # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) - - if k.ndim == 3: - k = rearrange(k, "b ... -> b 1 ...").expand_as(q) - - if v.ndim == 3: - v = rearrange(v, "b ... -> b 1 ...").expand_as(q) - - # Check if mask exists and expand to compatible shape - # The mask is B L, so it would have to be expanded to B H N L - - if exists(mask): - mask = rearrange(mask, "b j -> b 1 1 j") - mask = mask.expand(-1, heads, q_len, -1) - - # Check if there is a compatible device for flash attention - - config = self.cuda_config if is_cuda else self.cpu_config - - # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale - - with torch.backends.cuda.sdp_kernel(**config._asdict()): - out = F.scaled_dot_product_attention( - q, k, v, attn_mask=mask, dropout_p=self.dropout if self.training else 0.0, is_causal=self.causal - ) - - return out - - def forward(self, q, k, v, mask=None): - """ - einstein notation - b - batch - h - heads - n, i, j - sequence length (base sequence length, source, target) - d - feature dimension - """ - - n, device = q.shape[-2], q.device - - scale = q.shape[-1] ** -0.5 - - if self.use_flash: - return self.flash_attn(q, k, v, mask=mask) - - kv_einsum_eq = "b j d" if k.ndim == 3 else "b h j d" - - # similarity - - sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale - - # key padding mask - - if exists(mask): - mask = rearrange(mask, "b j -> b 1 1 j") - sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max) - - # causal mask - - if self.causal: - causal_mask = self.get_mask(n, device) - sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) - - # attention - - attn = sim.softmax(dim=-1) - attn = self.attn_dropout(attn) - - # aggregate values - - out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v) - - return out - - -def Sequential(*mods): - return nn.Sequential(*filter(exists, mods)) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if callable(d) else d - - -class RMSNorm(nn.Module): - def __init__(self, dim, scale=True, dim_cond=None): - super().__init__() - self.cond = exists(dim_cond) - self.to_gamma_beta = nn.Linear(dim_cond, dim * 2) if self.cond else None - - self.scale = dim**0.5 - self.gamma = nn.Parameter(torch.ones(dim)) if scale else None - - def forward(self, x, cond=None): - gamma = default(self.gamma, 1) - out = F.normalize(x, dim=-1) * self.scale * gamma - - if not self.cond: - return out - - assert exists(cond) - gamma, beta = self.to_gamma_beta(cond).chunk(2, dim=-1) - gamma, beta = map(lambda t: rearrange(t, "b d -> b 1 d"), (gamma, beta)) - return out * gamma + beta - - -class CausalConv1d(nn.Conv1d): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - (kernel_size,) = self.kernel_size - (dilation,) = self.dilation - (stride,) = self.stride - - assert stride == 1 - self.causal_padding = dilation * (kernel_size - 1) - - def forward(self, x): - causal_padded_x = F.pad(x, (self.causal_padding, 0), value=0.0) - return super().forward(causal_padded_x) - - -class GEGLU(nn.Module): - def forward(self, x): - x, gate = x.chunk(2, dim=-1) - return F.gelu(gate) * x - - -def FeedForward(dim, mult=4, causal_conv=False): - dim_inner = int(dim * mult * 2 / 3) - - conv = None - if causal_conv: - conv = nn.Sequential( - Rearrange("b n d -> b d n"), - 
CausalConv1d(dim_inner, dim_inner, 3), - Rearrange("b d n -> b n d"), - ) - - return Sequential(nn.Linear(dim, dim_inner * 2), GEGLU(), conv, nn.Linear(dim_inner, dim)) - - -class PerceiverResampler(nn.Module): - def __init__( - self, - *, - dim, - depth=2, - dim_context=None, - num_latents=32, - dim_head=64, - heads=8, - ff_mult=4, - use_flash_attn=False, - ): - super().__init__() - dim_context = default(dim_context, dim) - - self.proj_context = nn.Linear(dim_context, dim) if dim_context != dim else nn.Identity() - - self.latents = nn.Parameter(torch.randn(num_latents, dim)) - nn.init.normal_(self.latents, std=0.02) - - self.layers = nn.ModuleList([]) - for _ in range(depth): - self.layers.append( - nn.ModuleList( - [ - Attention( - dim=dim, - dim_head=dim_head, - heads=heads, - use_flash=use_flash_attn, - cross_attn_include_queries=True, - ), - FeedForward(dim=dim, mult=ff_mult), - ] - ) - ) - - self.norm = RMSNorm(dim) - - def forward(self, x, mask=None): - batch = x.shape[0] - - x = self.proj_context(x) - - latents = repeat(self.latents, "n d -> b n d", b=batch) - - for attn, ff in self.layers: - latents = attn(latents, x, mask=mask) + latents - latents = ff(latents) + latents - - return self.norm(latents) - - -class Attention(nn.Module): - def __init__( - self, - dim, - *, - dim_context=None, - causal=False, - dim_head=64, - heads=8, - dropout=0.0, - use_flash=False, - cross_attn_include_queries=False, - ): - super().__init__() - self.scale = dim_head**-0.5 - self.heads = heads - self.cross_attn_include_queries = cross_attn_include_queries - - dim_inner = dim_head * heads - dim_context = default(dim_context, dim) - - self.attend = Attend(causal=causal, dropout=dropout, use_flash=use_flash) - self.to_q = nn.Linear(dim, dim_inner, bias=False) - self.to_kv = nn.Linear(dim_context, dim_inner * 2, bias=False) - self.to_out = nn.Linear(dim_inner, dim, bias=False) - - def forward(self, x, context=None, mask=None): - h, has_context = self.heads, exists(context) - - context = default(context, x) - - if has_context and self.cross_attn_include_queries: - context = torch.cat((x, context), dim=-2) - - q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim=-1)) - q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v)) - - out = self.attend(q, k, v, mask=mask) - - out = rearrange(out, "b h n d -> b n (h d)") - return self.to_out(out) diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_multilingual_train-d_vectors.py b/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_multilingual_train-d_vectors.py deleted file mode 100644 index fd58db534af914849f30ca821436f3aaabceabb8..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_vits_multilingual_train-d_vectors.py +++ /dev/null @@ -1,117 +0,0 @@ -import glob -import json -import os -import shutil - -from trainer import get_last_checkpoint - -from tests import get_device_id, get_tests_output_path, run_cli -from TTS.config.shared_configs import BaseDatasetConfig -from TTS.tts.configs.vits_config import VitsConfig - -config_path = os.path.join(get_tests_output_path(), "test_model_config.json") -output_path = os.path.join(get_tests_output_path(), "train_outputs") - - -dataset_config_en = BaseDatasetConfig( - formatter="ljspeech_test", - meta_file_train="metadata.csv", - meta_file_val="metadata.csv", - path="tests/data/ljspeech", - language="en", -) - -dataset_config_pt = BaseDatasetConfig( - formatter="ljspeech_test", - 
meta_file_train="metadata.csv", - meta_file_val="metadata.csv", - path="tests/data/ljspeech", - language="pt-br", -) - -config = VitsConfig( - batch_size=2, - eval_batch_size=2, - num_loader_workers=0, - num_eval_loader_workers=0, - text_cleaner="multilingual_cleaners", - use_phonemes=False, - phoneme_cache_path="tests/data/ljspeech/phoneme_cache/", - run_eval=True, - test_delay_epochs=-1, - epochs=1, - print_step=1, - print_eval=True, - test_sentences=[ - ["Be a voice, not an echo.", "ljspeech-0", None, "en"], - ["Be a voice, not an echo.", "ljspeech-1", None, "pt-br"], - ], - datasets=[dataset_config_en, dataset_config_en, dataset_config_en, dataset_config_pt], -) -# set audio config -config.audio.do_trim_silence = True -config.audio.trim_db = 60 - -# active multilingual mode -config.model_args.use_language_embedding = True -config.use_language_embedding = True - -# deactivate multispeaker mode -config.model_args.use_speaker_embedding = False -config.use_speaker_embedding = False - -# active multispeaker d-vec mode -config.model_args.use_d_vector_file = True -config.use_d_vector_file = True -config.model_args.d_vector_file = ["tests/data/ljspeech/speakers.json"] -config.d_vector_file = ["tests/data/ljspeech/speakers.json"] -config.model_args.d_vector_dim = 256 -config.d_vector_dim = 256 - -# duration predictor -config.model_args.use_sdp = True -config.use_sdp = True - -# activate language and speaker samplers -config.use_language_weighted_sampler = True -config.language_weighted_sampler_alpha = 10 -config.use_speaker_weighted_sampler = True -config.speaker_weighted_sampler_alpha = 5 - -config.save_json(config_path) - -# train the model for one epoch -command_train = ( - f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} " - f"--coqpit.output_path {output_path} " - "--coqpit.test_delay_epochs 0" -) -run_cli(command_train) - -# Find latest folder -continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime) - -# Inference using TTS API -continue_config_path = os.path.join(continue_path, "config.json") -continue_restore_path, _ = get_last_checkpoint(continue_path) -out_wav_path = os.path.join(get_tests_output_path(), "output.wav") -speaker_id = "ljspeech-1" -languae_id = "en" -continue_speakers_path = config.d_vector_file -continue_languages_path = os.path.join(continue_path, "language_ids.json") - -# Check integrity of the config -with open(continue_config_path, "r", encoding="utf-8") as f: - config_loaded = json.load(f) -assert config_loaded["characters"] is not None -assert config_loaded["output_path"] in continue_path -assert config_loaded["test_delay_epochs"] == 0 - -# Load the model and run inference -inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' 
--speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --language_ids_file_path {continue_languages_path} --language_idx {languae_id} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}" -run_cli(inference_command) - -# restore the model and continue training for one more epoch -command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} " -run_cli(command_train) -shutil.rmtree(continue_path) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/ObjectHandling.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/ObjectHandling.c deleted file mode 100644 index 864b658f7453abcb8077409a88d48c616cf7928d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/ObjectHandling.c +++ /dev/null @@ -1,2504 +0,0 @@ -/* - * General object operations and protocol implementations, - * including their specialisations for certain builtins. - * - * Optional optimisations for builtins are in Optimize.c. - * - * Required replacements of builtins are in Builtins.c. - */ - -/////////////// RaiseNoneIterError.proto /////////////// - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/////////////// RaiseNoneIterError /////////////// - -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/////////////// RaiseTooManyValuesToUnpack.proto /////////////// - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/////////////// RaiseTooManyValuesToUnpack /////////////// - -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/////////////// RaiseNeedMoreValuesToUnpack.proto /////////////// - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/////////////// RaiseNeedMoreValuesToUnpack /////////////// - -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/////////////// UnpackTupleError.proto /////////////// - -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ - -/////////////// UnpackTupleError /////////////// -//@requires: RaiseNoneIterError -//@requires: RaiseNeedMoreValuesToUnpack -//@requires: RaiseTooManyValuesToUnpack - -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -/////////////// UnpackItemEndCheck.proto /////////////// - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ - -/////////////// UnpackItemEndCheck /////////////// -//@requires: RaiseTooManyValuesToUnpack -//@requires: IterFinish - -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } else { - return __Pyx_IterFinish(); - } - return 0; -} - -/////////////// UnpackTuple2.proto /////////////// - -#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple) \ - (likely(is_tuple || PyTuple_Check(tuple)) ? \ - (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ? \ - __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) : \ - (__Pyx_UnpackTupleError(tuple, 2), -1)) : \ - __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple)) - -static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( - PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple); -static int __Pyx_unpack_tuple2_generic( - PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple); - -/////////////// UnpackTuple2 /////////////// -//@requires: UnpackItemEndCheck -//@requires: UnpackTupleError -//@requires: RaiseNeedMoreValuesToUnpack - -static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( - PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) { - PyObject *value1 = NULL, *value2 = NULL; -#if CYTHON_COMPILING_IN_PYPY - value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; - value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; -#else - value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1); - value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2); -#endif - if (decref_tuple) { - Py_DECREF(tuple); - } - - *pvalue1 = value1; - *pvalue2 = value2; - return 0; -#if CYTHON_COMPILING_IN_PYPY -bad: - Py_XDECREF(value1); - Py_XDECREF(value2); - if (decref_tuple) { Py_XDECREF(tuple); } - return -1; -#endif -} - -static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, - int has_known_size, int decref_tuple) { - Py_ssize_t index; - PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; - iternextfunc iternext; - - iter = PyObject_GetIter(tuple); - if (unlikely(!iter)) goto bad; - if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } - - iternext = Py_TYPE(iter)->tp_iternext; - value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } - value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } - if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; - - Py_DECREF(iter); - *pvalue1 = value1; - *pvalue2 = value2; - return 0; - -unpacking_failed: - if (!has_known_size && 
__Pyx_IterFinish() == 0) - __Pyx_RaiseNeedMoreValuesError(index); -bad: - Py_XDECREF(iter); - Py_XDECREF(value1); - Py_XDECREF(value2); - if (decref_tuple) { Py_XDECREF(tuple); } - return -1; -} - - -/////////////// IterNext.proto /////////////// - -#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) -static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); /*proto*/ - -/////////////// IterNext /////////////// -//@requires: Exceptions.c::PyThreadStateGet -//@requires: Exceptions.c::PyErrFetchRestore - -static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) { - PyObject* exc_type; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - exc_type = __Pyx_PyErr_Occurred(); - if (unlikely(exc_type)) { - if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(defval); - return defval; - } - if (defval) { - Py_INCREF(defval); - return defval; - } - __Pyx_PyErr_SetNone(PyExc_StopIteration); - return NULL; -} - -static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) { - PyErr_Format(PyExc_TypeError, - "%.200s object is not an iterator", Py_TYPE(iterator)->tp_name); -} - -// originally copied from Py3's builtin_next() -static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) { - PyObject* next; - // We always do a quick slot check because calling PyIter_Check() is so wasteful. - iternextfunc iternext = Py_TYPE(iterator)->tp_iternext; - if (likely(iternext)) { -#if CYTHON_USE_TYPE_SLOTS - next = iternext(iterator); - if (likely(next)) - return next; - #if PY_VERSION_HEX >= 0x02070000 - if (unlikely(iternext == &_PyObject_NextNotImplemented)) - return NULL; - #endif -#else - // Since the slot was set, assume that PyIter_Next() will likely succeed, and properly fail otherwise. - // Note: PyIter_Next() crashes in CPython if "tp_iternext" is NULL. - next = PyIter_Next(iterator); - if (likely(next)) - return next; -#endif - } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) { - // If CYTHON_USE_TYPE_SLOTS, then the slot was not set and we don't have an iterable. - // Otherwise, don't trust "tp_iternext" and rely on PyIter_Check(). - __Pyx_PyIter_Next_ErrorNoIterator(iterator); - return NULL; - } -#if !CYTHON_USE_TYPE_SLOTS - else { - // We have an iterator with an empty "tp_iternext", but didn't call next() on it yet. - next = PyIter_Next(iterator); - if (likely(next)) - return next; - } -#endif - return __Pyx_PyIter_Next2Default(defval); -} - -/////////////// IterFinish.proto /////////////// - -static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ - -/////////////// IterFinish /////////////// - -// When PyIter_Next(iter) has returned NULL in order to signal termination, -// this function does the right cleanup and returns 0 on success. If it -// detects an error that occurred in the iterator, it returns -1. 
- -static CYTHON_INLINE int __Pyx_IterFinish(void) { -#if CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* exc_type = tstate->curexc_type; - if (unlikely(exc_type)) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { - PyObject *exc_value, *exc_tb; - exc_value = tstate->curexc_value; - exc_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; - Py_DECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_tb); - return 0; - } else { - return -1; - } - } - return 0; -#else - if (unlikely(PyErr_Occurred())) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -#endif -} - - -/////////////// ObjectGetItem.proto /////////////// - -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);/*proto*/ -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/////////////// ObjectGetItem /////////////// -// //@requires: GetItemInt - added in IndexNode as it uses templating. - -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - - // Error handling code -- only manage OverflowError differently. - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} - -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - - -/////////////// DictGetItem.proto /////////////// - -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/ - -#define __Pyx_PyObject_Dict_GetItem(obj, name) \ - (likely(PyDict_CheckExact(obj)) ? \ - __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) - -#else -#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) -#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) -#endif - -/////////////// DictGetItem /////////////// - -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { - PyObject *value; - value = PyDict_GetItemWithError(d, key); - if (unlikely(!value)) { - if (!PyErr_Occurred()) { - if (unlikely(PyTuple_Check(key))) { - // CPython interprets tuples as separate arguments => must wrap them in another tuple. - PyObject* args = PyTuple_Pack(1, key); - if (likely(args)) { - PyErr_SetObject(PyExc_KeyError, args); - Py_DECREF(args); - } - } else { - // Avoid tuple packing if possible. 
- PyErr_SetObject(PyExc_KeyError, key); - } - } - return NULL; - } - Py_INCREF(value); - return value; -} -#endif - -/////////////// GetItemInt.proto /////////////// - -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \ - (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) - -{{for type in ['List', 'Tuple']}} -#define __Pyx_GetItemInt_{{type}}(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ - __Pyx_GetItemInt_{{type}}_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ - (PyErr_SetString(PyExc_IndexError, "{{ type.lower() }} index out of range"), (PyObject*)NULL)) - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -{{endfor}} - -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/////////////// GetItemInt /////////////// - -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} - -{{for type in ['List', 'Tuple']}} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += Py{{type}}_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, Py{{type}}_GET_SIZE(o)))) { - PyObject *r = Py{{type}}_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -{{endfor}} - -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - // inlined PySequence_GetItem() + special cased length overflow - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - // if length > max(Py_ssize_t), maybe the object can wrap around itself? 
- if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/////////////// SetItemInt.proto /////////////// - -#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ - __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) : \ - (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \ - __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) - -static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, - int is_list, int wraparound, int boundscheck); - -/////////////// SetItemInt /////////////// - -static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { - int r; - if (!j) return -1; - r = PyObject_SetItem(o, j, v); - Py_DECREF(j); - return r; -} - -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, - CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { - PyObject* old = PyList_GET_ITEM(o, n); - Py_INCREF(v); - PyList_SET_ITEM(o, n, v); - Py_DECREF(old); - return 1; - } - } else { - // inlined PySequence_SetItem() + special cased length overflow - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_ass_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return -1; - PyErr_Clear(); - } - } - return m->sq_ass_item(o, i, v); - } - } -#else -#if CYTHON_COMPILING_IN_PYPY - if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) -#else - if (is_list || PySequence_Check(o)) -#endif - { - return PySequence_SetItem(o, i, v); - } -#endif - return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); -} - - -/////////////// DelItemInt.proto /////////////// - -#define __Pyx_DelItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ - __Pyx_DelItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound) : \ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \ - __Pyx_DelItem_Generic(o, to_py_func(i)))) - -static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j); -static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound); - -/////////////// DelItemInt /////////////// - -static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) { - int r; - if (!j) return -1; - r = PyObject_DelItem(o, j); - Py_DECREF(j); - return r; -} - -static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, - CYTHON_UNUSED int is_list, CYTHON_NCP_UNUSED int wraparound) { -#if !CYTHON_USE_TYPE_SLOTS - if (is_list || PySequence_Check(o)) { - return PySequence_DelItem(o, i); - } -#else - // inlined PySequence_DelItem() + special cased length overflow - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_ass_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return -1; - PyErr_Clear(); - } - } - return m->sq_ass_item(o, i, (PyObject *)NULL); - } -#endif - return __Pyx_DelItem_Generic(o, PyInt_FromSsize_t(i)); -} - - -/////////////// SliceObject.proto /////////////// - -// we pass pointer addresses to show the C compiler what is NULL and what isn't -{{if access == 'Get'}} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( - PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); -{{else}} -#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) \ - __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) - -// we pass pointer addresses to show the C compiler what is NULL and what isn't -static CYTHON_INLINE int __Pyx_PyObject_SetSlice( - PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); -{{endif}} - -/////////////// SliceObject /////////////// - -{{if access == 'Get'}} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, -{{else}} -static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, -{{endif}} - Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, - int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { -#if CYTHON_USE_TYPE_SLOTS - PyMappingMethods* mp; -#if PY_MAJOR_VERSION < 3 - PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; - if (likely(ms && ms->sq_{{if access == 'Set'}}ass_{{endif}}slice)) { - if (!has_cstart) { - if (_py_start && (*_py_start != Py_None)) { - cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); - if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstart = 0; - } - if (!has_cstop) { - if (_py_stop && (*_py_stop != Py_None)) { - cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); - if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstop = PY_SSIZE_T_MAX; - } - if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { - Py_ssize_t l = ms->sq_length(obj); - if (likely(l >= 0)) { - if (cstop < 0) { - cstop += l; - if (cstop < 0) cstop = 
0; - } - if (cstart < 0) { - cstart += l; - if (cstart < 0) cstart = 0; - } - } else { - // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - goto bad; - PyErr_Clear(); - } - } -{{if access == 'Get'}} - return ms->sq_slice(obj, cstart, cstop); -{{else}} - return ms->sq_ass_slice(obj, cstart, cstop, value); -{{endif}} - } -#endif - - mp = Py_TYPE(obj)->tp_as_mapping; -{{if access == 'Get'}} - if (likely(mp && mp->mp_subscript)) -{{else}} - if (likely(mp && mp->mp_ass_subscript)) -{{endif}} -#endif - { - {{if access == 'Get'}}PyObject*{{else}}int{{endif}} result; - PyObject *py_slice, *py_start, *py_stop; - if (_py_slice) { - py_slice = *_py_slice; - } else { - PyObject* owned_start = NULL; - PyObject* owned_stop = NULL; - if (_py_start) { - py_start = *_py_start; - } else { - if (has_cstart) { - owned_start = py_start = PyInt_FromSsize_t(cstart); - if (unlikely(!py_start)) goto bad; - } else - py_start = Py_None; - } - if (_py_stop) { - py_stop = *_py_stop; - } else { - if (has_cstop) { - owned_stop = py_stop = PyInt_FromSsize_t(cstop); - if (unlikely(!py_stop)) { - Py_XDECREF(owned_start); - goto bad; - } - } else - py_stop = Py_None; - } - py_slice = PySlice_New(py_start, py_stop, Py_None); - Py_XDECREF(owned_start); - Py_XDECREF(owned_stop); - if (unlikely(!py_slice)) goto bad; - } -#if CYTHON_USE_TYPE_SLOTS -{{if access == 'Get'}} - result = mp->mp_subscript(obj, py_slice); -#else - result = PyObject_GetItem(obj, py_slice); -{{else}} - result = mp->mp_ass_subscript(obj, py_slice, value); -#else - result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice); -{{endif}} -#endif - if (!_py_slice) { - Py_DECREF(py_slice); - } - return result; - } - PyErr_Format(PyExc_TypeError, -{{if access == 'Get'}} - "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); -{{else}} - "'%.200s' object does not support slice %.10s", - Py_TYPE(obj)->tp_name, value ? 
"assignment" : "deletion"); -{{endif}} - -bad: - return {{if access == 'Get'}}NULL{{else}}-1{{endif}}; -} - - -/////////////// SliceTupleAndList.proto /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyList_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop); -static CYTHON_INLINE PyObject* __Pyx_PyTuple_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop); -#else -#define __Pyx_PyList_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop) -#define __Pyx_PyTuple_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop) -#endif - -/////////////// SliceTupleAndList /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE void __Pyx_crop_slice(Py_ssize_t* _start, Py_ssize_t* _stop, Py_ssize_t* _length) { - Py_ssize_t start = *_start, stop = *_stop, length = *_length; - if (start < 0) { - start += length; - if (start < 0) - start = 0; - } - - if (stop < 0) - stop += length; - else if (stop > length) - stop = length; - - *_length = stop - start; - *_start = start; - *_stop = stop; -} - -static CYTHON_INLINE void __Pyx_copy_object_array(PyObject** CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { - PyObject *v; - Py_ssize_t i; - for (i = 0; i < length; i++) { - v = dest[i] = src[i]; - Py_INCREF(v); - } -} - -{{for type in ['List', 'Tuple']}} -static CYTHON_INLINE PyObject* __Pyx_Py{{type}}_GetSlice( - PyObject* src, Py_ssize_t start, Py_ssize_t stop) { - PyObject* dest; - Py_ssize_t length = Py{{type}}_GET_SIZE(src); - __Pyx_crop_slice(&start, &stop, &length); - if (unlikely(length <= 0)) - return Py{{type}}_New(0); - - dest = Py{{type}}_New(length); - if (unlikely(!dest)) - return NULL; - __Pyx_copy_object_array( - ((Py{{type}}Object*)src)->ob_item + start, - ((Py{{type}}Object*)dest)->ob_item, - length); - return dest; -} -{{endfor}} -#endif - - -/////////////// CalculateMetaclass.proto /////////////// - -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); - -/////////////// CalculateMetaclass /////////////// - -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { - Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); - for (i=0; i < nbases; i++) { - PyTypeObject *tmptype; - PyObject *tmp = PyTuple_GET_ITEM(bases, i); - tmptype = Py_TYPE(tmp); -#if PY_MAJOR_VERSION < 3 - if (tmptype == &PyClass_Type) - continue; -#endif - if (!metaclass) { - metaclass = tmptype; - continue; - } - if (PyType_IsSubtype(metaclass, tmptype)) - continue; - if (PyType_IsSubtype(tmptype, metaclass)) { - metaclass = tmptype; - continue; - } - // else: - PyErr_SetString(PyExc_TypeError, - "metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases"); - return NULL; - } - if (!metaclass) { -#if PY_MAJOR_VERSION < 3 - metaclass = &PyClass_Type; -#else - metaclass = &PyType_Type; -#endif - } - // make owned reference - Py_INCREF((PyObject*) metaclass); - return (PyObject*) metaclass; -} - - -/////////////// FindInheritedMetaclass.proto /////////////// - -static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases); /*proto*/ - -/////////////// FindInheritedMetaclass /////////////// -//@requires: PyObjectGetAttrStr -//@requires: CalculateMetaclass - -static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) { - PyObject *metaclass; - if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { - PyTypeObject *metatype; -#if CYTHON_ASSUME_SAFE_MACROS && 
!CYTHON_AVOID_BORROWED_REFS - PyObject *base = PyTuple_GET_ITEM(bases, 0); -#else - PyObject *base = PySequence_ITEM(bases, 0); -#endif -#if PY_MAJOR_VERSION < 3 - PyObject* basetype = __Pyx_PyObject_GetAttrStr(base, PYIDENT("__class__")); - if (basetype) { - metatype = (PyType_Check(basetype)) ? ((PyTypeObject*) basetype) : NULL; - } else { - PyErr_Clear(); - metatype = Py_TYPE(base); - basetype = (PyObject*) metatype; - Py_INCREF(basetype); - } -#else - metatype = Py_TYPE(base); -#endif - metaclass = __Pyx_CalculateMetaclass(metatype, bases); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(base); -#endif -#if PY_MAJOR_VERSION < 3 - Py_DECREF(basetype); -#endif - } else { - // no bases => use default metaclass -#if PY_MAJOR_VERSION < 3 - metaclass = (PyObject *) &PyClass_Type; -#else - metaclass = (PyObject *) &PyType_Type; -#endif - Py_INCREF(metaclass); - } - return metaclass; -} - -/////////////// Py3MetaclassGet.proto /////////////// - -static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw); /*proto*/ - -/////////////// Py3MetaclassGet /////////////// -//@requires: FindInheritedMetaclass -//@requires: CalculateMetaclass - -static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) { - PyObject *metaclass = mkw ? __Pyx_PyDict_GetItemStr(mkw, PYIDENT("metaclass")) : NULL; - if (metaclass) { - Py_INCREF(metaclass); - if (PyDict_DelItem(mkw, PYIDENT("metaclass")) < 0) { - Py_DECREF(metaclass); - return NULL; - } - if (PyType_Check(metaclass)) { - PyObject* orig = metaclass; - metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); - Py_DECREF(orig); - } - return metaclass; - } - return __Pyx_FindInheritedMetaclass(bases); -} - -/////////////// CreateClass.proto /////////////// - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *qualname, PyObject *modname); /*proto*/ - -/////////////// CreateClass /////////////// -//@requires: FindInheritedMetaclass -//@requires: CalculateMetaclass - -static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, - PyObject *qualname, PyObject *modname) { - PyObject *result; - PyObject *metaclass; - - if (PyDict_SetItem(dict, PYIDENT("__module__"), modname) < 0) - return NULL; - if (PyDict_SetItem(dict, PYIDENT("__qualname__"), qualname) < 0) - return NULL; - - /* Python2 __metaclass__ */ - metaclass = __Pyx_PyDict_GetItemStr(dict, PYIDENT("__metaclass__")); - if (metaclass) { - Py_INCREF(metaclass); - if (PyType_Check(metaclass)) { - PyObject* orig = metaclass; - metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); - Py_DECREF(orig); - } - } else { - metaclass = __Pyx_FindInheritedMetaclass(bases); - } - if (unlikely(!metaclass)) - return NULL; - result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL); - Py_DECREF(metaclass); - return result; -} - -/////////////// Py3ClassCreate.proto /////////////// - -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, - PyObject *mkw, PyObject *modname, PyObject *doc); /*proto*/ -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, - PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /*proto*/ - -/////////////// Py3ClassCreate /////////////// -//@requires: PyObjectGetAttrStr -//@requires: CalculateMetaclass - -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, - 
PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { - PyObject *ns; - if (metaclass) { - PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, PYIDENT("__prepare__")); - if (prep) { - PyObject *pargs = PyTuple_Pack(2, name, bases); - if (unlikely(!pargs)) { - Py_DECREF(prep); - return NULL; - } - ns = PyObject_Call(prep, pargs, mkw); - Py_DECREF(prep); - Py_DECREF(pargs); - } else { - if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - PyErr_Clear(); - ns = PyDict_New(); - } - } else { - ns = PyDict_New(); - } - - if (unlikely(!ns)) - return NULL; - - /* Required here to emulate assignment order */ - if (unlikely(PyObject_SetItem(ns, PYIDENT("__module__"), modname) < 0)) goto bad; - if (unlikely(PyObject_SetItem(ns, PYIDENT("__qualname__"), qualname) < 0)) goto bad; - if (unlikely(doc && PyObject_SetItem(ns, PYIDENT("__doc__"), doc) < 0)) goto bad; - return ns; -bad: - Py_DECREF(ns); - return NULL; -} - -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, - PyObject *dict, PyObject *mkw, - int calculate_metaclass, int allow_py2_metaclass) { - PyObject *result, *margs; - PyObject *owned_metaclass = NULL; - if (allow_py2_metaclass) { - /* honour Python2 __metaclass__ for backward compatibility */ - owned_metaclass = PyObject_GetItem(dict, PYIDENT("__metaclass__")); - if (owned_metaclass) { - metaclass = owned_metaclass; - } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { - PyErr_Clear(); - } else { - return NULL; - } - } - if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { - metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); - Py_XDECREF(owned_metaclass); - if (unlikely(!metaclass)) - return NULL; - owned_metaclass = metaclass; - } - margs = PyTuple_Pack(3, name, bases, dict); - if (unlikely(!margs)) { - result = NULL; - } else { - result = PyObject_Call(metaclass, margs, mkw); - Py_DECREF(margs); - } - Py_XDECREF(owned_metaclass); - return result; -} - -/////////////// ExtTypeTest.proto /////////////// - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ - -/////////////// ExtTypeTest /////////////// - -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/////////////// CallableCheck.proto /////////////// - -#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyCallable_Check(obj) (Py_TYPE(obj)->tp_call != NULL) -#else -#define __Pyx_PyCallable_Check(obj) PyCallable_Check(obj) -#endif - -/////////////// PyDictContains.proto /////////////// - -static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { - int result = PyDict_Contains(dict, item); - return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); -} - -/////////////// PySetContains.proto /////////////// - -static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq); /* proto */ - -/////////////// PySetContains /////////////// -//@requires: Builtins.c::pyfrozenset_new - -static int __Pyx_PySet_ContainsUnhashable(PyObject *set, PyObject *key) { - int result = -1; - if (PySet_Check(key) && PyErr_ExceptionMatches(PyExc_TypeError)) { - /* Convert key to frozenset */ - PyObject *tmpkey; - PyErr_Clear(); - tmpkey = __Pyx_PyFrozenSet_New(key); - if (tmpkey != NULL) { - result = PySet_Contains(set, tmpkey); - Py_DECREF(tmpkey); - } - } - return result; -} - -static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq) { - int result = PySet_Contains(set, key); - - if (unlikely(result < 0)) { - result = __Pyx_PySet_ContainsUnhashable(set, key); - } - return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); -} - -/////////////// PySequenceContains.proto /////////////// - -static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { - int result = PySequence_Contains(seq, item); - return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); -} - -/////////////// PyBoolOrNullFromLong.proto /////////////// - -static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) { - return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b); -} - -/////////////// GetBuiltinName.proto /////////////// - -static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ - -/////////////// GetBuiltinName /////////////// -//@requires: PyObjectGetAttrStr -//@substitute: naming - -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr($builtins_cname, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/////////////// GetNameInClass.proto /////////////// - -#define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) -static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/ - -/////////////// GetNameInClass /////////////// -//@requires: PyObjectGetAttrStr -//@requires: GetModuleGlobalName -//@requires: Exceptions.c::PyThreadStateGet -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyErrExceptionMatches - -static PyObject *__Pyx_GetGlobalNameAfterAttributeLookup(PyObject *name) { - PyObject *result; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - __Pyx_GetModuleGlobalNameUncached(result, name); - return result; -} - -static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { - PyObject *result; - result = __Pyx_PyObject_GetAttrStr(nmspace, name); - if (!result) { - result = __Pyx_GetGlobalNameAfterAttributeLookup(name); - } - return result; -} - - -/////////////// SetNameInClass.proto /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 -// Identifier names are always interned and have a pre-calculated hash value. -#define __Pyx_SetNameInClass(ns, name, value) \ - (likely(PyDict_CheckExact(ns)) ? 
_PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) -#elif CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_SetNameInClass(ns, name, value) \ - (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) -#else -#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) -#endif - - -/////////////// GetModuleGlobalName.proto /////////////// -//@requires: PyDictVersioning -//@substitute: naming - -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) { \ - static PY_UINT64_T __pyx_dict_version = 0; \ - static PyObject *__pyx_dict_cached_value = NULL; \ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION($moddict_cname))) ? \ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) : \ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) { \ - PY_UINT64_T __pyx_dict_version; \ - PyObject *__pyx_dict_cached_value; \ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); /*proto*/ -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); /*proto*/ -#endif - - -/////////////// GetModuleGlobalName /////////////// -//@requires: GetBuiltinName -//@substitute: naming - -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - // Identifier names are always interned and have a pre-calculated hash value. 
- result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem($moddict_cname, name); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem($moddict_cname, name); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -//////////////////// GetAttr.proto //////////////////// - -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/ - -//////////////////// GetAttr //////////////////// -//@requires: PyObjectGetAttrStr - -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/////////////// PyObjectLookupSpecial.proto /////////////// -//@requires: PyObjectGetAttrStr - -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) { - PyObject *res; - PyTypeObject *tp = Py_TYPE(obj); -#if PY_MAJOR_VERSION < 3 - if (unlikely(PyInstance_Check(obj))) - return __Pyx_PyObject_GetAttrStr(obj, attr_name); -#endif - // adapted from CPython's special_lookup() in ceval.c - res = _PyType_Lookup(tp, attr_name); - if (likely(res)) { - descrgetfunc f = Py_TYPE(res)->tp_descr_get; - if (!f) { - Py_INCREF(res); - } else { - res = f(res, obj, (PyObject *)tp); - } - } else { - PyErr_SetObject(PyExc_AttributeError, attr_name); - } - return res; -} -#else -#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) -#endif - - -/////////////// PyObject_GenericGetAttrNoDict.proto /////////////// - -// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7. -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -// No-args macro to allow function pointer assignment. -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/////////////// PyObject_GenericGetAttrNoDict /////////////// - -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 - -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, attr_name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(attr_name)); -#endif - return NULL; -} - -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - // Copied and adapted from _PyObject_GenericGetAttrWithDict() in CPython 2.6/3.7. - // To be used in the "tp_getattro" slot of extension types that have no instance dict and cannot be subclassed. 
- PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - - Py_INCREF(descr); - - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - // Optimise for the non-descriptor case because it is faster. - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - - -/////////////// PyObject_GenericGetAttr.proto /////////////// - -// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7. -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -// No-args macro to allow function pointer assignment. -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/////////////// PyObject_GenericGetAttr /////////////// -//@requires: PyObject_GenericGetAttrNoDict - -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - - -/////////////// PyObjectGetAttrStrNoError.proto /////////////// - -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);/*proto*/ - -/////////////// PyObjectGetAttrStrNoError /////////////// -//@requires: PyObjectGetAttrStr -//@requires: Exceptions.c::PyThreadStateGet -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyErrExceptionMatches - -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} - -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - // _PyObject_GenericGetAttrWithDict() in CPython 3.7+ can avoid raising the AttributeError. 
- // See https://bugs.python.org/issue32544 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - - -/////////////// PyObjectGetAttrStr.proto /////////////// - -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/ -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/////////////// PyObjectGetAttrStr /////////////// - -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - - -/////////////// PyObjectSetAttrStr.proto /////////////// - -#if CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/ -#else -#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) -#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) -#endif - -/////////////// PyObjectSetAttrStr /////////////// - -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_setattro)) - return tp->tp_setattro(obj, attr_name, value); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_setattr)) - return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); -#endif - return PyObject_SetAttr(obj, attr_name, value); -} -#endif - - -/////////////// PyObjectGetMethod.proto /////////////// - -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);/*proto*/ - -/////////////// PyObjectGetMethod /////////////// -//@requires: PyObjectGetAttrStr - -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - // Copied from _PyObject_GetMethod() in CPython 3.7 - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - - assert (*method == NULL); - - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); - // Repeating the condition below accommodates for MSVC's inability to test macros inside of macro expansions. -#if PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type))) - #endif -#else - // "PyMethodDescr_Type" is not part of the C-API in Py2. 
- #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - - if (meth_found) { - *method = descr; - return 1; - } - - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - - if (descr != NULL) { - *method = descr; - return 0; - } - - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(name)); -#endif - return 0; - -// Generic fallback implementation using normal attribute lookup. -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; -#endif - -try_unpack: -#if CYTHON_UNPACK_METHODS - // Even if we failed to avoid creating a bound method object, it's still worth unpacking it now, if possible. - if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - - -/////////////// UnpackUnboundCMethod.proto /////////////// - -typedef struct { - PyObject *type; - PyObject **method_name; - // "func" is set on first access (direct C function pointer) - PyCFunction func; - // "method" is set on first access (fallback) - PyObject *method; - int flag; -} __Pyx_CachedCFunction; - -/////////////// UnpackUnboundCMethod /////////////// -//@requires: PyObjectGetAttrStr - -static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { - PyObject *method; - method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); - if (unlikely(!method)) - return -1; - target->method = method; -#if CYTHON_COMPILING_IN_CPYTHON - #if PY_MAJOR_VERSION >= 3 - // method dscriptor type isn't exported in Py2.x, cannot easily check the type there - if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) - #endif - { - PyMethodDescrObject *descr = (PyMethodDescrObject*) method; - target->func = descr->d_method->ml_meth; - target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); - } -#endif - return 0; -} - - -/////////////// CallUnboundCMethod0.proto /////////////// -//@substitute: naming - -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/ -#if CYTHON_COMPILING_IN_CPYTHON -// FASTCALL methods receive "&empty_tuple" as simple "PyObject[0]*" -#define __Pyx_CallUnboundCMethod0(cfunc, self) \ - (likely((cfunc)->func) ? \ - (likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \ - (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \ - (PY_VERSION_HEX >= 0x030700A0 ? 
\ - (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0) : \ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \ - (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \ - (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, $empty_tuple, NULL)) : \ - ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \ - __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \ - __Pyx__CallUnboundCMethod0(cfunc, self)) -#else -#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) -#endif - -/////////////// CallUnboundCMethod0 /////////////// -//@requires: UnpackUnboundCMethod -//@requires: PyObjectCall - -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { - PyObject *args, *result = NULL; - if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_ASSUME_SAFE_MACROS - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); -#else - args = PyTuple_Pack(1, self); - if (unlikely(!args)) goto bad; -#endif - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - Py_DECREF(args); -bad: - return result; -} - - -/////////////// CallUnboundCMethod1.proto /////////////// - -static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ -#else -#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg) -#endif - -/////////////// CallUnboundCMethod1 /////////////// -//@requires: UnpackUnboundCMethod -//@requires: PyObjectCall - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) { - if (likely(cfunc->func)) { - int flag = cfunc->flag; - // Not using #ifdefs for PY_VERSION_HEX to avoid C compiler warnings about unused functions. 
- if (flag == METH_O) { - return (*(cfunc->func))(self, arg); - } else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) { - if (PY_VERSION_HEX >= 0x030700A0) { - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1); - } else { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); - } - } else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); - } - } - return __Pyx__CallUnboundCMethod1(cfunc, self, arg); -} -#endif - -static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){ - PyObject *args, *result = NULL; - if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_COMPILING_IN_CPYTHON - if (cfunc->func && (cfunc->flag & METH_VARARGS)) { - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); - else - result = (*cfunc->func)(self, args); - } else { - args = PyTuple_New(2); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 1, arg); - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - } -#else - args = PyTuple_Pack(2, self, arg); - if (unlikely(!args)) goto bad; - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); -#endif -bad: - Py_XDECREF(args); - return result; -} - - -/////////////// CallUnboundCMethod2.proto /////////////// - -static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/ - -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 -static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); /*proto*/ -#else -#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2) -#endif - -/////////////// CallUnboundCMethod2 /////////////// -//@requires: UnpackUnboundCMethod -//@requires: PyObjectCall - -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 -static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) { - if (likely(cfunc->func)) { - PyObject *args[2] = {arg1, arg2}; - if (cfunc->flag == METH_FASTCALL) { - #if PY_VERSION_HEX >= 0x030700A0 - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2); - #else - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); - #endif - } - #if PY_VERSION_HEX >= 0x030700A0 - if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS)) - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); - #endif - } - return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2); -} -#endif - -static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){ - PyObject *args, *result = NULL; - if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_COMPILING_IN_CPYTHON - if (cfunc->func && (cfunc->flag & METH_VARARGS)) { - args = PyTuple_New(2); - 
if (unlikely(!args)) goto bad; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); - else - result = (*cfunc->func)(self, args); - } else { - args = PyTuple_New(3); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 1, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 2, arg2); - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - } -#else - args = PyTuple_Pack(3, self, arg1, arg2); - if (unlikely(!args)) goto bad; - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); -#endif -bad: - Py_XDECREF(args); - return result; -} - - -/////////////// PyObjectCallMethod0.proto /////////////// - -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/ - -/////////////// PyObjectCallMethod0 /////////////// -//@requires: PyObjectGetMethod -//@requires: PyObjectCallOneArg -//@requires: PyObjectCallNoArg - -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { - PyObject *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_CallOneArg(method, obj); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) goto bad; - result = __Pyx_PyObject_CallNoArg(method); - Py_DECREF(method); -bad: - return result; -} - - -/////////////// PyObjectCallMethod1.proto /////////////// - -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); /*proto*/ - -/////////////// PyObjectCallMethod1 /////////////// -//@requires: PyObjectGetMethod -//@requires: PyObjectCallOneArg -//@requires: PyObjectCall2Args - -static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { - // Separate function to avoid excessive inlining. 
- PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); - Py_DECREF(method); - return result; -} - -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { - PyObject *method = NULL, *result; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call2Args(method, obj, arg); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) return NULL; - return __Pyx__PyObject_CallMethod1(method, arg); -} - - -/////////////// PyObjectCallMethod2.proto /////////////// - -static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name, PyObject* arg1, PyObject* arg2); /*proto*/ - -/////////////// PyObjectCallMethod2 /////////////// -//@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall -//@requires: PyObjectCall2Args - -static PyObject* __Pyx_PyObject_Call3Args(PyObject* function, PyObject* arg1, PyObject* arg2, PyObject* arg3) { - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[3] = {arg1, arg2, arg3}; - return __Pyx_PyFunction_FastCall(function, args, 3); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[3] = {arg1, arg2, arg3}; - return __Pyx_PyFunction_FastCall(function, args, 3); - } - #endif - - args = PyTuple_New(3); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(arg3); - PyTuple_SET_ITEM(args, 2, arg3); - - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - return result; -} - -static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name, PyObject* arg1, PyObject* arg2) { - PyObject *args, *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call3Args(method, obj, arg1, arg2); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) return NULL; - result = __Pyx_PyObject_Call2Args(method, arg1, arg2); - Py_DECREF(method); - return result; -} - - -/////////////// tp_new.proto /////////////// - -#define __Pyx_tp_new(type_obj, args) __Pyx_tp_new_kwargs(type_obj, args, NULL) -static CYTHON_INLINE PyObject* __Pyx_tp_new_kwargs(PyObject* type_obj, PyObject* args, PyObject* kwargs) { - return (PyObject*) (((PyTypeObject*)type_obj)->tp_new((PyTypeObject*)type_obj, args, kwargs)); -} - - -/////////////// PyObjectCall.proto /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/////////////// PyObjectCall /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - - -/////////////// PyObjectCallMethO.proto /////////////// - -#if 
CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); /*proto*/ -#endif - -/////////////// PyObjectCallMethO /////////////// - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - - -/////////////// PyFunctionFastCall.proto /////////////// - -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs) \ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) - -// let's assume that the non-public C-API function might still change during the 3.6 beta phase -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif - -// Backport from Python 3 -// Assert a build-time dependency, as an expression. -// Your compile will fail if the condition isn't true, or can't be evaluated -// by the compiler. This can be used in an expression: its value is 0. -// Example: -// #define foo_to_char(foo) \ -// ((char *)(foo) \ -// + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0)) -// -// Written by Rusty Russell, public domain, http://ccodearchive.net/ -#define __Pyx_BUILD_ASSERT_EXPR(cond) \ - (sizeof(char [1 - 2*!(cond)]) - 1) - -#ifndef Py_MEMBER_SIZE -// Get the size of a structure member in bytes -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - -#if CYTHON_FAST_PYCALL - // Initialised by module init code. - static size_t __pyx_pyframe_localsplus_offset = 0; - - #include "frameobject.h" -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif - - // This is the long runtime version of - // #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) - // offsetof(PyFrameObject, f_localsplus) differs between regular C-Python and Stackless Python. - // Therefore the offset is computed at run time from PyFrame_type.tp_basicsize. That is feasible, - // because f_localsplus is the last field of PyFrameObject (checked by Py_BUILD_ASSERT_EXPR below). 
- #define __Pxy_PyFrame_Initialize_Offsets() \ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)), \ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame) \ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif // CYTHON_FAST_PYCALL -#endif - - -/////////////// PyFunctionFastCall /////////////// -// copied from CPython 3.6 ceval.c - -#if CYTHON_FAST_PYCALL - -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - - return result; -} - - -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; - //#if PY_VERSION_HEX >= 0x03050000 - //PyObject *name, *qualname; - //#endif -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - /* Fast paths */ - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); - //#if PY_VERSION_HEX >= 0x03050000 - //name = ((PyFunctionObject *)func) -> func_name; - //qualname = ((PyFunctionObject *)func) -> func_qualname; - //#endif -#endif - - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } - - //#if PY_VERSION_HEX >= 0x03050000 - //return _PyEval_EvalCodeWithName((PyObject*)co, globals, (PyObject *)NULL, - // args, nargs, - // NULL, 0, - // d, nd, kwdefs, - // closure, name, qualname); - //#elif PY_MAJOR_VERSION >= 3 -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); - -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif /* CPython < 3.6 */ -#endif /* CYTHON_FAST_PYCALL */ - - -/////////////// PyCFunctionFastCall.proto /////////////// - -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/////////////// PyCFunctionFastCall /////////////// - -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return 
(*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif /* CYTHON_FAST_PYCCALL */ - - -/////////////// PyObjectCall2Args.proto /////////////// - -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /*proto*/ - -/////////////// PyObjectCall2Args /////////////// -//@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall - -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - - -/////////////// PyObjectCallOneArg.proto /////////////// - -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /*proto*/ - -/////////////// PyObjectCallOneArg /////////////// -//@requires: PyObjectCallMethO -//@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall - -#if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} - -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - // fast and simple case that we are optimising for - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (__Pyx_PyFastCFunction_Check(func)) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - - -/////////////// PyObjectCallNoArg.proto /////////////// -//@requires: PyObjectCall -//@substitute: naming - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); /*proto*/ -#else -#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, $empty_tuple, NULL) -#endif - -/////////////// PyObjectCallNoArg /////////////// -//@requires: PyObjectCallMethO -//@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@substitute: naming - -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, NULL, 0); - } -#endif 
-#ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) -#else - if (likely(PyCFunction_Check(func))) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - // fast and simple case that we are optimising for - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - return __Pyx_PyObject_Call(func, $empty_tuple, NULL); -} -#endif - - -/////////////// MatrixMultiply.proto /////////////// - -#if PY_VERSION_HEX >= 0x03050000 - #define __Pyx_PyNumber_MatrixMultiply(x,y) PyNumber_MatrixMultiply(x,y) - #define __Pyx_PyNumber_InPlaceMatrixMultiply(x,y) PyNumber_InPlaceMatrixMultiply(x,y) -#else -#define __Pyx_PyNumber_MatrixMultiply(x,y) __Pyx__PyNumber_MatrixMultiply(x, y, "@") -static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name); -static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y); -#endif - -/////////////// MatrixMultiply /////////////// -//@requires: PyObjectGetAttrStr -//@requires: PyObjectCallOneArg -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall - -#if PY_VERSION_HEX < 0x03050000 -static PyObject* __Pyx_PyObject_CallMatrixMethod(PyObject* method, PyObject* arg) { - // NOTE: eats the method reference - PyObject *result = NULL; -#if CYTHON_UNPACK_METHODS - if (likely(PyMethod_Check(method))) { - PyObject *self = PyMethod_GET_SELF(method); - if (likely(self)) { - PyObject *args; - PyObject *function = PyMethod_GET_FUNCTION(method); - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {self, arg}; - result = __Pyx_PyFunction_FastCall(function, args, 2); - goto done; - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {self, arg}; - result = __Pyx_PyCFunction_FastCall(function, args, 2); - goto done; - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 1, arg); - Py_INCREF(function); - Py_DECREF(method); method = NULL; - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); - return result; - } - } -#endif - result = __Pyx_PyObject_CallOneArg(method, arg); -done: - Py_DECREF(method); - return result; -} - -#define __Pyx_TryMatrixMethod(x, y, py_method_name) { \ - PyObject *func = __Pyx_PyObject_GetAttrStr(x, py_method_name); \ - if (func) { \ - PyObject *result = __Pyx_PyObject_CallMatrixMethod(func, y); \ - if (result != Py_NotImplemented) \ - return result; \ - Py_DECREF(result); \ - } else { \ - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) \ - return NULL; \ - PyErr_Clear(); \ - } \ -} - -static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name) { - int right_is_subtype = PyObject_IsSubclass((PyObject*)Py_TYPE(y), (PyObject*)Py_TYPE(x)); - if (unlikely(right_is_subtype == -1)) - return NULL; - if (right_is_subtype) { - // to allow subtypes to override parent behaviour, try reversed operation first - // see note at https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types - __Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__")) - } - __Pyx_TryMatrixMethod(x, y, PYIDENT("__matmul__")) - if (!right_is_subtype) { - __Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__")) - } - PyErr_Format(PyExc_TypeError, - "unsupported operand type(s) for %.2s: '%.100s' and '%.100s'", - op_name, - Py_TYPE(x)->tp_name, - Py_TYPE(y)->tp_name); - return NULL; -} - -static 
PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y) { - __Pyx_TryMatrixMethod(x, y, PYIDENT("__imatmul__")) - return __Pyx__PyNumber_MatrixMultiply(x, y, "@="); -} - -#undef __Pyx_TryMatrixMethod -#endif - - -/////////////// PyDictVersioning.proto /////////////// - -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) \ - (version_var) = __PYX_GET_DICT_VERSION(dict); \ - (cache_var) = (value); - -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) { \ - static PY_UINT64_T __pyx_dict_version = 0; \ - static PyObject *__pyx_dict_cached_value = NULL; \ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) { \ - (VAR) = __pyx_dict_cached_value; \ - } else { \ - (VAR) = __pyx_dict_cached_value = (LOOKUP); \ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT); \ - } \ -} - -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); /*proto*/ -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); /*proto*/ -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); /*proto*/ - -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/////////////// PyDictVersioning /////////////// - -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} - -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} - -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/certifi/core.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/certifi/core.py deleted file mode 100644 index de028981b97e1fcc8ef4ab2c817cc8731b9c8738..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/certifi/core.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem or its contents. 
-""" -import sys - - -if sys.version_info >= (3, 11): - - from importlib.resources import as_file, files - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the file - # in cases where we're inside of a zipimport situation until someone - # actually calls where(), but we don't want to re-extract the file - # on every call of where(), so we'll do it once then store it in a - # global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you to - # manage the cleanup of this file, so it doesn't actually return a - # path, it returns a context manager that will give you the path - # when you enter it and will do any cleanup when you leave it. In - # the common case of not needing a temporary file, it will just - # return the file system location and the __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") - -elif sys.version_info >= (3, 7): - - from importlib.resources import path as get_path, read_text - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the - # file in cases where we're inside of a zipimport situation until - # someone actually calls where(), but we don't want to re-extract - # the file on every call of where(), so we'll do it once then store - # it in a global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you - # to manage the cleanup of this file, so it doesn't actually - # return a path, it returns a context manager that will give - # you the path when you enter it and will do any cleanup when - # you leave it. In the common case of not needing a temporary - # file, it will just return the file system location and the - # __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = get_path("certifi", "cacert.pem") - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") - -else: - import os - import types - from typing import Union - - Package = Union[types.ModuleType, str] - Resource = Union[str, "os.PathLike"] - - # This fallback will work for Python versions prior to 3.7 that lack the - # importlib.resources module but relies on the existing `where` function - # so won't address issues with environments like PyOxidizer that don't set - # __file__ on modules. - def read_text( - package: Package, - resource: Resource, - encoding: str = 'utf-8', - errors: str = 'strict' - ) -> str: - with open(where(), encoding=encoding) as data: - return data.read() - - # If we don't have importlib.resources, then we will just do the old logic - # of assuming we're on the filesystem and munge the path directly. 
- def where() -> str: - f = os.path.dirname(__file__) - - return os.path.join(f, "cacert.pem") - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/spaces/aryadytm/photo-colorization/README.md b/spaces/aryadytm/photo-colorization/README.md deleted file mode 100644 index 663a00440d1b9ac359375b6d827e276f88da005a..0000000000000000000000000000000000000000 --- a/spaces/aryadytm/photo-colorization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Photo Colorization -emoji: 🌍 -colorFrom: gray -colorTo: blue -sdk: streamlit -sdk_version: 1.2.0 -python_version: 3.9.5 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Maithrreye Srinivasan.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Maithrreye Srinivasan.html deleted file mode 100644 index a7ee19cb802f4473fd2981d4319465ae27fe065b..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Maithrreye Srinivasan.html +++ /dev/null @@ -1,132 +0,0 @@ - - - - Maithrreye Srinivasan - - - - -
    -

    Maithrreye Srinivasan

    - -
    -

    Application

    I have had different mentors at every stage of my career, and I would like to give back to the AI community by sharing my experience in the data science field with people who want to break into this field as well as people who want to change careers. Being someone who worked in technical customer success and moved to higher studies to enter the AI field, I believe I can help others from my experience. Mentoring also helps me to be a better leader, gain new perspectives, and be a ladder for change in someone's life. I also have time that I can put to use here.

    Interview


    How did you hear about SM?
    • Started seeing folks on LinkedIn talking about it. Then followed us on Twitter

    Why do you want to mentor?
    • From India, MS at U Alberta in ML
    • now working at research institute
    • In India, worked at Microsoft for 3 years
    • Infrastructure job to an ML job
    • Has had a lot of mentors who helped with that transition
    • Wants to give back

    Mentorship experience?
    • UofA mentored high school students, helped them plan their careers
    • as a grad student, mentored some BS students
    • at Microsoft did a bit of mentoring there

    What does a mentor do?
    • Sharing knowledge /experience that they have gained over the years
    • Mentee can learn and grow. 
    • Both mentor and mentee grow together

    What are beginners lacking?
    • Lots of role names, can be overwhelming
    • Learn what type of role is best for you within the field
    • Too much noise to know what is best
    • boundaries are blurry / fuzzy

    How can you help?
    • Explain the difference of the different roles
    • Get their understanding of what they want
    • Try to find intersect of their interests and roles
    • Kaggle 


    Questions about SM?
    • Understood a lot from my email (the dating site analogy helped), not many questions
    • How do you actually review the performance of a mentor? Is it based on getting your mentee hired?
    • Are your mentees just from North America? 




    -
    - -
    - - - \ No newline at end of file diff --git a/spaces/aus10powell/TwitterAccounts/templates/charts/handle_sentiment_distribution.html b/spaces/aus10powell/TwitterAccounts/templates/charts/handle_sentiment_distribution.html deleted file mode 100644 index 26846ea7b31a567c6a2fa2d54916d3cd321e5ea3..0000000000000000000000000000000000000000 --- a/spaces/aus10powell/TwitterAccounts/templates/charts/handle_sentiment_distribution.html +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - -
    - - - \ No newline at end of file diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/splitAttention.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/splitAttention.py deleted file mode 100644 index a9df37ba8dd2caeac62fea038946b4aa5a724b7e..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/splitAttention.py +++ /dev/null @@ -1,280 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldmlib.modules.diffusionmodules.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., att_step=1): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - self.att_step = att_step - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - - limit = k.shape[0] - att_step = self.att_step - q_chunks = list(torch.tensor_split(q, limit//att_step, dim=0)) - k_chunks = list(torch.tensor_split(k, limit//att_step, dim=0)) - v_chunks = list(torch.tensor_split(v, limit//att_step, dim=0)) - - q_chunks.reverse() - k_chunks.reverse() - v_chunks.reverse() - sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - del k, q, v - for i in range (0, limit, att_step): - - q_buffer = q_chunks.pop() - k_buffer = k_chunks.pop() - v_buffer = v_chunks.pop() - sim_buffer = einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale - - del k_buffer, q_buffer - # 
attention, what we cannot get enough of, by chunks - - sim_buffer = sim_buffer.softmax(dim=-1) - - sim_buffer = einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) - del v_buffer - sim[i:i+att_step,:,:] = sim_buffer - - del sim_buffer - sim = rearrange(sim, '(b h) n d -> b n (h d)', h=h) - return self.to_out(sim) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. - Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) - x = self.proj_out(x) - return x + x_in diff --git a/spaces/awacke1/3d-Breakout-Game-Three.JS/version2-index.html b/spaces/awacke1/3d-Breakout-Game-Three.JS/version2-index.html deleted file mode 100644 index 176bcea503b31642f3e1b82f3523b70f3cc91876..0000000000000000000000000000000000000000 --- a/spaces/awacke1/3d-Breakout-Game-Three.JS/version2-index.html +++ /dev/null @@ -1,122 +0,0 @@ - - - - - 3D Breakout Game - - - - - - - - - \ No newline at end of file diff --git a/spaces/awacke1/ChatBotPersonalities/app.py b/spaces/awacke1/ChatBotPersonalities/app.py deleted file mode 100644 index 4a3d4eadd2aca8116dc72b26647efea1b3f179ac..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatBotPersonalities/app.py +++ /dev/null @@ -1,137 +0,0 @@ -import gradio as gr -import random -import torch -from transformers import AutoConfig -from transformers import GPT2Tokenizer, GPT2LMHeadModel -from itertools import chain - -config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish') -model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config) - -tokenizer = 
GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish') -tokenizer.model_max_length = 1024 - -#Dynamic Temperature -#See experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%25C3%25B6rkem-g%25C3%25B6knar - -base_temperature = 1.3 -dynamic_temperature_range = 0.15 - -rand_range = random.uniform(-1 * dynamic_temperature_range , dynamic_temperature_range ) -temperature = base_temperature + rand_range - -SPECIAL_TOKENS = ["", "", "", "", ""] - -#See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/ - -def get_chat_response(name,history=[], input_txt = "Hello , what is your name?"): - - ai_history = history.copy() - - #ai_history.append(input_txt) - ai_history_e = [tokenizer.encode(e) for e in ai_history] - - personality = "My name is " + name - - bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1]) - - #persona first, history next, input text must be at the end - #[[bos, persona] , [history] , [input]] - sequence = [[bos] + tokenizer.encode(personality)] + ai_history_e + [tokenizer.encode(input_txt)] - ##[[bos, persona] , [speaker1 .., speakser2 .., speaker1 ... speaker2 ... , [input]] - sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])] - - sequence = list(chain(*sequence)) - - #bot_input_ids = tokenizer.encode(personality + tokenizer.eos_token + input_txt + tokenizer.eos_token , return_tensors='pt') - sequence_len = len(sequence) - - #optimum response and speed - chat_history_ids = model.generate( - torch.tensor(sequence).unsqueeze(0), max_length=50, - pad_token_id=tokenizer.eos_token_id, - no_repeat_ngram_size=3, - do_sample=True, - top_k=60, - top_p=0.8, - temperature = 1.3 - ) - out_str = tokenizer.decode(chat_history_ids[0][sequence_len:], skip_special_tokens=True) - #out_str = tokenizer.decode(chat_history_ids[:, sequence.shape[-1]:][0], skip_special_tokens=False) - return out_str - -##you can use anyone from below -''' -| Macleod | Moran | Brenda | Ramirez | Peter Parker | Quentin Beck | Andy -| Red | Norton | Willard | Chief | Chef | Kilgore | Kurtz | Westley | Buttercup -| Vizzini | Fezzik | Inigo | Man In Black | Taylor | Zira | Zaius | Cornelius -| Bud | Lindsey | Hippy | Erin | Ed | George | Donna | Trinity | Agent Smith -| Morpheus | Neo | Tank | Meryl | Truman | Marlon | Christof | Stromboli | Bumstead -| Schreber | Walker | Korben | Cornelius | Loc Rhod | Anakin | Obi-Wan | Palpatine -| Padme | Superman | Luthor | Dude | Walter | Donny | Maude | General | Starkiller -| Indiana | Willie | Short Round | John | Sarah | Terminator | Miller | Sarge | Reiben -| Jackson | Upham | Chuckie | Will | Lambeau | Sean | Skylar | Saavik | Spock -| Kirk | Bones | Khan | Kirk | Spock | Sybok | Scotty | Bourne | Pamela | Abbott -| Nicky | Marshall | Korshunov | Troy | Vig | Archie Gates | Doc | Interrogator -| Ellie | Ted | Peter | Drumlin | Joss | Macready | Childs | Nicholas | Conrad -| Feingold | Christine | Adam | Barbara | Delia | Lydia | Cathy | Charles | Otho -| Schaefer | Han | Luke | Leia | Threepio | Vader | Yoda | Lando | Elaine | Striker -| Dr. 
Rumack | Kramer | David | Saavik | Kirk | Kruge | Holden | Deckard | Rachael -| Batty | Sebastian | Sam | Frodo | Pippin | Gandalf | Kay | Edwards | Laurel -| Edgar | Zed | Jay | Malloy | Plissken | Steve Rogers | Tony Stark | Scott Lang -| Bruce Banner | Bruce | Edward | Two-Face | Batman | Chase | Alfred | Dick -| Riddler | Din Djarin | Greef Karga | Kuiil | Ig-11 | Cara Dune | Peli Motto -| Toro Calican | Ripley | Meredith | Dickie | Marge | Peter | Lambert | Kane -| Dallas | Ripley | Ash | Parker | Threepio | Luke | Leia | Ben | Han | Common Bob -| Common Alice | Jack | Tyler | Marla | Dana | Stantz | Venkman | Spengler | Louis -| Fry | Johns | Riddick | Kirk | Decker | Spock | "Ilia | Indy | Belloq | Marion -| Brother | Allnut | Rose | Qui-Gon | Jar Jar -''' - - - - -def greet(character,message,history): - history = history or {"character": character, "message_history" : [] } - if history["character"] != character: - history = {"character": character, "message_history" : [] } - response = get_chat_response(character,history=history["message_history"],input_txt=message) - history["message_history"].append((message, response)) - html = "
    " - for user_msg, resp_msg in history["message_history"]: - html += f"
    You: {user_msg}
    " - html += f"
    {character}: {resp_msg}
    " - html += "
    " - return html,history - - -personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo","Spock","Vader","Indy"] - -examples= ["Gandalf", "What is your name?"] - -css=""" - .chatbox {display:flex;flex-direction:column} - .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%} - .user_msg {background-color:cornflowerblue;color:white;align-self:start} - .resp_msg {background-color:lightgray;align-self:self-end} -""" - - -#some selected ones are in for demo use -#personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo","Spock","Vader","Indy", "Ig-11","Threepio","Tony Stark","Batman","Vizzini"] -# Macleod| Ramirez | Peter Parker | Vizzini | Fezzik | Cornelius | Agent Smith | Korben | Deckard | Plissken | Steve Rogers | Tony Stark | Riddler | Kirk | Spock | Sybok | Scotty | Threepio | Luke | Leia | Ben | Han | Qui-Gon -personality_choices = ["Macleod","Ramirez","Morpheus","Neo","Spock","Vader","Indy","Peter Parker","Vizzini","Fezzik","Cornelius","Agent Smith","Korben","Deckard","Plissken","Steve Rogers","Tony Stark","Riddler","Kirk","Spock", "Scotty", "Threepio","Leia","Ben","Han","Qui-Gon"] - -title = "Chatbot Personalities" -description = "Characters include | Macleod| Ramirez | Peter Parker | Vizzini | Fezzik | Cornelius | Agent Smith | Korben | Deckard | Plissken | Steve Rogers | Tony Stark | Riddler | Kirk | Spock | Sybok | Scotty | Threepio | Luke | Leia | Ben | Han | Qui-Gon ." -article = "

    AI Goes to Job Interview | Metayazar AI Writer |Görkem Göknar

    " - -#History not implemented in this demo, use metayazar.com/chatbot for a movie and character dropdown chat interface -##interface = gr.Interface(fn=greet, inputs=[gr.inputs.Dropdown(personality_choices) ,"text"], title=title, description=description, outputs="text") - -history = {"character": "None", "message_history" : [] } -interface= gr.Interface(fn=greet, inputs=[gr.inputs.Dropdown(personality_choices) ,"text", "state"], outputs=["html","state"],css=css, title=title, description=description,article=article ) - - -if __name__ == "__main__": - interface.launch() \ No newline at end of file diff --git a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/app.py b/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/app.py deleted file mode 100644 index 9b3e2e737e4109756b670f98b8a9933a9a01a800..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/app.py +++ /dev/null @@ -1,357 +0,0 @@ -import streamlit as st -import openai -import os -import base64 -import glob -import json -import mistune -import pytz -import math -import requests -import time - -from datetime import datetime -from openai import ChatCompletion -from xml.etree import ElementTree as ET -from bs4 import BeautifulSoup -from collections import deque -from audio_recorder_streamlit import audio_recorder - -from dotenv import load_dotenv -from PyPDF2 import PdfReader -from langchain.text_splitter import CharacterTextSplitter -from langchain.embeddings import OpenAIEmbeddings -from langchain.vectorstores import FAISS -from langchain.chat_models import ChatOpenAI -from langchain.memory import ConversationBufferMemory -from langchain.chains import ConversationalRetrievalChain -from htmlTemplates import css, bot_template, user_template - - - -def generate_filename(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%I%M") # Date and time DD-TT - safe_prompt = "".join(x for x in prompt if x.isalnum())[:45] # Limit file name size and trim whitespace - return f"{safe_date_time}_{safe_prompt}.{file_type}" # Return a safe file name - -def transcribe_audio(openai_key, file_path, model): - OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions" - headers = { - "Authorization": f"Bearer {openai_key}", - } - with open(file_path, 'rb') as f: - data = {'file': f} - response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model}) - if response.status_code == 200: - st.write(response.json()) - chatResponse = chat_with_model(response.json().get('text'), '') # ************************************* - transcript = response.json().get('text') - st.write('Responses:') - st.write(chatResponse) - filename = generate_filename(transcript, 'txt') - create_file(filename, transcript, chatResponse) - return transcript - else: - st.write(response.json()) - st.error("Error in API call.") - return None - -def save_and_play_audio(audio_recorder): - audio_bytes = audio_recorder() - if audio_bytes: - filename = generate_filename("Recording", "wav") - with open(filename, 'wb') as f: - f.write(audio_bytes) - st.audio(audio_bytes, format="audio/wav") - return filename - return None - -def create_file(filename, prompt, response): - if filename.endswith(".txt"): - with open(filename, 'w') as file: - file.write(f"{prompt}\n{response}") - elif filename.endswith(".htm"): - with open(filename, 'w') as file: - file.write(f"{prompt} {response}") - elif filename.endswith(".md"): - with open(filename, 'w') as file: - 
file.write(f"{prompt}\n\n{response}") - -def truncate_document(document, length): - return document[:length] -def divide_document(document, max_length): - return [document[i:i+max_length] for i in range(0, len(document), max_length)] - -def get_table_download_link(file_path): - with open(file_path, 'r') as file: - try: - data = file.read() - except: - st.write('') - return file_path - b64 = base64.b64encode(data.encode()).decode() - file_name = os.path.basename(file_path) - ext = os.path.splitext(file_name)[1] # get the file extension - if ext == '.txt': - mime_type = 'text/plain' - elif ext == '.py': - mime_type = 'text/plain' - elif ext == '.xlsx': - mime_type = 'text/plain' - elif ext == '.csv': - mime_type = 'text/plain' - elif ext == '.htm': - mime_type = 'text/html' - elif ext == '.md': - mime_type = 'text/markdown' - else: - mime_type = 'application/octet-stream' # general binary data type - href = f'{file_name}' - return href - -def CompressXML(xml_text): - root = ET.fromstring(xml_text) - for elem in list(root.iter()): - if isinstance(elem.tag, str) and 'Comment' in elem.tag: - elem.parent.remove(elem) - return ET.tostring(root, encoding='unicode', method="xml") - -def read_file_content(file,max_length): - if file.type == "application/json": - content = json.load(file) - return str(content) - elif file.type == "text/html" or file.type == "text/htm": - content = BeautifulSoup(file, "html.parser") - return content.text - elif file.type == "application/xml" or file.type == "text/xml": - tree = ET.parse(file) - root = tree.getroot() - xml = CompressXML(ET.tostring(root, encoding='unicode')) - return xml - elif file.type == "text/markdown" or file.type == "text/md": - md = mistune.create_markdown() - content = md(file.read().decode()) - return content - elif file.type == "text/plain": - return file.getvalue().decode() - else: - return "" - -def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'): - model = model_choice - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(document_section)>0: - conversation.append({'role': 'assistant', 'content': document_section}) - - start_time = time.time() - report = [] - res_box = st.empty() - collected_chunks = [] - collected_messages = [] - - for chunk in openai.ChatCompletion.create( - model='gpt-3.5-turbo', - messages=conversation, - temperature=0.5, - stream=True - ): - - collected_chunks.append(chunk) # save the event response - chunk_message = chunk['choices'][0]['delta'] # extract the message - collected_messages.append(chunk_message) # save the message - - content=chunk["choices"][0].get("delta",{}).get("content") - - try: - report.append(content) - if len(content) > 0: - result = "".join(report).strip() - #result = result.replace("\n", "") - res_box.markdown(f'*{result}*') - except: - st.write('.') - - full_reply_content = ''.join([m.get('content', '') for m in collected_messages]) - st.write("Elapsed time:") - st.write(time.time() - start_time) - return full_reply_content - -def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(file_content)>0: - conversation.append({'role': 'assistant', 'content': file_content}) - response = openai.ChatCompletion.create(model=model_choice, messages=conversation) - return response['choices'][0]['message']['content'] - 
-def pdf2txt(pdf_docs): - text = "" - for pdf in pdf_docs: - pdf_reader = PdfReader(pdf) - for page in pdf_reader.pages: - text += page.extract_text() - return text - -def txt2chunks(text): - text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len) - return text_splitter.split_text(text) - -def vector_store(text_chunks): - key = os.getenv('OPENAI_API_KEY') - embeddings = OpenAIEmbeddings(openai_api_key=key) - return FAISS.from_texts(texts=text_chunks, embedding=embeddings) - -def get_chain(vectorstore): - llm = ChatOpenAI() - memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True) - return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory) - -def process_user_input(user_question): - response = st.session_state.conversation({'question': user_question}) - st.session_state.chat_history = response['chat_history'] - for i, message in enumerate(st.session_state.chat_history): - template = user_template if i % 2 == 0 else bot_template - st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True) - # Save file output from PDF query results - filename = generate_filename(user_question, 'txt') - create_file(filename, user_question, message.content) - - #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - -def main(): - # Sidebar and global - openai.api_key = os.getenv('OPENAI_API_KEY') - st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide") - - # File type for output, model choice - menu = ["htm", "txt", "xlsx", "csv", "md", "py"] #619 - choice = st.sidebar.selectbox("Output File Type:", menu) - model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301')) - - # Audio, transcribe, GPT: - filename = save_and_play_audio(audio_recorder) - if filename is not None: - transcription = transcribe_audio(openai.api_key, filename, "whisper-1") - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - filename=None # since transcription is finished next time just use the saved transcript - - # prompt interfaces - user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100) - - # file section interface for prompts against large documents as context - collength, colupload = st.columns([2,3]) # adjust the ratio as needed - with collength: - max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000) - with colupload: - uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"]) - - # Document section chat - document_sections = deque() - document_responses = {} - if uploaded_file is not None: - file_content = read_file_content(uploaded_file, max_length) - document_sections.extend(divide_document(file_content, max_length)) - if len(document_sections) > 0: - if st.button("👁️ View Upload"): - st.markdown("**Sections of the uploaded file:**") - for i, section in enumerate(list(document_sections)): - st.markdown(f"**Section {i+1}**\n{section}") - st.markdown("**Chat with the model:**") - for i, section in enumerate(list(document_sections)): - if i in document_responses: - st.markdown(f"**Section {i+1}**\n{document_responses[i]}") - else: - if st.button(f"Chat about Section {i+1}"): - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, section, model_choice) # 
************************************* - st.write('Response:') - st.write(response) - document_responses[i] = response - filename = generate_filename(f"{user_prompt}_section_{i+1}", choice) - create_file(filename, user_prompt, response) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - if st.button('💬 Chat'): - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # ************************************* - st.write('Response:') - st.write(response) - - filename = generate_filename(user_prompt, choice) - create_file(filename, user_prompt, response) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - all_files = glob.glob("*.*") - all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names - all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order - - # sidebar of files - file_contents='' - next_action='' - for file in all_files: - col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed - with col1: - if st.button("🌐", key="md_"+file): # md emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='md' - with col2: - st.markdown(get_table_download_link(file), unsafe_allow_html=True) - with col3: - if st.button("📂", key="open_"+file): # open emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='open' - with col4: - if st.button("🔍", key="read_"+file): # search emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='search' - with col5: - if st.button("🗑", key="delete_"+file): - os.remove(file) - st.experimental_rerun() - - if len(file_contents) > 0: - if next_action=='open': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - if next_action=='md': - st.markdown(file_contents) - if next_action=='search': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, file_contents, model_choice) - filename = generate_filename(file_contents, choice) - create_file(filename, file_contents, response) - - st.experimental_rerun() - #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - -if __name__ == "__main__": - main() - -load_dotenv() -st.write(css, unsafe_allow_html=True) - -st.header("Chat with documents :books:") -user_question = st.text_input("Ask a question about your documents:") -if user_question: - process_user_input(user_question) - -with st.sidebar: - st.subheader("Your documents") - docs = st.file_uploader("Upload your documents", accept_multiple_files=True) - with st.spinner("Processing"): - raw = pdf2txt(docs) - if len(raw) > 0: - length = str(len(raw)) - text_chunks = txt2chunks(raw) - vectorstore = vector_store(text_chunks) - st.session_state.conversation = get_chain(vectorstore) - st.markdown('# AI Search Index of Length:' + length + ' Created.') - filename = generate_filename(raw, 'txt') - create_file(filename, raw, '') - \ No newline at end of file diff --git a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/README.md b/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/README.md deleted file mode 100644 index 8b0c194cd4f251a3a9afd1a02fdcee8d7a24d9b3..0000000000000000000000000000000000000000 --- 
a/spaces/awacke1/Docker-FlanT5-TextGeneratorTranslator/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: ✍️Flan T5 Text Generator Translator 🚢 Docker -emoji: 🚢Text -colorFrom: yellow -colorTo: indigo -sdk: docker -app_port: 7860 -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/PersistState/app.py b/spaces/awacke1/PersistState/app.py deleted file mode 100644 index dcb0d5649af7b19be91fb6f27ac8af1cc03e3c4a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/PersistState/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import random -import gradio as gr - -def chat(message, history): - history = history or [] - if message.startswith("How many"): - response = random.randint(1, 10) - elif message.startswith("How"): - response = random.choice(["Great", "Good", "Okay", "Bad"]) - elif message.startswith("Where"): - response = random.choice(["Here", "There", "Somewhere"]) - else: - response = "I don't know" - history.append((message, response)) - return history, history - -iface = gr.Interface( - chat, - ["text", "state"], - ["chatbot", "state"], - allow_screenshot=False, - allow_flagging="never", -) -#iface.launch(share=True) --share not supported on spaces - find better persist - -iface.launch() \ No newline at end of file diff --git a/spaces/awacke1/SNOMED-LOINC-eCQM/files/Readme.md b/spaces/awacke1/SNOMED-LOINC-eCQM/files/Readme.md deleted file mode 100644 index 9d494f6d6336624e46e1ca6eb75996bf156099d8..0000000000000000000000000000000000000000 --- a/spaces/awacke1/SNOMED-LOINC-eCQM/files/Readme.md +++ /dev/null @@ -1 +0,0 @@ -Files Directory - drop in examples here to ref by app.py \ No newline at end of file diff --git a/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/README.md b/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/README.md deleted file mode 100644 index 58d833cd3215a5171709dbe5a665759f9ccceb04..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: 📷WebCam AI🧠 -emoji: 🚀 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/BlendShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/BlendShader.js deleted file mode 100644 index 8a7a59cd85a0a50ce26a2cf84d9550cbf5b67de6..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/BlendShader.js +++ /dev/null @@ -1,51 +0,0 @@ -/** - * @author alteredq / http://alteredqualia.com/ - * - * Blend two textures - */ - -THREE.BlendShader = { - - uniforms: { - - "tDiffuse1": { value: null }, - "tDiffuse2": { value: null }, - "mixRatio": { value: 0.5 }, - "opacity": { value: 1.0 } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - - "uniform float opacity;", - "uniform float mixRatio;", - - "uniform sampler2D tDiffuse1;", - "uniform sampler2D tDiffuse2;", - - "varying vec2 vUv;", - - "void main() {", - - "vec4 texel1 = texture2D( tDiffuse1, vUv );", - "vec4 texel2 = texture2D( tDiffuse2, vUv );", - "gl_FragColor = opacity * mix( texel1, texel2, mixRatio );", - - "}" - - ].join( "\n" ) - -}; diff --git 
a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/TechnicolorShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/TechnicolorShader.js deleted file mode 100644 index ba1c8e7136a5d876d7b8db58e85d5b31caaa9bf5..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/TechnicolorShader.js +++ /dev/null @@ -1,47 +0,0 @@ -/** - * @author flimshaw / http://charliehoey.com - * - * Technicolor Shader - * Simulates the look of the two-strip technicolor process popular in early 20th century films. - * More historical info here: http://www.widescreenmuseum.com/oldcolor/technicolor1.htm - * Demo here: http://charliehoey.com/technicolor_shader/shader_test.html - */ - -THREE.TechnicolorShader = { - - uniforms: { - - "tDiffuse": { value: null } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - - "uniform sampler2D tDiffuse;", - "varying vec2 vUv;", - - "void main() {", - - "vec4 tex = texture2D( tDiffuse, vec2( vUv.x, vUv.y ) );", - "vec4 newTex = vec4(tex.r, (tex.g + tex.b) * .5, (tex.g + tex.b) * .5, 1.0);", - - "gl_FragColor = newTex;", - - "}" - - ].join( "\n" ) - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.d.ts deleted file mode 100644 index 5b3adde0e38f79a9f1b6c68dfbbe2e154fc893be..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/Three.Legacy.d.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { Geometry } from './core/Geometry'; -import { Material } from './materials/Material'; -import { Object3D } from './core/Object3D'; -import { Scene } from './scenes/Scene'; - -export namespace SceneUtils { - export function createMultiMaterialObject( - geometry: Geometry, - materials: Material[] - ): Object3D; - export function detach(child: Object3D, parent: Object3D, scene: Scene): void; - export function attach(child: Object3D, scene: Scene, parent: Object3D): void; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_phong_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_phong_fragment.glsl.js deleted file mode 100644 index b9cfe42148bd1a2259812162745af3ff29b78168..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_phong_fragment.glsl.js +++ /dev/null @@ -1,7 +0,0 @@ -export default /* glsl */` -BlinnPhongMaterial material; -material.diffuseColor = diffuseColor.rgb; -material.specularColor = specular; -material.specularShininess = shininess; -material.specularStrength = specularStrength; -`; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLState.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLState.d.ts deleted file mode 100644 index fdcfc23115fac668091686058f09d355631b3b77..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/webgl/WebGLState.d.ts +++ /dev/null @@ -1,96 +0,0 @@ -import { CullFace } from '../../constants'; - -export class WebGLColorBuffer { - constructor(gl: any, state: any); - - setMask(colorMask: number): void; - setLocked(lock: boolean): void; - 
setClear(r: number, g: number, b: number, a: number): void; - reset(): void; -} - -export class WebGLDepthBuffer { - constructor(gl: any, state: any); - - setTest(depthTest: boolean): void; - setMask(depthMask: number): void; - setFunc(depthFunc: number): void; - setLocked(lock: boolean): void; - setClear(depth: any): void; - reset(): void; -} - -export class WebGLStencilBuffer { - constructor(gl: any, state: any); - - setTest(stencilTest: boolean): void; - setMask(stencilMask: number): void; - setFunc(stencilFunc: number, stencilRef: any, stencilMask: number): void; - setOp(stencilFail: any, stencilZFail: any, stencilZPass: any): void; - setLocked(lock: boolean): void; - setClear(stencil: any): void; - reset(): void; -} - -export class WebGLState { - constructor(gl: any, extensions: any, paramThreeToGL: Function); - - buffers: { - color: WebGLColorBuffer; - depth: WebGLDepthBuffer; - stencil: WebGLStencilBuffer; - }; - - init(): void; - initAttributes(): void; - enableAttribute(attribute: string): void; - enableAttributeAndDivisor( - attribute: string, - meshPerAttribute: any, - extension: any - ): void; - disableUnusedAttributes(): void; - enable(id: string): void; - disable(id: string): void; - getCompressedTextureFormats(): any[]; - setBlending( - blending: number, - blendEquation?: number, - blendSrc?: number, - blendDst?: number, - blendEquationAlpha?: number, - blendSrcAlpha?: number, - blendDstAlpha?: number, - premultiplyAlpha?: boolean - ): void; - setColorWrite(colorWrite: number): void; - setDepthTest(depthTest: number): void; - setDepthWrite(depthWrite: number): void; - setDepthFunc(depthFunc: Function): void; - setStencilTest(stencilTest: boolean): void; - setStencilWrite(stencilWrite: any): void; - setStencilFunc( - stencilFunc: Function, - stencilRef: any, - stencilMask: number - ): void; - setStencilOp(stencilFail: any, stencilZFail: any, stencilZPass: any): void; - setFlipSided(flipSided: number): void; - setCullFace(cullFace: CullFace): void; - setLineWidth(width: number): void; - setPolygonOffset(polygonoffset: number, factor: number, units: number): void; - setScissorTest(scissorTest: boolean): void; - getScissorTest(): boolean; - activeTexture(webglSlot: any): void; - bindTexture(webglType: any, webglTexture: any): void; - // Same interface as https://developer.mozilla.org/en-US/docs/Web/API/WebGLRenderingContext/compressedTexImage2D - compressedTexImage2D(): void; - // Same interface as https://developer.mozilla.org/en-US/docs/Web/API/WebGLRenderingContext/texImage2D - texImage2D(): void; - clearColor(r: number, g: number, b: number, a: number): void; - clearDepth(depth: number): void; - clearStencil(stencil: any): void; - scissor(scissor: any): void; - viewport(viewport: any): void; - reset(): void; -} diff --git a/spaces/bigscience/petals-api/src/bloom/block.py b/spaces/bigscience/petals-api/src/bloom/block.py deleted file mode 100644 index a4175a760e6b1f0d30f4ef72c27e1a2511fe031a..0000000000000000000000000000000000000000 --- a/spaces/bigscience/petals-api/src/bloom/block.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -Bloom intermediate layer -Based on https://github.com/huggingface/transformers/commit/ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b -See commit history for authorship. 
-""" -import math - -import torch -import torch.nn as nn -import torch.nn.quantized.dynamic.modules.linear - -from src.bloom.ops import (BloomGelu, BloomScaledSoftmax, attention_mask_func, build_alibi_tensor, dropout_add, - pre_process_alibi_for_pad, split_tensor_along_last_dim) - - -class BloomAttention(nn.Module): - def __init__(self, config, layer_number=None): - super().__init__() - - self.hidden_size = config.hidden_size - self.num_heads = config.n_head - self.head_dim = self.hidden_size // self.num_heads - self.split_size = self.hidden_size - self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 - self.masked_softmax_fusion = config.masked_softmax_fusion - self.hidden_dropout = config.hidden_dropout - - if self.head_dim * self.num_heads != self.hidden_size: - raise ValueError( - f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" - f" {self.num_heads})." - ) - - # Layer-wise attention scaling - self.layer_number = max(1, layer_number) - self.norm_factor = math.sqrt(self.head_dim) * self.layer_number - - # Scaled Softmax - self.scale_mask_softmax = BloomScaledSoftmax( - self.masked_softmax_fusion, - attention_mask_func, - self.attention_softmax_in_fp32, - self.layer_number, - ) - - self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) - self.dense = nn.Linear(self.hidden_size, self.hidden_size) - - self.attention_dropout = nn.Dropout(config.attention_dropout) - - def forward( - self, - hidden_states, - residual, - layer_past=None, - attention_mask=None, - alibi=None, - head_mask=None, - use_cache=False, - output_attentions=False, - ): - if alibi is None: - current_sequence_length = hidden_states.shape[1] + (0 if layer_past is None else layer_past[0].shape[1]) - alibi = build_alibi_tensor( - current_sequence_length, n_head=self.num_heads, dtype=hidden_states.dtype, device=hidden_states.device - ) - - # hidden_states: [batch_size, seq_length, hidden_size] - # apply preprocessing if the input is padded - if attention_mask is not None: - alibi = pre_process_alibi_for_pad(alibi, attention_mask) - # otherwise repeat alibi tensor with the batch size - else: - alibi = alibi.repeat(hidden_states.shape[0], 1, 1) - - mixed_x_layer = self.query_key_value(hidden_states) - - # [batch_size, seq_length, 3 x hidden_size] --> [batch_size, seq_length, num_heads, 3 x head_dim] - new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_heads, 3 * self.head_dim) - mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) - - # [batch_size, seq_length, num_heads, 3 x head_dim] --> 3 [batch_size, seq_length, num_heads, head_dim] - (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) - - if layer_past is not None: - past_key, past_value = layer_past - key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=1) - value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=1) - - if use_cache is True: - present = (key_layer, value_layer) - else: - present = None - - # [batch_size, head_dim, q_length, k_length] - output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1)) - - # [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim] - query_layer = query_layer.transpose(1, 0).reshape(output_size[2], output_size[0] * output_size[1], -1) - - # [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim] - key_layer = key_layer.transpose(1, 
0).reshape(output_size[3], output_size[0] * output_size[1], -1) - - # Raw attention scores. [batch_size * num_heads, q_length, k_length] - beta = 1.0 / self.layer_number - - matmul_result = torch.baddbmm( - alibi, - query_layer.transpose(1, 0), - key_layer.transpose(1, 0).transpose(1, 2), - beta=beta, - alpha=(1.0 / self.norm_factor), - ) - - # change view to [batch_size, num_heads, q_length, k_length] - attention_scores = matmul_result.view(*output_size) - - # attention scores and attention mask [b, np, sq, sk] - max_positions = max(attention_scores.shape[-1], attention_scores.shape[-2]) - attention_probs = self.scale_mask_softmax(attention_scores, attention_mask, max_positions).to(value_layer.dtype) - attention_probs = self.attention_dropout(attention_probs) - - if head_mask is not None: - attention_probs = attention_probs * head_mask - - # context layer shape: [batch_size, num_heads, q_length, head_dim] - output_size = (value_layer.size(0), value_layer.size(2), query_layer.size(0), value_layer.size(3)) - - # change view [k_length, batch_size x num_heads, head_dim] - value_layer = value_layer.transpose(1, 0).reshape(value_layer.size(1), output_size[0] * output_size[1], -1) - - # change view [batch_size x num_heads, q_length, k_length] - attention_probs_reshaped = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) - - # matmul: [batch_size * num_heads, q_length, head_dim] - context_layer = torch.bmm(attention_probs_reshaped, value_layer.transpose(0, 1)) - - # change view [batch_size, num_heads, q_length, head_dim] - context_layer = context_layer.view(*output_size) - - # [batchs_size, num_heads, q_length, head_dim] --> [q_length, batch_size, num_heads, head_dim] - context_layer = context_layer.permute(2, 0, 1, 3).contiguous() - - # [q_length, batch_size, num_heads, head_dim] --> [q_length, batch_size, hidden_size] - new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,) - - context_layer = context_layer.view(*new_context_layer_shape) - - # Output. [q_length, batch_size, hidden_size] - - # aggregate results across tp ranks. 
See here: https://github.com/pytorch/pytorch/issues/76232 - output_tensor = self.dense(context_layer) - output = output_tensor.transpose(1, 0) - - output = dropout_add(output, residual, self.hidden_dropout, self.training) - - outputs = (output, present) - if output_attentions: - outputs += (attention_probs,) - - return outputs - - -class BloomMLP(nn.Module): - def __init__(self, config): - super().__init__() - self.hidden_size = config.hidden_size - self.dense_h_to_4h = nn.Linear(self.hidden_size, 4 * self.hidden_size) - self.dense_4h_to_h = nn.Linear(4 * self.hidden_size, self.hidden_size) - self.hidden_dropout = config.hidden_dropout - self.gelu_impl = BloomGelu() - - def forward(self, hidden_states, residual): - hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) - intermediate_output = self.dense_4h_to_h(hidden_states) - output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training) - return output - - -class BloomBlock(nn.Module): - def __init__(self, config, layer_number=None): - super().__init__() - self.hidden_size = config.hidden_size - - self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon) - self.n_head = config.n_head - self.self_attention = BloomAttention(config, layer_number=layer_number) - self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon) - - self.mlp = BloomMLP(config) - - self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm - self.hidden_dropout = config.hidden_dropout - - def forward( - self, - hidden_states, - layer_past=None, - attention_mask=None, - head_mask=None, - use_cache=False, - output_attentions=False, - alibi=None, - ): - # hidden_states: [batch_size, seq_length, hidden_size] - - # Layer norm at the beginning of the transformer layer. - layernorm_output = self.input_layernorm(hidden_states) - - # Layer norm post the self attention. - if self.apply_residual_connection_post_layernorm: - residual = layernorm_output - else: - residual = hidden_states - - # Self attention. - attn_outputs = self.self_attention( - layernorm_output, - residual, - layer_past=layer_past, - attention_mask=attention_mask, - alibi=alibi, - head_mask=head_mask, - use_cache=use_cache, - output_attentions=output_attentions, - ) - - attention_output = attn_outputs[0] - - outputs = attn_outputs[1:] - - layernorm_output = self.post_attention_layernorm(attention_output) - - # Get residual - if self.apply_residual_connection_post_layernorm: - residual = layernorm_output - else: - residual = attention_output - - # MLP. - output = self.mlp(layernorm_output, residual) - - if use_cache: - outputs = (output,) + outputs - else: - outputs = (output,) + outputs[1:] - - return outputs # hidden_states, present, attentions diff --git a/spaces/bioriAsaeru/text-to-voice/Boss Movie Trailer 2013 Download Everything You Need to Know About the Film Directed by Anthony DSouza.md b/spaces/bioriAsaeru/text-to-voice/Boss Movie Trailer 2013 Download Everything You Need to Know About the Film Directed by Anthony DSouza.md deleted file mode 100644 index 3b2a7b0be39b05fb627bd1846719f8c2696db7d4..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Boss Movie Trailer 2013 Download Everything You Need to Know About the Film Directed by Anthony DSouza.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    The upbeat staccato strings in this track conjure a fairy tale world where small children can have big adventures. This inspirational orchestral music works well for movies, short films, games, trailers, and commercials.

    -

    Boss Movie Trailer 2013 Download


    Download Zip: https://urloso.com/2uyPmf



    -

    The pre-look poster was released on 12 August 2013, and the first look was released on 13 August 2013.[37] Akshay Kumar released the official teaser of Boss on 14 August 2013, and it was premiered with Once Upon ay Time in Mumbai Dobaara![38] The theatrical trailer was launched on 28 August 2013, coinciding with Krishna Janmashtami.[39]

    -

    Boss was released on 16 October 2013 on 2,750 screens worldwide. It was given a U/A certificate by the Censor Board for its action sequences and bikini scenes.[49] Boss was the first Indian movie released in Latin America.[50] It was distributed in Panama, Peru, Denmark and France in addition to the 400 screens already announced across Europe, North America, Southeast Asia and Australia.[51][52] It was also released in Pakistan on the occasion of Eid al-Adha.[53]

    -

    In addition to this, there's a new update called the Deadly Six Card Collection Challenge. Players across the globe must work together to collect a set number of character cards scattered across the Sonic Dash tracks. When enough have been collected by the whole community, those who took part will receive exclusive Sonic Lost World themed prizes. The trailer has been posted below; you can also download the Sonic Dash app here.

    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Drpu Barcode Label Maker Professional Keygen Download.md b/spaces/bioriAsaeru/text-to-voice/Drpu Barcode Label Maker Professional Keygen Download.md deleted file mode 100644 index b79008bad6d8b7cfd90fe189985fe7088190ad7b..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Drpu Barcode Label Maker Professional Keygen Download.md +++ /dev/null @@ -1,11 +0,0 @@ -
    -

Making barcode labels has never been so easy! DRPU Barcode Label Maker Professional is a tool that lets you generate barcodes from any template, as well as from your own custom barcode generator. The software can generate barcodes for just about every type of label, including serial number, inventory, inventory control, item, and product labels, among many others.

    -

DRPU Barcode Label Maker Professional Keygen Download


Download: https://urloso.com/2uyOfc



    -

The barcode label maker software is extremely easy to use. You can choose any type of label and set its size, shape, colors, and background, as well as the barcode type itself. You can also create as many templates as you like, so you won't have to create every barcode from scratch.

    -

The barcode label maker software has a simple and intuitive interface. There are no complex menus or tools to master in order to generate your barcode labels; it lets you generate labels in just a few simple steps, as sketched below:
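The page never spells out those steps, so purely as an illustration of what a minimal "pick a symbology, attach data, render the label" workflow can look like, here is a short sketch using the open-source python-barcode package with Pillow for image output. This is not the DRPU software described above, and the inventory ID and filename are made-up examples.

```python
# Illustrative sketch only: uses the open-source python-barcode package,
# not the DRPU product described on this page.
# Requires: pip install python-barcode pillow
import barcode
from barcode.writer import ImageWriter

# Step 1: choose a barcode symbology (Code 128 encodes arbitrary ASCII data).
code128 = barcode.get_barcode_class("code128")

# Step 2: attach the data to encode (hypothetical inventory ID) and an image writer.
label = code128("INV-000123", writer=ImageWriter())

# Step 3: render the label to a printable PNG file.
saved_path = label.save("inventory_label")  # -> "inventory_label.png"
print(f"Wrote {saved_path}")
```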

    -

The application has been installed on a vast array of platforms, including the most recent ones, ranging from Windows to Linux, Mac, and Android. DRPU Barcode Label Maker Software - Professional is an advanced software product that can be used as a stand-alone label maker program or in conjunction with the companion application, DRPU Barcode Label Maker - Personal.

    -

The program will help you create barcodes from scratch, or modify labels that you may have already created in an earlier version of the software. You may easily use the built-in templates to create your label.

    -

    -

Create your own labels by using the built-in templates or your own graphics or text. DRPU Barcode Label Maker Software - Professional gives you the ability to create the most advanced barcode labels, including QR codes, ASINs, UPCs, and other common symbologies. QR codes are highly useful in today's world because they can be used to launch applications on a smartphone, send SMS messages, and make payments.
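To make the QR-code use case above concrete, here is an equally hypothetical sketch using the open-source qrcode Python package (again, not the DRPU product); the encoded URL is an invented example of the kind of link a scanned code might open for a payment.

```python
# Illustrative sketch only: uses the open-source qrcode package, not the DRPU product.
# Requires: pip install qrcode pillow
import qrcode

# Encode a hypothetical payment link; scanning the resulting image opens it on a smartphone.
img = qrcode.make("https://example.com/pay?invoice=000123")
img.save("payment_qr.png")
```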

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/How Communism Works By The Catholic Library Services.md b/spaces/bioriAsaeru/text-to-voice/How Communism Works By The Catholic Library Services.md deleted file mode 100644 index ae0c920f4796a283f6b103869664ff69942980b5..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/How Communism Works By The Catholic Library Services.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

Of all the eyes staring into the cave, among the weariest must have been Gretchen Knief's. She had trekked to New Mexico by way of the South and was on her way back home, to California's San Joaquin Valley, where she was the chief librarian for Kern County. She was a tall woman, impeccably dressed, her smile warm. No one would have called the thirty-seven-year-old a beauty, and she could be a little awkward at times. But it was an endearing awkwardness, and everybody admired her smarts. Knief had spent a portion of her trip examining libraries in Florida and Louisiana, and she had walked away feeling pleased with how Kern County's far-flung network of seventy-one branches, many of which she had single-handedly expanded, stacked up by comparison. But pressures were mounting too. Kern's main library was housed in the basement of the county courthouse in Bakersfield, in quarters so cramped that some of its materials were buried beneath old lighting fixtures, furniture, and other bric-a-brac. A proposed $300,000 bond issue to finance a new facility was scheduled to go before the voters in the fall. But who knew what they'd decide, given the budget squeeze afflicting the county? The situation showed no signs of easing, either, the way people were still streaming into California's heartland, taxing public services of all kinds. "Authorities Predict Increase in Migrant Flow to Kern Soon," read the headline in the August 7 edition of the Bakersfield Californian.

    -

    How Communism Works By The Catholic Library Services


Download Zip: https://urloso.com/2uyRmj



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/John J Donovan Systems Programming Pdf Free Download NEW!.md b/spaces/bioriAsaeru/text-to-voice/John J Donovan Systems Programming Pdf Free Download NEW!.md deleted file mode 100644 index 368ba2c093ad273a24be156bec8f3fb35dfb7a33..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/John J Donovan Systems Programming Pdf Free Download NEW!.md +++ /dev/null @@ -1,21 +0,0 @@ -

John J. Donovan Systems Programming PDF Free Download


DOWNLOAD: https://urloso.com/2uyRh8



-
-3294 downloads, 8639 views, size 8 MB. Report. DOWNLOAD .PDF
-System Programming, a book by John J. Donovan: full description.
-Author of the book: John J. Donovan.
-Genre: Programming, IT manuals.
-Book language: English.
-Book publisher: John Wiley & Sons.
-Download the book John J. Donovan, "System Programming" (N.Y.: John Wiley & Sons Inc., 2001): page 1 of the text of the book.
-System Programming by John Donovan, free download.
-Download the book "System Programming" by John Donovan in FB2, TXT, EPUB, RTF, HTML, and Mobi formats: a collection of free books in e-books. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/bird-watching-society-of-greater-clare/brainy/questionnaires.py b/spaces/bird-watching-society-of-greater-clare/brainy/questionnaires.py deleted file mode 100644 index 992583e308d3d25ef78f9c114aceea4e2b1acf39..0000000000000000000000000000000000000000 --- a/spaces/bird-watching-society-of-greater-clare/brainy/questionnaires.py +++ /dev/null @@ -1,35 +0,0 @@ -import json - -FILE_PATH = "static/questionnaires/" -END = "" - -with open(FILE_PATH+"index.json", "r") as fh: - index = json.load(fh) - -topics = list(index.keys()) - -questionnaires = {} -for topic, filename in index.items(): - with open(FILE_PATH+filename, "r") as fh: - questionnaires[topic] = json.load(fh) - -numbers = { - topic: [question["number"] for question in questionnaires[topic]["questions"]] + [END] - for topic - in topics - } -sections = { - topic: [question["section"] for question in questionnaires[topic]["questions"]] + [END] - for topic - in topics - } -questions = { - topic: [question["text"] for question in questionnaires[topic]["questions"]] + [END] - for topic - in topics - } -scales = { - topic: [question["scale"] for question in questionnaires[topic]["questions"]] + [END] - for topic - in topics - } diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/__init__.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/PointSup/point_sup/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/PointSup/point_sup/__init__.py deleted file mode 100644 index 510e3814ac1bb273b48804191b4a7c1272ea9a9b..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/PointSup/point_sup/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from . import register_point_annotations -from .config import add_point_sup_config -from .dataset_mapper import PointSupDatasetMapper -from .mask_head import MaskRCNNConvUpsamplePointSupHead -from .point_utils import get_point_coords_from_point_annotation diff --git a/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py b/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/camenduru-com/jupyter/login.html b/spaces/camenduru-com/jupyter/login.html deleted file mode 100644 index d140724bab7966daa98268d1e69d827402e255dc..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/jupyter/login.html +++ /dev/null @@ -1,67 +0,0 @@ -{% extends "page.html" %} - - -{% block stylesheet %} -{% endblock %} - -{% block site %} - -
- - Hugging Face Logo

You can duplicate this Space to run it privately.

    -
    - - Duplicate Space -
    -
    -

    Token is huggingface

    - - {% if login_available %} - {# login_available means password-login is allowed. Show the form. #} -
    - -
    - {% else %} -

    {% trans %}No login available, you shouldn't be seeing this page.{% endtrans %}

    - {% endif %} - {% if message %} -
    - {% for key in message %} -
    - {{message[key]}} -
    - {% endfor %} -
    - {% endif %} - {% if token_available %} - {% block token_message %} - - {% endblock token_message %} - {% endif %} -
    - -{% endblock %} - - -{% block script %} -{% endblock %} diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/mask_head.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/mask_head.py deleted file mode 100644 index 1b1f7c97a71d8b78f9ba8efebb6f5c03143ef2ce..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/mask_head.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from typing import List -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm -from detectron2.layers.wrappers import move_device_like -from detectron2.structures import Instances -from detectron2.utils.events import get_event_storage -from detectron2.utils.registry import Registry - -__all__ = [ - "BaseMaskRCNNHead", - "MaskRCNNConvUpsampleHead", - "build_mask_head", - "ROI_MASK_HEAD_REGISTRY", -] - - -ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") -ROI_MASK_HEAD_REGISTRY.__doc__ = """ -Registry for mask heads, which predicts instance masks given -per-region features. - -The registered object will be called with `obj(cfg, input_shape)`. -""" - - -@torch.jit.unused -def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0): - """ - Compute the mask prediction loss defined in the Mask R-CNN paper. - - Args: - pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) - for class-specific or class-agnostic, where B is the total number of predicted masks - in all images, C is the number of foreground classes, and Hmask, Wmask are the height - and width of the mask predictions. The values are logits. - instances (list[Instances]): A list of N Instances, where N is the number of images - in the batch. These instances are in 1:1 - correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, - ...) associated with each instance are stored in fields. - vis_period (int): the period (in steps) to dump visualization. - - Returns: - mask_loss (Tensor): A scalar tensor containing the loss. - """ - cls_agnostic_mask = pred_mask_logits.size(1) == 1 - total_num_masks = pred_mask_logits.size(0) - mask_side_len = pred_mask_logits.size(2) - assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" 
- - gt_classes = [] - gt_masks = [] - for instances_per_image in instances: - if len(instances_per_image) == 0: - continue - if not cls_agnostic_mask: - gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) - gt_classes.append(gt_classes_per_image) - - gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( - instances_per_image.proposal_boxes.tensor, mask_side_len - ).to(device=pred_mask_logits.device) - # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len - gt_masks.append(gt_masks_per_image) - - if len(gt_masks) == 0: - return pred_mask_logits.sum() * 0 - - gt_masks = cat(gt_masks, dim=0) - - if cls_agnostic_mask: - pred_mask_logits = pred_mask_logits[:, 0] - else: - indices = torch.arange(total_num_masks) - gt_classes = cat(gt_classes, dim=0) - pred_mask_logits = pred_mask_logits[indices, gt_classes] - - if gt_masks.dtype == torch.bool: - gt_masks_bool = gt_masks - else: - # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) - gt_masks_bool = gt_masks > 0.5 - gt_masks = gt_masks.to(dtype=torch.float32) - - # Log the training accuracy (using gt classes and 0.5 threshold) - mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool - mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) - num_positive = gt_masks_bool.sum().item() - false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( - gt_masks_bool.numel() - num_positive, 1.0 - ) - false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) - - storage = get_event_storage() - storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) - storage.put_scalar("mask_rcnn/false_positive", false_positive) - storage.put_scalar("mask_rcnn/false_negative", false_negative) - if vis_period > 0 and storage.iter % vis_period == 0: - pred_masks = pred_mask_logits.sigmoid() - vis_masks = torch.cat([pred_masks, gt_masks], axis=2) - name = "Left: mask prediction; Right: mask GT" - for idx, vis_mask in enumerate(vis_masks): - vis_mask = torch.stack([vis_mask] * 3, axis=0) - storage.put_image(name + f" ({idx})", vis_mask) - - mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") - return mask_loss - - -def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]): - """ - Convert pred_mask_logits to estimated foreground probability masks while also - extracting only the masks for the predicted classes in pred_instances. For each - predicted box, the mask of the same class is attached to the instance by adding a - new "pred_masks" field to pred_instances. - - Args: - pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) - for class-specific or class-agnostic, where B is the total number of predicted masks - in all images, C is the number of foreground classes, and Hmask, Wmask are the height - and width of the mask predictions. The values are logits. - pred_instances (list[Instances]): A list of N Instances, where N is the number of images - in the batch. Each Instances must have field "pred_classes". - - Returns: - None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, - Wmask) for predicted class. Note that the masks are returned as a soft (non-quantized) - masks the resolution predicted by the network; post-processing steps, such as resizing - the predicted masks to the original image resolution and/or binarizing them, is left - to the caller. 
- """ - cls_agnostic_mask = pred_mask_logits.size(1) == 1 - - if cls_agnostic_mask: - mask_probs_pred = pred_mask_logits.sigmoid() - else: - # Select masks corresponding to the predicted classes - num_masks = pred_mask_logits.shape[0] - class_pred = cat([i.pred_classes for i in pred_instances]) - device = ( - class_pred.device - if torch.jit.is_scripting() - else ("cpu" if torch.jit.is_tracing() else class_pred.device) - ) - indices = move_device_like(torch.arange(num_masks, device=device), class_pred) - mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() - # mask_probs_pred.shape: (B, 1, Hmask, Wmask) - - num_boxes_per_image = [len(i) for i in pred_instances] - mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) - - for prob, instances in zip(mask_probs_pred, pred_instances): - instances.pred_masks = prob # (1, Hmask, Wmask) - - -class BaseMaskRCNNHead(nn.Module): - """ - Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN` - """ - - @configurable - def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0): - """ - NOTE: this interface is experimental. - - Args: - loss_weight (float): multiplier of the loss - vis_period (int): visualization period - """ - super().__init__() - self.vis_period = vis_period - self.loss_weight = loss_weight - - @classmethod - def from_config(cls, cfg, input_shape): - return {"vis_period": cfg.VIS_PERIOD} - - def forward(self, x, instances: List[Instances]): - """ - Args: - x: input region feature(s) provided by :class:`ROIHeads`. - instances (list[Instances]): contains the boxes & labels corresponding - to the input features. - Exact format is up to its caller to decide. - Typically, this is the foreground instances in training, with - "proposal_boxes" field and other gt annotations. - In inference, it contains boxes that are already predicted. - - Returns: - A dict of losses in training. The predicted "instances" in inference. - """ - x = self.layers(x) - if self.training: - return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period) * self.loss_weight} - else: - mask_rcnn_inference(x, instances) - return instances - - def layers(self, x): - """ - Neural network layers that makes predictions from input features. - """ - raise NotImplementedError - - -# To get torchscript support, we make the head a subclass of `nn.Sequential`. -# Therefore, to add new layers in this head class, please make sure they are -# added in the order they will be used in forward(). -@ROI_MASK_HEAD_REGISTRY.register() -class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential): - """ - A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). - Predictions are made with a final 1x1 conv layer. - """ - - @configurable - def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs): - """ - NOTE: this interface is experimental. - - Args: - input_shape (ShapeSpec): shape of the input feature - num_classes (int): the number of foreground classes (i.e. background is not - included). 1 if using class agnostic prediction. - conv_dims (list[int]): a list of N>0 integers representing the output dimensions - of N-1 conv layers and the last upsample layer. - conv_norm (str or callable): normalization for the conv layers. - See :func:`detectron2.layers.get_norm` for supported types. - """ - super().__init__(**kwargs) - assert len(conv_dims) >= 1, "conv_dims have to be non-empty!" 
- - self.conv_norm_relus = [] - - cur_channels = input_shape.channels - for k, conv_dim in enumerate(conv_dims[:-1]): - conv = Conv2d( - cur_channels, - conv_dim, - kernel_size=3, - stride=1, - padding=1, - bias=not conv_norm, - norm=get_norm(conv_norm, conv_dim), - activation=nn.ReLU(), - ) - self.add_module("mask_fcn{}".format(k + 1), conv) - self.conv_norm_relus.append(conv) - cur_channels = conv_dim - - self.deconv = ConvTranspose2d( - cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0 - ) - self.add_module("deconv_relu", nn.ReLU()) - cur_channels = conv_dims[-1] - - self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0) - - for layer in self.conv_norm_relus + [self.deconv]: - weight_init.c2_msra_fill(layer) - # use normal distribution initialization for mask prediction layer - nn.init.normal_(self.predictor.weight, std=0.001) - if self.predictor.bias is not None: - nn.init.constant_(self.predictor.bias, 0) - - @classmethod - def from_config(cls, cfg, input_shape): - ret = super().from_config(cfg, input_shape) - conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM - num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV - ret.update( - conv_dims=[conv_dim] * (num_conv + 1), # +1 for ConvTranspose - conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM, - input_shape=input_shape, - ) - if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK: - ret["num_classes"] = 1 - else: - ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES - return ret - - def layers(self, x): - for layer in self: - x = layer(x) - return x - - -def build_mask_head(cfg, input_shape): - """ - Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. - """ - name = cfg.MODEL.ROI_MASK_HEAD.NAME - return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py deleted file mode 100644 index 567636cc7dfbcc9167dd7f4aa2b752c6e53d311f..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .densepose_cse_base import DensePoseCSEBaseSampler -from .densepose_uniform import DensePoseUniformSampler - - -class DensePoseCSEUniformSampler(DensePoseCSEBaseSampler, DensePoseUniformSampler): - """ - Uniform Sampler for CSE - """ - - pass diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py deleted file mode 100644 index 78ce7c6cb02fa01f6319d088349ff4f422001839..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import math -from typing import Any, List -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import CfgNode -from detectron2.structures import Instances - -from .. 
import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType -from .chart import DensePoseChartLoss -from .registry import DENSEPOSE_LOSS_REGISTRY -from .utils import BilinearInterpolationHelper, LossDict - - -@DENSEPOSE_LOSS_REGISTRY.register() -class DensePoseChartWithConfidenceLoss(DensePoseChartLoss): - """ """ - - def __init__(self, cfg: CfgNode): - super().__init__(cfg) - self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) - if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: - self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss( - self.confidence_model_cfg.uv_confidence.epsilon - ) - elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: - self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss( - self.confidence_model_cfg.uv_confidence.epsilon - ) - - def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict: - """ - Overrides fake losses for fine segmentation and U/V coordinates to - include computation graphs for additional confidence parameters. - These are used when no suitable ground truth data was found in a batch. - The loss has a value 0 and is primarily used to construct the computation graph, - so that `DistributedDataParallel` has similar graphs on all GPUs and can - perform reduction properly. - - Args: - densepose_predictor_outputs: DensePose predictor outputs, an object - of a dataclass that is assumed to have the following attributes: - * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] - * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] - * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] - Return: - dict: str -> tensor: dict of losses with the following entries: - * `loss_densepose_U`: has value 0 - * `loss_densepose_V`: has value 0 - * `loss_densepose_I`: has value 0 - """ - conf_type = self.confidence_model_cfg.uv_confidence.type - if self.confidence_model_cfg.uv_confidence.enabled: - loss_uv = ( - densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum() - ) * 0 - if conf_type == DensePoseUVConfidenceType.IID_ISO: - loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0 - elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO: - loss_uv += ( - densepose_predictor_outputs.sigma_2.sum() - + densepose_predictor_outputs.kappa_u.sum() - + densepose_predictor_outputs.kappa_v.sum() - ) * 0 - return {"loss_densepose_UV": loss_uv} - else: - return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs) - - def produce_densepose_losses_uv( - self, - proposals_with_gt: List[Instances], - densepose_predictor_outputs: Any, - packed_annotations: Any, - interpolator: BilinearInterpolationHelper, - j_valid_fg: torch.Tensor, - ) -> LossDict: - conf_type = self.confidence_model_cfg.uv_confidence.type - if self.confidence_model_cfg.uv_confidence.enabled: - u_gt = packed_annotations.u_gt[j_valid_fg] - u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg] - v_gt = packed_annotations.v_gt[j_valid_fg] - v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg] - sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[ - j_valid_fg - ] - if conf_type == DensePoseUVConfidenceType.IID_ISO: - return { - "loss_densepose_UV": ( - self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt) - * self.w_points - ) - } - elif conf_type in 
[DensePoseUVConfidenceType.INDEP_ANISO]: - kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[ - j_valid_fg - ] - kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[ - j_valid_fg - ] - return { - "loss_densepose_UV": ( - self.uv_loss_with_confidences( - u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt - ) - * self.w_points - ) - } - return super().produce_densepose_losses_uv( - proposals_with_gt, - densepose_predictor_outputs, - packed_annotations, - interpolator, - j_valid_fg, - ) - - -class IIDIsotropicGaussianUVLoss(nn.Module): - """ - Loss for the case of iid residuals with isotropic covariance: - $Sigma_i = sigma_i^2 I$ - The loss (negative log likelihood) is then: - $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, - where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates - difference between estimated and ground truth UV values - For details, see: - N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning - Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 - """ - - def __init__(self, sigma_lower_bound: float): - super(IIDIsotropicGaussianUVLoss, self).__init__() - self.sigma_lower_bound = sigma_lower_bound - self.log2pi = math.log(2 * math.pi) - - def forward( - self, - u: torch.Tensor, - v: torch.Tensor, - sigma_u: torch.Tensor, - target_u: torch.Tensor, - target_v: torch.Tensor, - ): - # compute $\sigma_i^2$ - # use sigma_lower_bound to avoid degenerate solution for variance - # (sigma -> 0) - sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound - # compute \|delta_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2 - # the total loss from the formula above: - loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2) - return loss.sum() - - -class IndepAnisotropicGaussianUVLoss(nn.Module): - """ - Loss for the case of independent residuals with anisotropic covariances: - $Sigma_i = sigma_i^2 I + r_i r_i^T$ - The loss (negative log likelihood) is then: - $1/2 sum_{i=1}^n (log(2 pi) - + log sigma_i^2 (sigma_i^2 + ||r_i||^2) - + ||delta_i||^2 / sigma_i^2 - - ^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, - where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates - difference between estimated and ground truth UV values - For details, see: - N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning - Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 - """ - - def __init__(self, sigma_lower_bound: float): - super(IndepAnisotropicGaussianUVLoss, self).__init__() - self.sigma_lower_bound = sigma_lower_bound - self.log2pi = math.log(2 * math.pi) - - def forward( - self, - u: torch.Tensor, - v: torch.Tensor, - sigma_u: torch.Tensor, - kappa_u_est: torch.Tensor, - kappa_v_est: torch.Tensor, - target_u: torch.Tensor, - target_v: torch.Tensor, - ): - # compute $\sigma_i^2$ - sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound - # compute \|r_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2 - delta_u = u - target_u - delta_v = v - target_v - # compute \|delta_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. 
- delta_sqnorm = delta_u**2 + delta_v**2 - delta_u_r_u = delta_u * kappa_u_est - delta_v_r_v = delta_v * kappa_v_est - # compute the scalar product - delta_r = delta_u_r_u + delta_v_r_v - # compute squared scalar product ^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - delta_r_sqnorm = delta_r**2 - denom2 = sigma2 * (sigma2 + r_sqnorm2) - loss = 0.5 * ( - self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2 - ) - return loss.sum() diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/layers/test_losses.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/layers/test_losses.py deleted file mode 100644 index d74920246cbd4a188b3c81cf0c78e982af6da1ac..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/layers/test_losses.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -import unittest -import torch - -from detectron2.layers import ciou_loss, diou_loss - - -class TestLosses(unittest.TestCase): - def test_diou_loss(self): - """ - loss = 1 - iou + d/c - where, - d = (distance between centers of the 2 boxes)^2 - c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 - """ - # Identical boxes should have loss of 0 - box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) - loss = diou_loss(box, box) - self.assertTrue(np.allclose(loss, [0.0])) - - # Half size box inside other box - # iou = 0.5, d = 0.25, c = 8 - box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) - loss = diou_loss(box, box2) - self.assertTrue(np.allclose(loss, [0.53125])) - - # Two diagonally adjacent boxes - # iou = 0, d = 2, c = 8 - box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) - box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) - loss = diou_loss(box3, box4) - self.assertTrue(np.allclose(loss, [1.25])) - - # Test batched loss and reductions - box1s = torch.stack([box, box3], dim=0) - box2s = torch.stack([box2, box4], dim=0) - - loss = diou_loss(box1s, box2s, reduction="sum") - self.assertTrue(np.allclose(loss, [1.78125])) - - loss = diou_loss(box1s, box2s, reduction="mean") - self.assertTrue(np.allclose(loss, [0.890625])) - - def test_ciou_loss(self): - """ - loss = 1 - iou + d/c + alpha*v - where, - d = (distance between centers of the 2 boxes)^2 - c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 - v = (4/pi^2) * (arctan(box1_w/box1_h) - arctan(box2_w/box2_h))^2 - alpha = v/(1 - iou + v) - """ - # Identical boxes should have loss of 0 - box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) - loss = ciou_loss(box, box) - self.assertTrue(np.allclose(loss, [0.0])) - - # Half size box inside other box - # iou = 0.5, d = 0.25, c = 8 - # v = (4/pi^2) * (arctan(1) - arctan(0.5))^2 = 0.042 - # alpha = 0.0775 - box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) - loss = ciou_loss(box, box2) - self.assertTrue(np.allclose(loss, [0.5345])) - - # Two diagonally adjacent boxes - # iou = 0, d = 2, c = 8, v = 0, alpha = 0 - box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) - box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) - loss = ciou_loss(box3, box4) - self.assertTrue(np.allclose(loss, [1.25])) - - # Test batched loss and reductions - box1s = torch.stack([box, box3], dim=0) - box2s = torch.stack([box2, box4], dim=0) - - loss = ciou_loss(box1s, box2s, reduction="sum") - self.assertTrue(np.allclose(loss, [1.7845])) - - loss = ciou_loss(box1s, box2s, 
reduction="mean") - self.assertTrue(np.allclose(loss, [0.89225])) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/visualize_json_results.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/visualize_json_results.py deleted file mode 100644 index 472190e0b3b38b55773795915badbb5bc4599d42..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tools/visualize_json_results.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. - -import argparse -import json -import numpy as np -import os -from collections import defaultdict -import cv2 -import tqdm - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import Boxes, BoxMode, Instances -from detectron2.utils.file_io import PathManager -from detectron2.utils.logger import setup_logger -from detectron2.utils.visualizer import Visualizer - - -def create_instances(predictions, image_size): - ret = Instances(image_size) - - score = np.asarray([x["score"] for x in predictions]) - chosen = (score > args.conf_threshold).nonzero()[0] - score = score[chosen] - bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4) - bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) - - labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) - - ret.scores = score - ret.pred_boxes = Boxes(bbox) - ret.pred_classes = labels - - try: - ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] - except KeyError: - pass - return ret - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A script that visualizes the json predictions from COCO or LVIS dataset." - ) - parser.add_argument("--input", required=True, help="JSON file produced by the model") - parser.add_argument("--output", required=True, help="output directory") - parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") - parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") - args = parser.parse_args() - - logger = setup_logger() - - with PathManager.open(args.input, "r") as f: - predictions = json.load(f) - - pred_by_image = defaultdict(list) - for p in predictions: - pred_by_image[p["image_id"]].append(p) - - dicts = list(DatasetCatalog.get(args.dataset)) - metadata = MetadataCatalog.get(args.dataset) - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - - def dataset_id_map(ds_id): - return metadata.thing_dataset_id_to_contiguous_id[ds_id] - - elif "lvis" in args.dataset: - # LVIS results are in the same format as COCO results, but have a different - # mapping from dataset category id to contiguous category id in [0, #categories - 1] - def dataset_id_map(ds_id): - return ds_id - 1 - - else: - raise ValueError("Unsupported dataset: {}".format(args.dataset)) - - os.makedirs(args.output, exist_ok=True) - - for dic in tqdm.tqdm(dicts): - img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] - basename = os.path.basename(dic["file_name"]) - - predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) - vis = Visualizer(img, metadata) - vis_pred = vis.draw_instance_predictions(predictions).get_image() - - vis = Visualizer(img, metadata) - vis_gt = vis.draw_dataset_dict(dic).get_image() - - concat = np.concatenate((vis_pred, vis_gt), axis=1) - cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git 
a/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktail2affect.py b/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktail2affect.py deleted file mode 100644 index ad272dbdd48ea1b3cbd8d4a972dc092e16d251e1..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/cocktails/pipeline/cocktail2affect.py +++ /dev/null @@ -1,372 +0,0 @@ -import pandas as pd -import numpy as np -import os -from src.cocktails.utilities.cocktail_utilities import get_bunch_of_rep_keys -from src.cocktails.utilities.other_scrubbing_utilities import print_recipe -from src.cocktails.config import COCKTAILS_CSV_DATA -from src.music.config import CHECKPOINTS_PATH, EXPERIMENT_PATH -import matplotlib.pyplot as plt -from sklearn.cluster import KMeans -from sklearn.mixture import GaussianMixture -from sklearn.neighbors import NearestNeighbors -import pickle -import random - -experiment_path = EXPERIMENT_PATH + '/cocktails/representation_analysis/affective_mapping/' -min_max_path = CHECKPOINTS_PATH + "/cocktail_representation/minmax/" -cluster_model_path = CHECKPOINTS_PATH + "/music2cocktails/affects2affect_cluster/cluster_model.pickle" -affective_space_dimensions = ((-1, 1), (-1, 1), (-1, 1)) # valence, arousal, dominance -n_splits = (3, 3, 2) # number of bins per dimension -# dimensions_weights = [1, 1, 0.5] -dimensions_weights = [1, 1, 1] -total_n_clusters = np.prod(n_splits) # total number of bins -affective_boundaries = [np.arange(asd[0], asd[1]+1e-6, (asd[1] - asd[0]) / n_split) for asd, n_split in zip(affective_space_dimensions, n_splits)] -for af in affective_boundaries: - af[-1] += 1e-6 -all_keys = get_bunch_of_rep_keys()['custom'] -original_affective_keys = get_bunch_of_rep_keys()['affective'] -affective_keys = [a.split(' ')[1] for a in original_affective_keys] -random.seed(0) -cluster_colors = ['#%06X' % random.randint(0, 0xFFFFFF) for _ in range(total_n_clusters)] - -clustering_method = 'k_means' # 'k_means', 'handcoded', 'agglo', 'spectral' -if clustering_method != 'handcoded': - total_n_clusters = 10 -min_arousal = np.loadtxt(min_max_path + 'min_arousal.txt') -max_arousal = np.loadtxt(min_max_path + 'max_arousal.txt') -min_val = np.loadtxt(min_max_path + 'min_valence.txt') -max_val = np.loadtxt(min_max_path + 'max_valence.txt') -min_dom = np.loadtxt(min_max_path + 'min_dominance.txt') -max_dom = np.loadtxt(min_max_path + 'max_dominance.txt') - -def get_cocktail_reps(path, save=False): - cocktail_data = pd.read_csv(path) - cocktail_reps = np.array([cocktail_data[k] for k in original_affective_keys]).transpose() - n_data, dim_rep = cocktail_reps.shape - # print(f'{n_data} data points of {dim_rep} dimensions: {affective_keys}') - cocktail_reps = normalize_cocktail_reps_affective(cocktail_reps, save=save) - if save: - np.savetxt(experiment_path + f'cocktail_reps_for_affective_mapping_-1_1_norm_sigmoid_rescaling_{dim_rep}_keys.txt', cocktail_reps) - return cocktail_reps - -def sigmoid(x, shift, beta): - return (1 / (1 + np.exp(-(x + shift) * beta)) - 0.5) * 2 - -def normalize_cocktail_reps_affective(cocktail_reps, save=False): - if save: - min_cr = cocktail_reps.min(axis=0) - max_cr = cocktail_reps.max(axis=0) - np.savetxt(min_max_path + 'min_cocktail_reps_affective.txt', min_cr) - np.savetxt(min_max_path + 'max_cocktail_reps_affective.txt', max_cr) - else: - min_cr = np.loadtxt(min_max_path + 'min_cocktail_reps_affective.txt') - max_cr = np.loadtxt(min_max_path + 'max_cocktail_reps_affective.txt') - cocktail_reps = ((cocktail_reps - min_cr) / (max_cr - min_cr) - 0.5) * 2 - cocktail_reps[:, 0] 
= sigmoid(cocktail_reps[:, 0], shift=0.05, beta=4) - cocktail_reps[:, 1] = sigmoid(cocktail_reps[:, 1], shift=0.3, beta=5) - cocktail_reps[:, 2] = sigmoid(cocktail_reps[:, 2], shift=0.15, beta=3) - cocktail_reps[:, 3] = sigmoid(cocktail_reps[:, 3], shift=0.9, beta=20) - cocktail_reps[:, 4] = sigmoid(cocktail_reps[:, 4], shift=0, beta=4) - cocktail_reps[:, 5] = sigmoid(cocktail_reps[:, 5], shift=0.2, beta=3) - cocktail_reps[:, 6] = sigmoid(cocktail_reps[:, 6], shift=0.5, beta=5) - cocktail_reps[:, 7] = sigmoid(cocktail_reps[:, 7], shift=0.2, beta=6) - return cocktail_reps - -def plot(cocktail_reps): - dim_rep = cocktail_reps.shape[1] - for i in range(dim_rep): - for j in range(i+1, dim_rep): - plt.figure() - plt.scatter(cocktail_reps[:, i], cocktail_reps[:, j], s=150, alpha=0.5) - plt.xlabel(affective_keys[i]) - plt.ylabel(affective_keys[j]) - plt.savefig(experiment_path + f'scatters/{affective_keys[i]}_vs_{affective_keys[j]}.png', dpi=300) - plt.close('all') - plt.figure() - plt.hist(cocktail_reps[:, i]) - plt.xlabel(affective_keys[i]) - plt.savefig(experiment_path + f'hists/{affective_keys[i]}.png', dpi=300) - plt.close('all') - -def get_clusters(affective_coordinates, save=False): - if clustering_method in ['k_means', 'gmm',]: - if clustering_method == 'k_means': model = KMeans(n_clusters=total_n_clusters) - elif clustering_method == 'gmm': model = GaussianMixture(n_components=total_n_clusters, covariance_type="full") - model.fit(affective_coordinates * np.array(dimensions_weights)) - - def find_cluster(aff_coord): - if aff_coord.ndim == 1: - aff_coord = aff_coord.reshape(1, -1) - return model.predict(aff_coord * np.array(dimensions_weights)) - cluster_centers = model.cluster_centers_ if clustering_method == 'k_means' else [] - if save: - to_save = dict(cluster_model=model, - cluster_centers=cluster_centers, - nb_clusters=len(cluster_centers), - dimensions_weights=dimensions_weights) - with open(cluster_model_path, 'wb') as f: - pickle.dump(to_save, f) - stop= 1 - - elif clustering_method == 'handcoded': - def find_cluster(aff_coord): - if aff_coord.ndim == 1: - aff_coord = aff_coord.reshape(1, -1) - cluster_coordinates = [] - for i in range(aff_coord.shape[0]): - cluster_coordinates.append([np.argwhere(affective_boundaries[j] <= aff_coord[i, j]).flatten()[-1] for j in range(3)]) - cluster_coordinates = np.array(cluster_coordinates) - cluster_ids = cluster_coordinates[:, 0] * np.prod(n_splits[1:]) + cluster_coordinates[:, 1] * n_splits[-1] + cluster_coordinates[:, 2] - return cluster_ids - # find cluster centers - cluster_centers = [] - for i in range(n_splits[0]): - asd = affective_space_dimensions[0] - x_coordinate = np.arange(asd[0] + 1 / n_splits[0], asd[1], (asd[1] - asd[0]) / n_splits[0])[i] - for j in range(n_splits[1]): - asd = affective_space_dimensions[1] - y_coordinate = np.arange(asd[0] + 1 / n_splits[1], asd[1], (asd[1] - asd[0]) / n_splits[1])[j] - for k in range(n_splits[2]): - asd = affective_space_dimensions[2] - z_coordinate = np.arange(asd[0] + 1 / n_splits[2], asd[1], (asd[1] - asd[0]) / n_splits[2])[k] - cluster_centers.append([x_coordinate, y_coordinate, z_coordinate]) - cluster_centers = np.array(cluster_centers) - else: - raise NotImplemented - cluster_ids = find_cluster(affective_coordinates) - return cluster_ids, cluster_centers, find_cluster - - -def cocktail2affect(cocktail_reps, save=False): - if cocktail_reps.ndim == 1: - cocktail_reps = cocktail_reps.reshape(1, -1) - - assert affective_keys == ['booze', 'sweet', 'sour', 'fizzy', 'complex', 'bitter', 
'spicy', 'colorful'] - all_weights = [] - - # valence - # + sweet - bitter - booze + colorful - weights = np.array([-1, 1, 0, 0, 0, -1, 0, 1]) - valence = (cocktail_reps * weights).sum(axis=1) - if save: - min_ = valence.min() - max_ = valence.max() - np.savetxt(min_max_path + 'min_valence.txt', np.array([min_])) - np.savetxt(min_max_path + 'max_valence.txt', np.array([max_])) - else: - min_ = min_val - max_ = max_val - valence = 2 * ((valence - min_) / (max_ - min_) - 0.5) - valence = sigmoid(valence, shift=0.1, beta=3.5) - valence = valence.reshape(-1, 1) - all_weights.append(weights.copy()) - - # arousal - # + fizzy + sour + complex - sweet + spicy + bitter - # weights = np.array([0, -1, 1, 1, 1, 1, 1, 0]) - weights = np.array([0.7, 0, 1.5, 1.5, 0.6, 0, 0.6, 0]) - arousal = (cocktail_reps * weights).sum(axis=1) - if save: - min_ = arousal.min() - max_ = arousal.max() - np.savetxt(min_max_path + 'min_arousal.txt', np.array([min_])) - np.savetxt(min_max_path + 'max_arousal.txt', np.array([max_])) - else: - min_, max_ = min_arousal, max_arousal - arousal = 2 * ((arousal - min_) / (max_ - min_) - 0.5) # normalize to -1, 1 - arousal = sigmoid(arousal, shift=0.3, beta=4) - arousal = arousal.reshape(-1, 1) - all_weights.append(weights.copy()) - - # dominance - # assert affective_keys == ['booze', 'sweet', 'sour', 'fizzy', 'complex', 'bitter', 'spicy', 'colorful'] - # + booze + fizzy - complex - bitter - sweet - weights = np.array([1.5, -0.8, 0, 0.7, -1, -1.5, 0, 0]) - dominance = (cocktail_reps * weights).sum(axis=1) - if save: - min_ = dominance.min() - max_ = dominance.max() - np.savetxt(min_max_path + 'min_dominance.txt', np.array([min_])) - np.savetxt(min_max_path + 'max_dominance.txt', np.array([max_])) - else: - min_, max_ = min_dom, max_dom - dominance = 2 * ((dominance - min_) / (max_ - min_) - 0.5) - dominance = sigmoid(dominance, shift=-0.05, beta=5) - dominance = dominance.reshape(-1, 1) - all_weights.append(weights.copy()) - - affective_coordinates = np.concatenate([valence, arousal, dominance], axis=1) - # if save: - # assert (affective_coordinates.min(axis=0) == np.array([ac[0] for ac in affective_space_dimensions])).all() - # assert (affective_coordinates.max(axis=0) == np.array([ac[1] for ac in affective_space_dimensions])).all() - return affective_coordinates, all_weights - -def save_reps(path, affective_cluster_ids): - cocktail_data = pd.read_csv(path) - rep_keys = get_bunch_of_rep_keys()['custom'] - cocktail_reps = np.array([cocktail_data[k] for k in rep_keys]).transpose() - np.savetxt(experiment_path + 'clustered_representations/' + f'min_cocktail_reps_custom_keys_dim{cocktail_reps.shape[1]}.txt', cocktail_reps.min(axis=0)) - np.savetxt(experiment_path + 'clustered_representations/' + f'max_cocktail_reps_custom_keys_dim{cocktail_reps.shape[1]}.txt', cocktail_reps.max(axis=0)) - cocktail_reps = ((cocktail_reps - cocktail_reps.min(axis=0)) / (cocktail_reps.max(axis=0) - cocktail_reps.min(axis=0)) - 0.5) * 2 # normalize in -1, 1 - np.savetxt(experiment_path + 'clustered_representations/' + f'all_cocktail_reps_norm-1_1_custom_keys_dim{cocktail_reps.shape[1]}.txt', cocktail_reps) - np.savetxt(experiment_path + 'clustered_representations/' + 'affective_cluster_ids.txt', affective_cluster_ids) - for cluster_id in sorted(set(affective_cluster_ids)): - indexes = np.argwhere(affective_cluster_ids == cluster_id).flatten() - reps = cocktail_reps[indexes, :] - np.savetxt(experiment_path + 'clustered_representations/' + 
f'rep_cluster{cluster_id}_norm-1_1_custom_keys_dim{cocktail_reps.shape[1]}.txt', reps) - -def study_affects(affective_coordinates, affective_cluster_ids): - plt.figure() - plt.hist(affective_cluster_ids, bins=total_n_clusters) - plt.xlabel('Affective cluster ids') - plt.xticks(np.arange(total_n_clusters)) - plt.savefig(experiment_path + 'affective_cluster_distrib.png') - fig = plt.gcf() - plt.close(fig) - - fig = plt.figure() - ax = fig.add_subplot(projection='3d') - ax.set_xlim([-1, 1]) - ax.set_ylim([-1, 1]) - ax.set_zlim([-1, 1]) - for cluster_id in sorted(set(affective_cluster_ids)): - indexes = np.argwhere(affective_cluster_ids == cluster_id).flatten() - ax.scatter(affective_coordinates[indexes, 0], affective_coordinates[indexes, 1], affective_coordinates[indexes, 2], c=cluster_colors[cluster_id], s=150) - ax.set_xlabel('Valence') - ax.set_ylabel('Arousal') - ax.set_zlabel('Dominance') - stop = 1 - plt.savefig(experiment_path + 'scatters_affect/affective_mapping.png') - fig = plt.gcf() - plt.close(fig) - - affects = ['Valence', 'Arousal', 'Dominance'] - for i in range(3): - for j in range(i + 1, 3): - fig = plt.figure() - ax = fig.add_subplot() - for cluster_id in sorted(set(affective_cluster_ids)): - indexes = np.argwhere(affective_cluster_ids == cluster_id).flatten() - ax.scatter(affective_coordinates[indexes, i], affective_coordinates[indexes, j], alpha=0.5, c=cluster_colors[cluster_id], s=150) - ax.set_xlabel(affects[i]) - ax.set_ylabel(affects[j]) - plt.savefig(experiment_path + f'scatters_affect/scatter_{affects[i]}_vs_{affects[j]}.png') - fig = plt.gcf() - plt.close(fig) - plt.figure() - plt.hist(affective_coordinates[:, i]) - plt.xlabel(affects[i]) - plt.savefig(experiment_path + f'hists_affect/hist_{affects[i]}.png') - fig = plt.gcf() - plt.close(fig) - plt.close('all') - stop = 1 - -def sample_clusters(path, cocktail_reps, all_weights, affective_cluster_ids, affective_cluster_centers, affective_coordinates, n_samples=4): - cocktail_data = pd.read_csv(path) - these_cocktail_reps = normalize_cocktail_reps_affective(np.array([cocktail_data[k] for k in original_affective_keys]).transpose()) - - names = cocktail_data['names'] - urls = cocktail_data['urls'] - ingr_str = cocktail_data['ingredients_str'] - for cluster_id in sorted(set(affective_cluster_ids)): - indexes = np.argwhere(affective_cluster_ids == cluster_id).flatten() - print('\n\n\n---------\n----------\n-----------\n') - cluster_str = '' - cluster_str += f'Affective cluster #{cluster_id}' + \ - f'\n\tSize: {len(indexes)}' + \ - f'\n\tCenter: ' + \ - f'\n\t\tVal: {affective_cluster_centers[cluster_id][0]:.2f}, ' + \ - f'\n\t\tArousal: {affective_cluster_centers[cluster_id][1]:.2f}, ' + \ - f'\n\t\tDominance: {affective_cluster_centers[cluster_id][2]:.2f}' - print(cluster_str) - if affective_cluster_centers[cluster_id][2] == np.max(affective_cluster_centers[:, 2]): - stop = 1 - sampled_idx = np.random.choice(indexes, size=min(len(indexes), n_samples), replace=False) - cocktail_str = '' - for i in sampled_idx: - assert np.sum(cocktail_reps[i] - these_cocktail_reps[i]) < 1e-9 - cocktail_str += f'\n\n-------------' - cocktail_str += print_recipe(ingr_str[i], name=names[i], to_print=False) - cocktail_str += f'\nUrl: {urls[i]}' - cocktail_str += '\n\nRepresentation: ' + ', '.join([f'{af}: {cr:.2f}' for af, cr in zip(affective_keys, cocktail_reps[i])]) + '\n' - cocktail_str += '\n' + generate_explanation(cocktail_reps[i], all_weights, affective_coordinates[i]) - print(cocktail_str) - stop = 1 - cluster_str += '\n' + 
cocktail_str - with open(f"/home/cedric/Documents/pianocktail/experiments/cocktails/representation_analysis/affective_mapping/clusters/cluster_{cluster_id}", 'w') as f: - f.write(cluster_str) - stop = 1 - -def explanation_per_dimension(i, cocktail_rep, all_weights, aff_coord): - names = ['valence', 'arousal', 'dominance'] - weights = all_weights[i] - explanation_str = f'\n{names[i].capitalize()} explanation ({aff_coord[i]:.2f}):' - strengths = np.abs(weights * cocktail_rep) - strengths /= strengths.sum() - indexes = np.flip(np.argsort(strengths)) - for ind in indexes: - if strengths[ind] != 0: - if np.sign(weights[ind]) == np.sign(cocktail_rep[ind]): - keyword = 'high' if cocktail_rep[ind] > 0 else 'low' - explanation_str += f'\n\t{int(strengths[ind]*100)}%: higher {names[i]} because {keyword} {affective_keys[ind]}' - else: - keyword = 'high' if cocktail_rep[ind] > 0 else 'low' - explanation_str += f'\n\t{int(strengths[ind]*100)}%: low {names[i]} because {keyword} {affective_keys[ind]}' - return explanation_str - -def generate_explanation(cocktail_rep, all_weights, aff_coord): - explanation_str = '' - for i in range(3): - explanation_str += explanation_per_dimension(i, cocktail_rep, all_weights, aff_coord) - return explanation_str - -def cocktails2affect_clusters(cocktail_rep): - if cocktail_rep.ndim == 1: - cocktail_rep = cocktail_rep.reshape(1, -1) - affective_coordinates, _ = cocktail2affect(cocktail_rep) - affective_cluster_ids, _, _ = get_clusters(affective_coordinates) - return affective_cluster_ids - - -def setup_affective_space(path, save=False): - cocktail_data = pd.read_csv(path) - names = cocktail_data['names'] - recipes = cocktail_data['ingredients_str'] - urls = cocktail_data['urls'] - reps = get_cocktail_reps(path) - affective_coordinates, all_weights = cocktail2affect(reps) - affective_cluster_ids, affective_cluster_centers, find_cluster = get_clusters(affective_coordinates, save=save) - nn_model = NearestNeighbors(n_neighbors=1) - nn_model.fit(affective_coordinates) - def cocktail2affect_cluster(cocktail_rep): - affective_coordinates, _ = cocktail2affect(cocktail_rep) - return find_cluster(affective_coordinates) - - affective_clusters = dict(affective_coordinates=affective_coordinates, # coordinates of cocktail in affective space - affective_cluster_ids=affective_cluster_ids, # cluster id of cocktails - affective_cluster_centers=affective_cluster_centers, # cluster centers in affective space - affective_weights=all_weights, # weights to compute valence, arousal, dominance from cocktail representations - original_affective_keys=original_affective_keys, - cocktail_reps=reps, # cocktail representations from the dataset (normalized) - find_cluster=find_cluster, # function to retrieve a cluster from affective coordinates - nn_model=nn_model, # to predict the nearest neighbor affective space, - names=names, # names of cocktails in the dataset - urls=urls, # urls from the dataset - recipes=recipes, # recipes of the dataset - cocktail2affect=cocktail2affect, # function to compute affects from cocktail representations - cocktails2affect_clusters=cocktails2affect_clusters, - cocktail2affect_cluster=cocktail2affect_cluster - ) - - return affective_clusters - -if __name__ == '__main__': - reps = get_cocktail_reps(COCKTAILS_CSV_DATA, save=True) - # plot(reps) - affective_coordinates, all_weights = cocktail2affect(reps, save=True) - affective_cluster_ids, affective_cluster_centers, find_cluster = get_clusters(affective_coordinates) - save_reps(COCKTAILS_CSV_DATA, affective_cluster_ids) - 
study_affects(affective_coordinates, affective_cluster_ids) - sample_clusters(COCKTAILS_CSV_DATA, reps, all_weights, affective_cluster_ids, affective_cluster_centers, affective_coordinates) - setup_affective_space(COCKTAILS_CSV_DATA, save=True) diff --git a/spaces/charles0519/ChuanhuChatGPT/run_Linux.sh b/spaces/charles0519/ChuanhuChatGPT/run_Linux.sh deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/charles0519/ChuanhuChatGPT/run_Linux.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/chasemcdo/hf_localai/examples/query_data/README.md b/spaces/chasemcdo/hf_localai/examples/query_data/README.md deleted file mode 100644 index c4e384cdcb02f31c4de45fd2baf80c4eae486eba..0000000000000000000000000000000000000000 --- a/spaces/chasemcdo/hf_localai/examples/query_data/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Data query example - -This example makes use of [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents. - -It loosely follows [the quickstart](https://gpt-index.readthedocs.io/en/stable/guides/primer/usage_pattern.html). - -Summary of the steps: - -- prepare the dataset (and store it into `data`) -- prepare a vector index database to run queries on -- run queries - -## Requirements - -You will need a training data set. Copy that over `data`. - -## Setup - -Start the API: - -```bash -# Clone LocalAI -git clone https://github.com/go-skynet/LocalAI - -cd LocalAI/examples/query_data - -wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert -wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j - -# start with docker-compose -docker-compose up -d --build -``` - -### Create a storage - -In this step we will create a local vector database from our document set, so later we can ask questions on it with the LLM. - -Note: **OPENAI_API_KEY** is not required. However the library might fail if no API_KEY is passed by, so an arbitrary string can be used. - -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python store.py -``` - -After it finishes, a directory "storage" will be created with the vector index database. - -## Query - -We can now query the dataset. 
- -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python query.py -``` - -## Update - -To update our vector database, run `update.py` - -```bash -export OPENAI_API_BASE=http://localhost:8080/v1 -export OPENAI_API_KEY=sk- - -python update.py -``` \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/db/mixins/embeddings_queue.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/db/mixins/embeddings_queue.py deleted file mode 100644 index c5f3d180ae516b4654be6be5df80473defb92bb6..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/db/mixins/embeddings_queue.py +++ /dev/null @@ -1,270 +0,0 @@ -from chromadb.db.base import SqlDB, ParameterValue, get_sql -from chromadb.ingest import ( - Producer, - Consumer, - encode_vector, - decode_vector, - ConsumerCallbackFn, -) -from chromadb.types import ( - SubmitEmbeddingRecord, - EmbeddingRecord, - SeqId, - ScalarEncoding, - Operation, -) -from chromadb.config import System -from overrides import override -from collections import defaultdict -from typing import Tuple, Optional, Dict, Set, cast -from uuid import UUID -from pypika import Table, functions -import uuid -import json -import logging - -logger = logging.getLogger(__name__) - -_operation_codes = { - Operation.ADD: 0, - Operation.UPDATE: 1, - Operation.UPSERT: 2, - Operation.DELETE: 3, -} -_operation_codes_inv = {v: k for k, v in _operation_codes.items()} - - -class SqlEmbeddingsQueue(SqlDB, Producer, Consumer): - """A SQL database that stores embeddings, allowing a traditional RDBMS to be used as - the primary ingest queue and satisfying the top level Producer/Consumer interfaces. - - Note that this class is only suitable for use cases where the producer and consumer - are in the same process. - - This is because notification of new embeddings happens solely in-process: this - implementation does not actively listen to the database for new records added by - other processes.
- """ - - class Subscription: - id: UUID - topic_name: str - start: int - end: int - callback: ConsumerCallbackFn - - def __init__( - self, - id: UUID, - topic_name: str, - start: int, - end: int, - callback: ConsumerCallbackFn, - ): - self.id = id - self.topic_name = topic_name - self.start = start - self.end = end - self.callback = callback - - _subscriptions: Dict[str, Set[Subscription]] - - def __init__(self, system: System): - self._subscriptions = defaultdict(set) - super().__init__(system) - - @override - def reset_state(self) -> None: - super().reset_state() - self._subscriptions = defaultdict(set) - - @override - def create_topic(self, topic_name: str) -> None: - # Topic creation is implicit for this impl - pass - - @override - def delete_topic(self, topic_name: str) -> None: - t = Table("embeddings_queue") - q = ( - self.querybuilder() - .from_(t) - .where(t.topic == ParameterValue(topic_name)) - .delete() - ) - with self.tx() as cur: - sql, params = get_sql(q, self.parameter_format()) - cur.execute(sql, params) - - @override - def submit_embedding( - self, topic_name: str, embedding: SubmitEmbeddingRecord - ) -> SeqId: - if not self._running: - raise RuntimeError("Component not running") - - if embedding["embedding"]: - encoding_type = cast(ScalarEncoding, embedding["encoding"]) - encoding = encoding_type.value - embedding_bytes = encode_vector(embedding["embedding"], encoding_type) - - else: - embedding_bytes = None - encoding = None - metadata = json.dumps(embedding["metadata"]) if embedding["metadata"] else None - - t = Table("embeddings_queue") - insert = ( - self.querybuilder() - .into(t) - .columns(t.operation, t.topic, t.id, t.vector, t.encoding, t.metadata) - .insert( - ParameterValue(_operation_codes[embedding["operation"]]), - ParameterValue(topic_name), - ParameterValue(embedding["id"]), - ParameterValue(embedding_bytes), - ParameterValue(encoding), - ParameterValue(metadata), - ) - ) - with self.tx() as cur: - sql, params = get_sql(insert, self.parameter_format()) - sql = f"{sql} RETURNING seq_id" # Pypika doesn't support RETURNING - seq_id = int(cur.execute(sql, params).fetchone()[0]) - embedding_record = EmbeddingRecord( - id=embedding["id"], - seq_id=seq_id, - embedding=embedding["embedding"], - encoding=embedding["encoding"], - metadata=embedding["metadata"], - operation=embedding["operation"], - ) - self._notify_all(topic_name, embedding_record) - return seq_id - - @override - def subscribe( - self, - topic_name: str, - consume_fn: ConsumerCallbackFn, - start: Optional[SeqId] = None, - end: Optional[SeqId] = None, - id: Optional[UUID] = None, - ) -> UUID: - if not self._running: - raise RuntimeError("Component not running") - - subscription_id = id or uuid.uuid4() - start, end = self._validate_range(start, end) - - subscription = self.Subscription( - subscription_id, topic_name, start, end, consume_fn - ) - - # Backfill first, so if it errors we do not add the subscription - self._backfill(subscription) - self._subscriptions[topic_name].add(subscription) - - return subscription_id - - @override - def unsubscribe(self, subscription_id: UUID) -> None: - for topic_name, subscriptions in self._subscriptions.items(): - for subscription in subscriptions: - if subscription.id == subscription_id: - subscriptions.remove(subscription) - if len(subscriptions) == 0: - del self._subscriptions[topic_name] - return - - @override - def min_seqid(self) -> SeqId: - return -1 - - @override - def max_seqid(self) -> SeqId: - return 2**63 - 1 - - def _backfill(self, subscription: 
Subscription) -> None: - """Backfill the given subscription with any currently matching records in the - DB""" - t = Table("embeddings_queue") - q = ( - self.querybuilder() - .from_(t) - .where(t.topic == ParameterValue(subscription.topic_name)) - .where(t.seq_id > ParameterValue(subscription.start)) - .where(t.seq_id <= ParameterValue(subscription.end)) - .select(t.seq_id, t.operation, t.id, t.vector, t.encoding, t.metadata) - .orderby(t.seq_id) - ) - with self.tx() as cur: - sql, params = get_sql(q, self.parameter_format()) - cur.execute(sql, params) - rows = cur.fetchall() - for row in rows: - if row[3]: - encoding = ScalarEncoding(row[4]) - vector = decode_vector(row[3], encoding) - else: - encoding = None - vector = None - self._notify_one( - subscription, - EmbeddingRecord( - seq_id=row[0], - operation=_operation_codes_inv[row[1]], - id=row[2], - embedding=vector, - encoding=encoding, - metadata=json.loads(row[5]) if row[5] else None, - ), - ) - - def _validate_range( - self, start: Optional[SeqId], end: Optional[SeqId] - ) -> Tuple[int, int]: - """Validate and normalize the start and end SeqIDs for a subscription using this - impl.""" - start = start or self._next_seq_id() - end = end or self.max_seqid() - if not isinstance(start, int) or not isinstance(end, int): - raise ValueError("SeqIDs must be integers for sql-based EmbeddingsDB") - if start >= end: - raise ValueError(f"Invalid SeqID range: {start} to {end}") - return start, end - - def _next_seq_id(self) -> int: - """Get the next SeqID for this database.""" - t = Table("embeddings_queue") - q = self.querybuilder().from_(t).select(functions.Max(t.seq_id)) - with self.tx() as cur: - cur.execute(q.get_sql()) - return int(cur.fetchone()[0]) + 1 - - def _notify_all(self, topic: str, embedding: EmbeddingRecord) -> None: - """Send a notification to each subscriber of the given topic.""" - if self._running: - for sub in self._subscriptions[topic]: - self._notify_one(sub, embedding) - - def _notify_one(self, sub: Subscription, embedding: EmbeddingRecord) -> None: - """Send a notification to a single subscriber.""" - if embedding["seq_id"] > sub.end: - self.unsubscribe(sub.id) - return - - if embedding["seq_id"] <= sub.start: - return - - # Log errors instead of throwing them to preserve async semantics - # for consistency between local and distributed configurations - try: - sub.callback([embedding]) - except BaseException as e: - id = embedding.get("id", embedding.get("delete_id")) - logger.error( - f"Exception occurred invoking consumer for subscription {sub.id}" - + f"to topic {sub.topic_name} for embedding id {id} ", - e, - ) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/document.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/document.py deleted file mode 100644 index 4211b8ed13f4ebdf90daa4d805d5dc7686061a21..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/document.py +++ /dev/null @@ -1,67 +0,0 @@ -# encoding: utf-8 - -""" -Custom element classes that correspond to the document part, e.g. -. -""" - -from .xmlchemy import BaseOxmlElement, ZeroOrOne, ZeroOrMore - - -class CT_Document(BaseOxmlElement): - """ - ```` element, the root element of a document.xml file. - """ - body = ZeroOrOne('w:body') - - @property - def sectPr_lst(self): - """ - Return a list containing a reference to each ```` element - in the document, in the order encountered. 
- """ - return self.xpath('.//w:sectPr') - - -class CT_Body(BaseOxmlElement): - """ - ````, the container element for the main document story in - ``document.xml``. - """ - p = ZeroOrMore('w:p', successors=('w:sectPr',)) - tbl = ZeroOrMore('w:tbl', successors=('w:sectPr',)) - sectPr = ZeroOrOne('w:sectPr', successors=()) - - def add_section_break(self): - """Return `w:sectPr` element for new section added at end of document. - - The last `w:sectPr` becomes the second-to-last, with the new `w:sectPr` being an - exact clone of the previous one, except that all header and footer references - are removed (and are therefore now "inherited" from the prior section). - - A copy of the previously-last `w:sectPr` will now appear in a new `w:p` at the - end of the document. The returned `w:sectPr` is the sentinel `w:sectPr` for the - document (and as implemented, *is* the prior sentinel `w:sectPr` with headers - and footers removed). - """ - # ---get the sectPr at file-end, which controls last section (sections[-1])--- - sentinel_sectPr = self.get_or_add_sectPr() - # ---add exact copy to new `w:p` element; that is now second-to last section--- - self.add_p().set_sectPr(sentinel_sectPr.clone()) - # ---remove any header or footer references from "new" last section--- - for hdrftr_ref in sentinel_sectPr.xpath("w:headerReference|w:footerReference"): - sentinel_sectPr.remove(hdrftr_ref) - # ---the sentinel `w:sectPr` now controls the new last section--- - return sentinel_sectPr - - def clear_content(self): - """ - Remove all content child elements from this element. Leave - the element if it is present. - """ - if self.sectPr is not None: - content_elms = self[:-1] - else: - content_elms = self[:] - for content_elm in content_elms: - self.remove(content_elm) diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Shapath Full Movie in Hindi Free The Story of a Cops Fight Against Corruption.md b/spaces/cihyFjudo/fairness-paper-search/Download Shapath Full Movie in Hindi Free The Story of a Cops Fight Against Corruption.md deleted file mode 100644 index c96a9359df9f96d4e054ea1337692aab500da348..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download Shapath Full Movie in Hindi Free The Story of a Cops Fight Against Corruption.md +++ /dev/null @@ -1,10 +0,0 @@ - -

    Welcome to MovieMora.com with the new address Bookmark the URL, because you don't have to search to another place anymore to freely watch and download the movie Shapath. Direct link for downloading or online streaming movie Shapath on your mobile phone or laptop.

    -

    download Shapath full movie in hindi free


    Download Zip ○○○ https://tinurli.com/2uwkGe



    -

    download Mithun Fighting unlimited Movies and videos Download Here.Mithun Fighting Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

    -

    Watch the movie Shapath on the free film streaming website www.onlinemovieshindi.com (new web URL: ). Online streaming or downloading the video file easily. Watch or download Shapath online movie Hindi dubbed here.

    -

    Dear visitor, you can download the movie Shapath on this onlinemovieshindi website. It will download the HD video file by just clicking on the button below. The video file is the same file for the online streaming above when you directly click to play. The decision to download is entirely your choice and your personal responsibility when dealing with the legality of file ownership

    -

    The same as other websites such as hdmovieslatest, filmypunjab, moviemora, fridaybug and etc. You can watch the free online movie Hindi dubbed here. HD movies latest to see without a proxy unblocker app.

    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Melodious Etudes For Trombone Rochut Pdf Free A Must-Have Resource for Every Trombonist.md b/spaces/cihyFjudo/fairness-paper-search/Melodious Etudes For Trombone Rochut Pdf Free A Must-Have Resource for Every Trombonist.md deleted file mode 100644 index f17cd7d26a9b334340977745ab3a815eade98669..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Melodious Etudes For Trombone Rochut Pdf Free A Must-Have Resource for Every Trombonist.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Melodious Etudes For Trombone Rochut Pdf Free


    Download 🗸🗸🗸 https://tinurli.com/2uwibC



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Shards Of Azuria [Xforce] !!TOP!!.md b/spaces/cihyFjudo/fairness-paper-search/Shards Of Azuria [Xforce] !!TOP!!.md deleted file mode 100644 index 21c9302f2afc189134017dbc55e9b3ddf83c8fc3..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Shards Of Azuria [Xforce] !!TOP!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Shards of Azuria [Xforce]


    Download File ✒ ✒ ✒ https://tinurli.com/2uwjSR



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cleanmaster/akagi-sovits3/flask_api.py b/spaces/cleanmaster/akagi-sovits3/flask_api.py deleted file mode 100644 index 8cc236a1c34c9ddeddea99bcea13024fb0ccc90b..0000000000000000000000000000000000000000 --- a/spaces/cleanmaster/akagi-sovits3/flask_api.py +++ /dev/null @@ -1,56 +0,0 @@ -import io -import logging - -import soundfile -import torch -import torchaudio -from flask import Flask, request, send_file -from flask_cors import CORS - -from inference.infer_tool import Svc, RealTimeVC - -app = Flask(__name__) - -CORS(app) - -logging.getLogger('numba').setLevel(logging.WARNING) - - -@app.route("/voiceChangeModel", methods=["POST"]) -def voice_change_model(): - request_form = request.form - wave_file = request.files.get("sample", None) - # 变调信息 - f_pitch_change = float(request_form.get("fPitchChange", 0)) - # DAW所需的采样率 - daw_sample = int(float(request_form.get("sampleRate", 0))) - speaker_id = int(float(request_form.get("sSpeakId", 0))) - # http获得wav文件并转换 - input_wav_path = io.BytesIO(wave_file.read()) - - # 模型推理 - if raw_infer: - out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample) - else: - out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path) - tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample) - # 返回音频 - out_wav_path = io.BytesIO() - soundfile.write(out_wav_path, tar_audio.cpu().numpy(), daw_sample, format="wav") - out_wav_path.seek(0) - return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) - - -if __name__ == '__main__': - # 启用则为直接切片合成,False为交叉淡化方式 - # vst插件调整0.3-0.5s切片时间可以降低延迟,直接切片方法会有连接处爆音、交叉淡化会有轻微重叠声音 - # 自行选择能接受的方法,或将vst最大切片时间调整为1s,此处设为Ture,延迟大音质稳定一些 - raw_infer = True - # 每个模型和config是唯一对应的 - model_name = "logs/32k/G_174000-Copy1.pth" - config_name = "configs/config.json" - svc_model = Svc(model_name, config_name) - svc = RealTimeVC() - # 此处与vst插件对应,不建议更改 - app.run(port=6842, host="0.0.0.0", debug=False, threaded=False) diff --git a/spaces/clip-italian/clip-italian-demo/localization.py b/spaces/clip-italian/clip-italian-demo/localization.py deleted file mode 100644 index 562963075f0535f06d61f22928775b8ec6b91f47..0000000000000000000000000000000000000000 --- a/spaces/clip-italian/clip-italian-demo/localization.py +++ /dev/null @@ -1,238 +0,0 @@ -import streamlit as st -from text2image import get_model, get_tokenizer, get_image_transform -from utils import text_encoder -from torchvision import transforms -from PIL import Image -from jax import numpy as jnp -import pandas as pd -import numpy as np -import requests -import psutil -import time -import jax -import gc - - -headers = { - "User-Agent": - "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582" -} - -preprocess = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Resize(224), - transforms.Normalize( - (0.48145466, 0.4578275, 0.40821073), - (0.26862954, 0.26130258, 0.27577711) - ), - ] -) - - -def resize_longer(image, longer_size=224): - old_size = image.size - ratio = float(longer_size) / max(old_size) - new_size = tuple([int(x * ratio) for x in old_size]) - image = image.resize(new_size, Image.ANTIALIAS) - return image - - -def pad_to_square(image): - (a,b)=image.shape[:2] - if a 50: - time.sleep(sleep_time) - - if not caption or not image_url: - st.error("Please choose one image and at least one 
label") - else: - with st.spinner( - "Computing... This might take up to a few minutes depending on the current load 😕 \n" - "Otherwise, you can use this [Colab notebook](https://colab.research.google.com/drive/10neENr1DEAFq_GzsLqBDo0gZ50hOhkOr?usp=sharing)" - ): - heatmap, image = get_heatmap(image_url, caption, pixel_size, iterations) - - with col1: - st.image(image, use_column_width=True) - st.image(heatmap, use_column_width=True) - st.image(np.asarray(image) / 255.0 * heatmap, use_column_width=True) - gc.collect() - - elif image_url: - image = requests.get( - image_url, - headers=headers, - stream=True, - ).raw - image = Image.open(image).convert("RGB") - with col1: - st.image(image) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/filelock/_api.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/filelock/_api.py deleted file mode 100644 index 7754f084fc7b656a44dfb4e2a0b6d0a10f112eaf..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/filelock/_api.py +++ /dev/null @@ -1,281 +0,0 @@ -from __future__ import annotations - -import contextlib -import logging -import os -import time -import warnings -from abc import ABC, abstractmethod -from dataclasses import dataclass -from threading import local -from typing import TYPE_CHECKING, Any - -from ._error import Timeout - -if TYPE_CHECKING: - from types import TracebackType - -_LOGGER = logging.getLogger("filelock") - - -# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__ -# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired -# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak) -class AcquireReturnProxy: - """A context aware object that will release the lock file when exiting.""" - - def __init__(self, lock: BaseFileLock) -> None: - self.lock = lock - - def __enter__(self) -> BaseFileLock: - return self.lock - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: - self.lock.release() - - -@dataclass -class FileLockContext: - """A dataclass which holds the context for a ``BaseFileLock`` object.""" - - # The context is held in a separate class to allow optional use of thread local storage via the - # ThreadLocalFileContext class. - - #: The path to the lock file. - lock_file: str - - #: The default timeout value. - timeout: float - - #: The mode for the lock files - mode: int - - #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held - lock_file_fd: int | None = None - - #: The lock counter is used for implementing the nested locking mechanism. - lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 - - -class ThreadLocalFileContext(FileLockContext, local): - """A thread local version of the ``FileLockContext`` class.""" - - -class BaseFileLock(ABC, contextlib.ContextDecorator): - """Abstract base class for a file lock object.""" - - def __init__( - self, - lock_file: str | os.PathLike[Any], - timeout: float = -1, - mode: int = 0o644, - thread_local: bool = True, # noqa: FBT001, FBT002 - ) -> None: - """ - Create a new lock object. 
- - :param lock_file: path to the file - :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in - the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it - to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock. - :param mode: file permissions for the lockfile. - :param thread_local: Whether this object's internal context should be thread local or not. - If this is set to ``False`` then the lock will be reentrant across threads. - """ - self._is_thread_local = thread_local - - # Create the context. Note that external code should not work with the context directly and should instead use - # properties of this class. - kwargs: dict[str, Any] = { - "lock_file": os.fspath(lock_file), - "timeout": timeout, - "mode": mode, - } - self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) - - def is_thread_local(self) -> bool: - """:return: a flag indicating if this lock is thread local or not""" - return self._is_thread_local - - @property - def lock_file(self) -> str: - """:return: path to the lock file""" - return self._context.lock_file - - @property - def timeout(self) -> float: - """ - :return: the default timeout value, in seconds - - .. versionadded:: 2.0.0 - """ - return self._context.timeout - - @timeout.setter - def timeout(self, value: float | str) -> None: - """ - Change the default timeout value. - - :param value: the new value, in seconds - """ - self._context.timeout = float(value) - - @abstractmethod - def _acquire(self) -> None: - """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" - raise NotImplementedError - - @abstractmethod - def _release(self) -> None: - """Releases the lock and sets self._context.lock_file_fd to None.""" - raise NotImplementedError - - @property - def is_locked(self) -> bool: - """ - - :return: A boolean indicating if the lock file is holding the lock currently. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._context.lock_file_fd is not None - - @property - def lock_counter(self) -> int: - """:return: The number of times this lock has been acquired (but not yet released).""" - return self._context.lock_counter - - def acquire( - self, - timeout: float | None = None, - poll_interval: float = 0.05, - *, - poll_intervall: float | None = None, - blocking: bool = True, - ) -> AcquireReturnProxy: - """ - Try to acquire the file lock. - - :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and - if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired - :param poll_interval: interval of trying to acquire the lock file - :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead - :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the - first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. - :raises Timeout: if fails to acquire lock within the timeout period - :return: a context object that will unlock the file when the context is exited - - .. 
code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or use an equivalent try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self._context.timeout - - if poll_intervall is not None: - msg = "use poll_interval instead of poll_intervall" - warnings.warn(msg, DeprecationWarning, stacklevel=2) - poll_interval = poll_intervall - - # Increment the number right at the beginning. We can still undo it, if something fails. - self._context.lock_counter += 1 - - lock_id = id(self) - lock_filename = self.lock_file - start_time = time.perf_counter() - try: - while True: - if not self.is_locked: - _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) - self._acquire() - if self.is_locked: - _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) - break - if blocking is False: - _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) # noqa: TRY301 - if 0 <= timeout < time.perf_counter() - start_time: - _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) # noqa: TRY301 - msg = "Lock %s not acquired on %s, waiting %s seconds ..." - _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) - time.sleep(poll_interval) - except BaseException: # Something did go wrong, so decrement the counter. - self._context.lock_counter = max(0, self._context.lock_counter - 1) - raise - return AcquireReturnProxy(lock=self) - - def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 - """ - Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also - note, that the lock file itself is not automatically deleted. - - :param force: If true, the lock counter is ignored and the lock is released in every case/ - """ - if self.is_locked: - self._context.lock_counter -= 1 - - if self._context.lock_counter == 0 or force: - lock_id, lock_filename = id(self), self.lock_file - - _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) - self._release() - self._context.lock_counter = 0 - _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) - - def __enter__(self) -> BaseFileLock: - """ - Acquire the lock. - - :return: the lock object - """ - self.acquire() - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: - """ - Release the lock. 
- - :param exc_type: the exception type if raised - :param exc_value: the exception value if raised - :param traceback: the exception traceback if raised - """ - self.release() - - def __del__(self) -> None: - """Called when the lock object is deleted.""" - self.release(force=True) - - -__all__ = [ - "BaseFileLock", - "AcquireReturnProxy", -] diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py deleted file mode 100644 index 70ae4c17eac8bb1e0deb7f8584e979be65dfd09b..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py +++ /dev/null @@ -1,321 +0,0 @@ -# SVG Path specification parser. -# This is an adaptation from 'svg.path' by Lennart Regebro (@regebro), -# modified so that the parser takes a FontTools Pen object instead of -# returning a list of svg.path Path objects. -# The original code can be found at: -# https://github.com/regebro/svg.path/blob/4f9b6e3/src/svg/path/parser.py -# Copyright (c) 2013-2014 Lennart Regebro -# License: MIT - -from .arc import EllipticalArc -import re - - -COMMANDS = set("MmZzLlHhVvCcSsQqTtAa") -ARC_COMMANDS = set("Aa") -UPPERCASE = set("MZLHVCSQTA") - -COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])") - -# https://www.w3.org/TR/css-syntax-3/#number-token-diagram -# but -6.e-5 will be tokenized as "-6" then "-5" and confuse parsing -FLOAT_RE = re.compile( - r"[-+]?" # optional sign - r"(?:" - r"(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?" # int/float - r"|" - r"(?:\.[0-9]+(?:[eE][-+]?[0-9]+)?)" # float with leading dot (e.g. '.42') - r")" -) -BOOL_RE = re.compile("^[01]") -SEPARATOR_RE = re.compile(f"[, \t]") - - -def _tokenize_path(pathdef): - arc_cmd = None - for x in COMMAND_RE.split(pathdef): - if x in COMMANDS: - arc_cmd = x if x in ARC_COMMANDS else None - yield x - continue - - if arc_cmd: - try: - yield from _tokenize_arc_arguments(x) - except ValueError as e: - raise ValueError(f"Invalid arc command: '{arc_cmd}{x}'") from e - else: - for token in FLOAT_RE.findall(x): - yield token - - -ARC_ARGUMENT_TYPES = ( - ("rx", FLOAT_RE), - ("ry", FLOAT_RE), - ("x-axis-rotation", FLOAT_RE), - ("large-arc-flag", BOOL_RE), - ("sweep-flag", BOOL_RE), - ("x", FLOAT_RE), - ("y", FLOAT_RE), -) - - -def _tokenize_arc_arguments(arcdef): - raw_args = [s for s in SEPARATOR_RE.split(arcdef) if s] - if not raw_args: - raise ValueError(f"Not enough arguments: '{arcdef}'") - raw_args.reverse() - - i = 0 - while raw_args: - arg = raw_args.pop() - - name, pattern = ARC_ARGUMENT_TYPES[i] - match = pattern.search(arg) - if not match: - raise ValueError(f"Invalid argument for '{name}' parameter: {arg!r}") - - j, k = match.span() - yield arg[j:k] - arg = arg[k:] - - if arg: - raw_args.append(arg) - - # wrap around every 7 consecutive arguments - if i == 6: - i = 0 - else: - i += 1 - - if i != 0: - raise ValueError(f"Not enough arguments: '{arcdef}'") - - -def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc): - """Parse SVG path definition (i.e. "d" attribute of elements) - and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath - methods. - - If 'current_pos' (2-float tuple) is provided, the initial moveTo will - be relative to that instead being absolute. 
- - If the pen has an "arcTo" method, it is called with the original values - of the elliptical arc curve commands: - - pen.arcTo(rx, ry, rotation, arc_large, arc_sweep, (x, y)) - - Otherwise, the arcs are approximated by series of cubic Bezier segments - ("curveTo"), one every 90 degrees. - """ - # In the SVG specs, initial movetos are absolute, even if - # specified as 'm'. This is the default behavior here as well. - # But if you pass in a current_pos variable, the initial moveto - # will be relative to that current_pos. This is useful. - current_pos = complex(*current_pos) - - elements = list(_tokenize_path(pathdef)) - # Reverse for easy use of .pop() - elements.reverse() - - start_pos = None - command = None - last_control = None - - have_arcTo = hasattr(pen, "arcTo") - - while elements: - - if elements[-1] in COMMANDS: - # New command. - last_command = command # Used by S and T - command = elements.pop() - absolute = command in UPPERCASE - command = command.upper() - else: - # If this element starts with numbers, it is an implicit command - # and we don't change the command. Check that it's allowed: - if command is None: - raise ValueError( - "Unallowed implicit command in %s, position %s" - % (pathdef, len(pathdef.split()) - len(elements)) - ) - last_command = command # Used by S and T - - if command == "M": - # Moveto command. - x = elements.pop() - y = elements.pop() - pos = float(x) + float(y) * 1j - if absolute: - current_pos = pos - else: - current_pos += pos - - # M is not preceded by Z; it's an open subpath - if start_pos is not None: - pen.endPath() - - pen.moveTo((current_pos.real, current_pos.imag)) - - # when M is called, reset start_pos - # This behavior of Z is defined in svg spec: - # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand - start_pos = current_pos - - # Implicit moveto commands are treated as lineto commands. - # So we set command to lineto here, in case there are - # further implicit commands after this moveto. - command = "L" - - elif command == "Z": - # Close path - if current_pos != start_pos: - pen.lineTo((start_pos.real, start_pos.imag)) - pen.closePath() - current_pos = start_pos - start_pos = None - command = None # You can't have implicit commands after closing. - - elif command == "L": - x = elements.pop() - y = elements.pop() - pos = float(x) + float(y) * 1j - if not absolute: - pos += current_pos - pen.lineTo((pos.real, pos.imag)) - current_pos = pos - - elif command == "H": - x = elements.pop() - pos = float(x) + current_pos.imag * 1j - if not absolute: - pos += current_pos.real - pen.lineTo((pos.real, pos.imag)) - current_pos = pos - - elif command == "V": - y = elements.pop() - pos = current_pos.real + float(y) * 1j - if not absolute: - pos += current_pos.imag * 1j - pen.lineTo((pos.real, pos.imag)) - current_pos = pos - - elif command == "C": - control1 = float(elements.pop()) + float(elements.pop()) * 1j - control2 = float(elements.pop()) + float(elements.pop()) * 1j - end = float(elements.pop()) + float(elements.pop()) * 1j - - if not absolute: - control1 += current_pos - control2 += current_pos - end += current_pos - - pen.curveTo( - (control1.real, control1.imag), - (control2.real, control2.imag), - (end.real, end.imag), - ) - current_pos = end - last_control = control2 - - elif command == "S": - # Smooth curve. First control point is the "reflection" of - # the second control point in the previous path. 
- - if last_command not in "CS": - # If there is no previous command or if the previous command - # was not an C, c, S or s, assume the first control point is - # coincident with the current point. - control1 = current_pos - else: - # The first control point is assumed to be the reflection of - # the second control point on the previous command relative - # to the current point. - control1 = current_pos + current_pos - last_control - - control2 = float(elements.pop()) + float(elements.pop()) * 1j - end = float(elements.pop()) + float(elements.pop()) * 1j - - if not absolute: - control2 += current_pos - end += current_pos - - pen.curveTo( - (control1.real, control1.imag), - (control2.real, control2.imag), - (end.real, end.imag), - ) - current_pos = end - last_control = control2 - - elif command == "Q": - control = float(elements.pop()) + float(elements.pop()) * 1j - end = float(elements.pop()) + float(elements.pop()) * 1j - - if not absolute: - control += current_pos - end += current_pos - - pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) - current_pos = end - last_control = control - - elif command == "T": - # Smooth curve. Control point is the "reflection" of - # the second control point in the previous path. - - if last_command not in "QT": - # If there is no previous command or if the previous command - # was not an Q, q, T or t, assume the first control point is - # coincident with the current point. - control = current_pos - else: - # The control point is assumed to be the reflection of - # the control point on the previous command relative - # to the current point. - control = current_pos + current_pos - last_control - - end = float(elements.pop()) + float(elements.pop()) * 1j - - if not absolute: - end += current_pos - - pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) - current_pos = end - last_control = control - - elif command == "A": - rx = abs(float(elements.pop())) - ry = abs(float(elements.pop())) - rotation = float(elements.pop()) - arc_large = bool(int(elements.pop())) - arc_sweep = bool(int(elements.pop())) - end = float(elements.pop()) + float(elements.pop()) * 1j - - if not absolute: - end += current_pos - - # if the pen supports arcs, pass the values unchanged, otherwise - # approximate the arc with a series of cubic bezier curves - if have_arcTo: - pen.arcTo( - rx, - ry, - rotation, - arc_large, - arc_sweep, - (end.real, end.imag), - ) - else: - arc = arc_class( - current_pos, rx, ry, rotation, arc_large, arc_sweep, end - ) - arc.draw(pen) - - current_pos = end - - # no final Z command, it's an open path - if start_pos is not None: - pen.endPath() diff --git a/spaces/codertoro/gpt-academic/app.py b/spaces/codertoro/gpt-academic/app.py deleted file mode 100644 index 5474fe16da074a06ac5a83c1c66cd897e78b96a2..0000000000000000000000000000000000000000 --- a/spaces/codertoro/gpt-academic/app.py +++ /dev/null @@ -1,175 +0,0 @@ -import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 -import gradio as gr -from request_llm.bridge_chatgpt import predict -from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith - -# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到 -proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - -# 如果WEB_PORT是-1, 则随机选取WEB端口 -PORT = find_free_port() if WEB_PORT <= 0 else 
WEB_PORT -if not AUTHENTICATION: AUTHENTICATION = None - -from check_proxy import get_current_version -initial_prompt = "Serve me as a writing and programming assistant." -title_html = f"

    ChatGPT 学术优化 {get_current_version()}

    " -description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)""" - -# 问询记录, python 版本建议3.9+(越新越好) -import logging -os.makedirs("gpt_log", exist_ok=True) -try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8") -except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO) -print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!") - -# 一些普通功能模块 -from core_functional import get_core_functions -functional = get_core_functions() - -# 高级函数插件 -from crazy_functional import get_crazy_functions -crazy_fns = get_crazy_functions() - -# 处理markdown文本格式的转变 -gr.Chatbot.postprocess = format_io - -# 做一些外观色彩上的调整 -from theme import adjust_theme, advanced_css -set_theme = adjust_theme() - -# 代理与自动更新 -from check_proxy import check_proxy, auto_update -proxy_info = check_proxy(proxies) - -gr_L1 = lambda: gr.Row().style() -gr_L2 = lambda scale: gr.Column(scale=scale) -if LAYOUT == "TOP-DOWN": - gr_L1 = lambda: DummyWith() - gr_L2 = lambda scale: gr.Row() - CHATBOT_HEIGHT /= 2 - -cancel_handles = [] -with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css, api_key = gr.Textbox(show_label=False, value="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").style(container=False)) as demo: - gr.HTML(title_html) - #gr.HTML('''
    Duplicate Space请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!打开页面后请在输入框内输入API-KEY然后回车。
    切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取
    ''') - cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL}) - with gr_L1(): - with gr_L2(scale=2): - chatbot = gr.Chatbot() - chatbot.style(height=CHATBOT_HEIGHT) - history = gr.State([]) - with gr_L2(scale=1): - with gr.Accordion("输入区", open=True) as area_input_primary: - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False) - with gr.Row(): - submitBtn = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm") - stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") - with gr.Row(): - status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}") - with gr.Accordion("基础功能区", open=True) as area_basic_fn: - with gr.Row(): - for k in functional: - variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" - functional[k]["Button"] = gr.Button(k, variant=variant) - with gr.Accordion("函数插件区", open=True) as area_crazy_fn: - with gr.Row(): - gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.") - with gr.Row(): - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - crazy_fns[k]["Button"] = gr.Button(k, variant=variant) - crazy_fns[k]["Button"].style(size="sm") - with gr.Row(): - with gr.Accordion("更多函数插件", open=True): - dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)] - with gr.Column(scale=1): - dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False) - with gr.Column(scale=1): - switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary") - with gr.Row(): - with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up: - file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple") - with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")): - system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt) - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区") - gr.Markdown(description) - with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary: - with gr.Row(): - txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False) - with gr.Row(): - submitBtn2 = gr.Button("提交", variant="primary") - with gr.Row(): - resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm") - stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm") - # 功能区显示开关与功能区的互动 - def fn_area_visibility(a): - ret = {} - ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) - ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))}) - ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))}) - ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))}) - if "底部输入区" in a: ret.update({txt: gr.update(value="")}) - return ret - checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] ) - # 整理反复出现的控件句柄组合 - input_combo = [cookies, txt, txt2, top_p, temperature, 
chatbot, history, system_prompt] - output_combo = [cookies, chatbot, history, status] - predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo) - # 提交按钮、重置按钮 - cancel_handles.append(txt.submit(**predict_args)) - cancel_handles.append(txt2.submit(**predict_args)) - cancel_handles.append(submitBtn.click(**predict_args)) - cancel_handles.append(submitBtn2.click(**predict_args)) - resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - # 基础功能区的回调函数注册 - for k in functional: - click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) - cancel_handles.append(click_handle) - # 文件上传区,接收文件后与chatbot的互动 - file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt]) - # 函数插件-固定按钮区 - for k in crazy_fns: - if not crazy_fns[k].get("AsButton", True): continue - click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - cancel_handles.append(click_handle) - # 函数插件-下拉菜单与随变按钮的互动 - def on_dropdown_changed(k): - variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary" - return {switchy_bt: gr.update(value=k, variant=variant)} - dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] ) - # 随变按钮的回调函数注册 - def route(k, *args, **kwargs): - if k in [r"打开插件列表", r"请先从插件列表中选择"]: return - yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs) - click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo) - click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot]) - # def expand_file_area(file_upload, area_file_up): - # if len(file_upload)>0: return {area_file_up: gr.update(open=True)} - # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up]) - cancel_handles.append(click_handle) - # 终止按钮的回调函数注册 - stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) - stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles) -# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 -def auto_opentab_delay(): - import threading, webbrowser, time - print(f"如果浏览器没有自动打开,请复制并转到以下URL:") - print(f"\t(亮色主题): http://localhost:{PORT}") - print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true") - def open(): - time.sleep(2) # 打开浏览器 - webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true") - threading.Thread(target=open, name="open-browser", daemon=True).start() - threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start() - -auto_opentab_delay() -demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False) \ No newline at end of file diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_profile.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_profile.h deleted file mode 100644 index 4365f1b4b1492b6e7ea9218697aa0f7b8f39722c..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dv_profile.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DV_PROFILE_H -#define AVCODEC_DV_PROFILE_H - -#include - -#include "libavutil/pixfmt.h" -#include "libavutil/rational.h" - -/* minimum number of bytes to read from a DV stream in order to - * determine the profile */ -#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */ - - -/* - * AVDVProfile is used to express the differences between various - * DV flavors. For now it's primarily used for differentiating - * 525/60 and 625/50, but the plans are to use it for various - * DV specs as well (e.g. SMPTE314M vs. IEC 61834). - */ -typedef struct AVDVProfile { - int dsf; /* value of the dsf in the DV header */ - int video_stype; /* stype for VAUX source pack */ - int frame_size; /* total size of one frame in bytes */ - int difseg_size; /* number of DIF segments per DIF channel */ - int n_difchan; /* number of DIF channels per frame */ - AVRational time_base; /* 1/framerate */ - int ltc_divisor; /* FPS from the LTS standpoint */ - int height; /* picture height in pixels */ - int width; /* picture width in pixels */ - AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ - enum AVPixelFormat pix_fmt; /* picture pixel format */ - int bpm; /* blocks per macroblock */ - const uint8_t *block_sizes; /* AC block sizes, in bits */ - int audio_stride; /* size of audio_shuffle table */ - int audio_min_samples[3]; /* min amount of audio samples */ - /* for 48kHz, 44.1kHz and 32kHz */ - int audio_samples_dist[5]; /* how many samples are supposed to be */ - /* in each frame in a 5 frames window */ - const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */ -} AVDVProfile; - -/** - * Get a DV profile for the provided compressed frame. - * - * @param sys the profile used for the previous frame, may be NULL - * @param frame the compressed data buffer - * @param buf_size size of the buffer in bytes - * @return the DV profile for the supplied data or NULL on failure - */ -const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys, - const uint8_t *frame, unsigned buf_size); - -/** - * Get a DV profile for the provided stream parameters. - */ -const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt); - -/** - * Get a DV profile for the provided stream parameters. - * The frame rate is used as a best-effort parameter. 
- */ -const AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate); - -#endif /* AVCODEC_DV_PROFILE_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/gsm.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/gsm.h deleted file mode 100644 index 53d65c4dc75e9a3015b305a3b503c7b860209447..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/gsm.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * GSM common header - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_GSM_H -#define AVCODEC_GSM_H - -/* bytes per block */ -#define GSM_BLOCK_SIZE 33 -#define GSM_MS_BLOCK_SIZE 65 -#define MSN_MIN_BLOCK_SIZE 41 - -/* samples per block */ -#define GSM_FRAME_SIZE 160 - -enum GSMModes { - GSM_13000 = 0, - MSN_12400, - MSN_11800, - MSN_11200, - MSN_10600, - MSN_10000, - MSN_9400, - MSN_8800, - MSN_8200, - NUM_GSM_MODES -}; - -#endif /* AVCODEC_GSM_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263.c deleted file mode 100644 index b30ffaf878d42c431ea5c709d2b6dc7fd9a2a4a1..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h263.c +++ /dev/null @@ -1,246 +0,0 @@ -/* - * H.263/MPEG-4 backend for encoder and decoder - * Copyright (c) 2000,2001 Fabrice Bellard - * H.263+ support. - * Copyright (c) 2001 Juan J. Sierralta P - * Copyright (c) 2002-2004 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * H.263/MPEG-4 codec. 
- */ - -#include "libavutil/thread.h" -#include "mpegvideo.h" -#include "h263.h" -#include "h263data.h" -#include "h263dsp.h" -#include "idctdsp.h" -#include "mathops.h" -#include "mpegpicture.h" -#include "mpegutils.h" -#include "rl.h" - -static av_cold void h263_init_rl_inter(void) -{ - static uint8_t h263_rl_inter_table[2][2 * MAX_RUN + MAX_LEVEL + 3]; - ff_rl_init(&ff_h263_rl_inter, h263_rl_inter_table); -} - -av_cold void ff_h263_init_rl_inter(void) -{ - static AVOnce init_static_once = AV_ONCE_INIT; - ff_thread_once(&init_static_once, h263_init_rl_inter); -} - -void ff_h263_update_motion_val(MpegEncContext * s){ - const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; - //FIXME a lot of that is only needed for !low_delay - const int wrap = s->b8_stride; - const int xy = s->block_index[0]; - - s->current_picture.mbskip_table[mb_xy] = s->mb_skipped; - - if(s->mv_type != MV_TYPE_8X8){ - int motion_x, motion_y; - if (s->mb_intra) { - motion_x = 0; - motion_y = 0; - } else if (s->mv_type == MV_TYPE_16X16) { - motion_x = s->mv[0][0][0]; - motion_y = s->mv[0][0][1]; - } else /*if (s->mv_type == MV_TYPE_FIELD)*/ { - int i; - motion_x = s->mv[0][0][0] + s->mv[0][1][0]; - motion_y = s->mv[0][0][1] + s->mv[0][1][1]; - motion_x = (motion_x>>1) | (motion_x&1); - for(i=0; i<2; i++){ - s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0]; - s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1]; - } - s->current_picture.ref_index[0][4*mb_xy ] = - s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0]; - s->current_picture.ref_index[0][4*mb_xy + 2] = - s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1]; - } - - /* no update if 8X8 because it has been done during parsing */ - s->current_picture.motion_val[0][xy][0] = motion_x; - s->current_picture.motion_val[0][xy][1] = motion_y; - s->current_picture.motion_val[0][xy + 1][0] = motion_x; - s->current_picture.motion_val[0][xy + 1][1] = motion_y; - s->current_picture.motion_val[0][xy + wrap][0] = motion_x; - s->current_picture.motion_val[0][xy + wrap][1] = motion_y; - s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x; - s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y; - } - - if(s->encoding){ //FIXME encoding MUST be cleaned up - if (s->mv_type == MV_TYPE_8X8) - s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8; - else if(s->mb_intra) - s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA; - else - s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16; - } -} - -void ff_h263_loop_filter(MpegEncContext * s){ - int qp_c; - const int linesize = s->linesize; - const int uvlinesize= s->uvlinesize; - const int xy = s->mb_y * s->mb_stride + s->mb_x; - uint8_t *dest_y = s->dest[0]; - uint8_t *dest_cb= s->dest[1]; - uint8_t *dest_cr= s->dest[2]; - - /* - Diag Top - Left Center - */ - if (!IS_SKIP(s->current_picture.mb_type[xy])) { - qp_c= s->qscale; - s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize, linesize, qp_c); - s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c); - }else - qp_c= 0; - - if(s->mb_y){ - int qp_dt, qp_tt, qp_tc; - - if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride])) - qp_tt=0; - else - qp_tt = s->current_picture.qscale_table[xy - s->mb_stride]; - - if(qp_c) - qp_tc= qp_c; - else - qp_tc= qp_tt; - - if(qp_tc){ - const int chroma_qp= s->chroma_qscale_table[qp_tc]; - s->h263dsp.h263_v_loop_filter(dest_y, linesize, qp_tc); - s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc); - - s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, 
chroma_qp); - s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp); - } - - if(qp_tt) - s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt); - - if(s->mb_x){ - if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride])) - qp_dt= qp_tt; - else - qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride]; - - if(qp_dt){ - const int chroma_qp= s->chroma_qscale_table[qp_dt]; - s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize, linesize, qp_dt); - s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp); - s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp); - } - } - } - - if(qp_c){ - s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c); - if(s->mb_y + 1 == s->mb_height) - s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c); - } - - if(s->mb_x){ - int qp_lc; - if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1])) - qp_lc= qp_c; - else - qp_lc = s->current_picture.qscale_table[xy - 1]; - - if(qp_lc){ - s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc); - if(s->mb_y + 1 == s->mb_height){ - const int chroma_qp= s->chroma_qscale_table[qp_lc]; - s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc); - s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp); - s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp); - } - } - } -} - -int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir, - int *px, int *py) -{ - int wrap; - int16_t *A, *B, *C, (*mot_val)[2]; - static const int off[4]= {2, 1, 1, -1}; - - wrap = s->b8_stride; - mot_val = s->current_picture.motion_val[dir] + s->block_index[block]; - - A = mot_val[ - 1]; - /* special case for first (slice) line */ - if (s->first_slice_line && block<3) { - // we can't just change some MVs to simulate that as we need them for the B-frames (and ME) - // and if we ever support non rectangular objects than we need to do a few ifs here anyway :( - if(block==0){ //most common case - if(s->mb_x == s->resync_mb_x){ //rare - *px= *py = 0; - }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare - C = mot_val[off[block] - wrap]; - if(s->mb_x==0){ - *px = C[0]; - *py = C[1]; - }else{ - *px = mid_pred(A[0], 0, C[0]); - *py = mid_pred(A[1], 0, C[1]); - } - }else{ - *px = A[0]; - *py = A[1]; - } - }else if(block==1){ - if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare - C = mot_val[off[block] - wrap]; - *px = mid_pred(A[0], 0, C[0]); - *py = mid_pred(A[1], 0, C[1]); - }else{ - *px = A[0]; - *py = A[1]; - } - }else{ /* block==2*/ - B = mot_val[ - wrap]; - C = mot_val[off[block] - wrap]; - if(s->mb_x == s->resync_mb_x) //rare - A[0]=A[1]=0; - - *px = mid_pred(A[0], B[0], C[0]); - *py = mid_pred(A[1], B[1], C[1]); - } - } else { - B = mot_val[ - wrap]; - C = mot_val[off[block] - wrap]; - *px = mid_pred(A[0], B[0], C[0]); - *py = mid_pred(A[1], B[1], C[1]); - } - return *mot_val; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_init_mips.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_init_mips.c deleted file mode 100644 index f7ecb34dcc46545bbd0b6bc8121924425a2960f7..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/hevcpred_init_mips.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com) - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/mips/cpu.h" -#include "config.h" -#include "libavutil/attributes.h" -#include "libavcodec/mips/hevcpred_mips.h" - -void ff_hevc_pred_init_mips(HEVCPredContext *c, const int bit_depth) -{ - int cpu_flags = av_get_cpu_flags(); - - if (have_msa(cpu_flags)) { - if (bit_depth == 8) { - c->intra_pred[2] = ff_intra_pred_8_16x16_msa; - c->intra_pred[3] = ff_intra_pred_8_32x32_msa; - c->pred_planar[0] = ff_hevc_intra_pred_planar_0_msa; - c->pred_planar[1] = ff_hevc_intra_pred_planar_1_msa; - c->pred_planar[2] = ff_hevc_intra_pred_planar_2_msa; - c->pred_planar[3] = ff_hevc_intra_pred_planar_3_msa; - c->pred_dc = ff_hevc_intra_pred_dc_msa; - c->pred_angular[0] = ff_pred_intra_pred_angular_0_msa; - c->pred_angular[1] = ff_pred_intra_pred_angular_1_msa; - c->pred_angular[2] = ff_pred_intra_pred_angular_2_msa; - c->pred_angular[3] = ff_pred_intra_pred_angular_3_msa; - } - } -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Dragon Throne Battle of Red Cliffs The Best Real-Time Strategy Game with Economic Elements.md b/spaces/congsaPfin/Manga-OCR/logs/Download Dragon Throne Battle of Red Cliffs The Best Real-Time Strategy Game with Economic Elements.md deleted file mode 100644 index 48188e34238932b03ac90cc1ffb4b75c76e97d16..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Dragon Throne Battle of Red Cliffs The Best Real-Time Strategy Game with Economic Elements.md +++ /dev/null @@ -1,72 +0,0 @@ -
    -

    Download Dragon Throne Battle of Red Cliffs: A Guide for RTS Fans

    -

    If you are a fan of real-time strategy games, especially those that are set in ancient China, you might want to check out Dragon Throne Battle of Red Cliffs. This game is a hidden gem that offers a unique and immersive experience of commanding armies, managing resources, and conquering territories in the turbulent era of the Three Kingdoms. In this article, we will give you a brief introduction to the game, explain why you should download it, and show you how to do it.

    -




    -

    What is Dragon Throne Battle of Red Cliffs?

    -

    Dragon Throne Battle of Red Cliffs is a real-time strategy game with extensive economic elements. It was developed by Object Software and published by Strategy First in 2002. The game is based on the historical events and characters of the Three Kingdoms period (220-280 AD), when China was divided into three rival states: Shu, Wei, and Wu. The game's title refers to the famous Battle of Red Cliffs, which was a decisive naval victory for the allied forces of Shu and Wu against Wei in 208 AD.

    -

    A brief overview of the game's features and setting

    -

    The game follows a civil war fought in medieval China. You can choose to play as one of three warlords: Liu Bei of Shu, Cao Cao of Wei, or Sun Quan of Wu. Each warlord has his own campaign, which consists of several missions that follow the historical timeline. You will have to complete various objectives, such as capturing cities, defeating enemies, forming alliances, or defending your territory.

    -

    The game features a realistic and detailed map of China, which covers more than 50 provinces and 200 cities. You can zoom in and out to see different levels of detail, from the whole country to individual buildings. You can also interact with various elements on the map, such as rivers, mountains, forests, bridges, or fortifications.

    -

    The game also features a complex and dynamic economic system, which requires you to manage your resources carefully. You will have to gather wood, stone, grain, iron, gold, silk, and horses, and keep your population growing. You will also have to build different types of structures, such as farms, mines, markets, barracks, temples, palaces, or walls. You will have to balance your production and consumption, as well as trade with other factions or merchants.

    -

    The three campaigns and the warlords you can play as

    -

    The game offers three campaigns that let you experience the story from different perspectives. Each campaign has its own challenges and opportunities, as well as its own set of commanders that you can recruit and upgrade. Your commanders are your most valuable assets in the game, as they can lead your troops into battle, perform special abilities, or influence your diplomacy. Your commanders can gain experience through battle, becoming more effective as you upgrade them over the course of a campaign.

    The gameplay mechanics and the challenges you will face

    -

    The game is a real-time strategy game, which means that you will have to make quick decisions and react to changing situations. You will have to control your units, which can be infantry, cavalry, archers, catapults, or ships. You can also use special units, such as spies, assassins, monks, or heroes. You can give your units various commands, such as move, attack, defend, or retreat. You can also use formations, such as wedge, square, or line.

    -


    -

    The game is not easy, as you will have to face many challenges and threats. You will have to deal with enemy attacks, natural disasters, rebellions, diseases, or famines. You will also have to consider the morale and loyalty of your troops and commanders, as well as the happiness and satisfaction of your people. You will have to use diplomacy and negotiation to forge alliances or avoid conflicts. You will also have to follow the rules of war and honor, such as respecting the truce or accepting the surrender.

    -

    Why should you download Dragon Throne Battle of Red Cliffs?

    -

    If you are still not convinced that Dragon Throne Battle of Red Cliffs is a game worth playing, here are some reasons why you should download it:

    -

    The game's strengths and advantages over other RTS games

    -

    Dragon Throne Battle of Red Cliffs is a game that stands out from other RTS games for several reasons. First of all, it has a unique and captivating setting that immerses you in the history and culture of ancient China. Second, it has a rich and diverse gameplay that combines strategy, tactics, economics, and politics. Third, it has a high level of realism and detail that makes you feel like you are really in charge of a kingdom. Fourth, it has a challenging and rewarding difficulty that tests your skills and intelligence.

    -

    The game's historical accuracy and cultural richness

    -

    Dragon Throne Battle of Red Cliffs is a game that respects and honors the historical facts and figures of the Three Kingdoms period. The game features many authentic characters, events, locations, and artifacts that are based on historical records and sources. The game also showcases the cultural diversity and richness of ancient China, such as the different customs, beliefs, arts, and philosophies of each faction. The game also incorporates elements of Chinese mythology and folklore, such as dragons, phoenixes, or gods.

    -

    The game's replay value and modding potential

    -

    Dragon Throne Battle of Red Cliffs is a game that offers a lot of replay value and modding potential. The game has three campaigns that let you play as different warlords with different stories and outcomes. The game also has a skirmish mode that lets you play against the AI or other players on various maps and scenarios. The game also has a map editor that lets you create your own maps and missions. The game also supports modding, which means that you can customize or enhance the game with new content or features created by yourself or other fans.

    Can I use cheats or hacks in Dragon Throne Battle of Red Cliffs?

    Yes, you can use cheats or hacks to make the game easier or more fun. For example, you can use the cheat codes built into the game, such as "iamgreatcornholio" to get unlimited resources, or "everybodylovesme" to make all factions friendly. You can also use some hacks that are available on the internet, such as trainers, editors, or injectors.

    -

    Where can I find more information and guides for Dragon Throne Battle of Red Cliffs?

    -

    You can find more information and guides for Dragon Throne Battle of Red Cliffs on various websites and forums that are dedicated to the game or the genre. For example, you can visit the official website of the developer, Object Software, or the official website of the publisher, Strategy First. You can also visit some fan sites, such as Dragon Throne Fansite, or some gaming sites, such as GameFAQs or IGN.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Fire Red ROM and Relive the Nostalgia of Pokemon.md b/spaces/congsaPfin/Manga-OCR/logs/Download Fire Red ROM and Relive the Nostalgia of Pokemon.md deleted file mode 100644 index fbf215bbaf1199487e6841ccba1da904c484f02c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Fire Red ROM and Relive the Nostalgia of Pokemon.md +++ /dev/null @@ -1,118 +0,0 @@ -
    -

    How to Download ROM Fire Red and Play Pokemon on Your PC or Mobile Device

    -

    If you are a fan of Pokemon games, you might have heard of ROM Fire Red, a remake of the classic Pokemon Red for the Game Boy Advance. This game lets you relive the adventure of catching and training Pokemon in the Kanto region, with improved graphics, sound, and gameplay. But did you know that you can also play this game on your PC or mobile device, without needing a physical console or cartridge? In this article, we will show you how to download ROM Fire Red and play it on an emulator, so you can enjoy this amazing game anytime, anywhere.

    -

    What is ROM Fire Red?

    -

    ROM Fire Red is a file that contains a copy of the actual game Pokemon Fire Red for the Game Boy Advance. Strictly speaking, a ROM is a digital copy of the original cartridge, while fan-modified versions of such files are known as ROM hacks. Fire Red itself is a remake of the original Pokemon Red, with many changes and improvements. For example, it has some new features, such as:

    -




    -
      -
    • A contextual help menu that gives you useful information about the game
    • -
    • A new region called the Sevii Islands that you can access after completing the main story
    • -
    • A compatibility with the Game Boy Advance Wireless Adapter, which allows you to connect with other players wirelessly
    • -
    -

    ROM Fire Red is one of the most popular and well-made Pokemon games ever, and it has received many positive reviews from critics and fans alike. It is also part of the third generation of Pokemon games, which means that it can interact with other games from that generation, such as Pokemon Ruby, Sapphire, Emerald, Colosseum, and XD.

    -

    Why Download ROM Fire Red?

    -

    There are many reasons why you might want to download ROM Fire Red and play it on an emulator. Here are some of them:

    -
      -
    • You can play the game on any device that supports an emulator, such as Windows, Mac, Android, or iPhone. This means that you don't need to buy or carry a Game Boy Advance console or cartridge.
    • -
    • You can save your progress anytime and anywhere, without worrying about losing your data or battery life. You can also load your game from different points and try different scenarios.
    • -
    • You can customize your gameplay with various options and settings, such as changing the speed, sound, graphics, controls, cheats, codes, etc.
    • -
    • You can connect with other players online and trade or battle with them using emulators that support multiplayer mode.
    • -
    • You can experience a nostalgic feeling of playing one of the best Pokemon games ever made.
    • -
    -

    How to Download ROM Fire Red?

    -

    Now that you know what ROM Fire Red is and why you should download it, let's see how you can actually do it. There are two main steps involved in downloading and playing ROM Fire Red:

    -
      -
    1. Finding and downloading ROM Fire Red from a reliable source
    2. -
    3. Choosing and installing an emulator that can run ROM Fire Red

      Where to Find ROM Fire Red?

      -

      Before you can play ROM Fire Red, you need to download the file from a reliable source. There are many websites that offer ROM files for various games, but not all of them are safe and legal. Some of them may contain viruses, malware, or unwanted software that can harm your device or compromise your privacy. Some of them may also violate the intellectual property rights of the game developers and publishers, which can get you in trouble with the law.

      -

      Therefore, you should be careful when choosing a source for downloading ROM Fire Red. You should only download from reputable and trustworthy websites that have positive reviews and feedback from other users. You should also scan the file with an antivirus program before opening it, and avoid clicking on any suspicious links or ads that may appear on the website.

      -

      One of the best sources for downloading ROM Fire Red is Reddit, where you can find various subreddits dedicated to Pokemon ROM hacks and emulators. For example, you can check out r/PokemonROMhacks, where you can find a list of the best Pokemon FireRed ROM hacks, including ROM Fire Red. You can also ask for recommendations and advice from other users who have experience with downloading and playing ROM Fire Red.

      -


      -

      Another good source for downloading ROM Fire Red is Roms, a website that offers a large collection of ROM files for various consoles and systems. You can easily find ROM Fire Red by searching for it on the website, and download it with a single click. The website also provides information about the game, such as the release date, genre, rating, and description. However, you should be aware that this website may not have the permission to distribute ROM files, and you may be violating the law by downloading them.

      -

      How to Choose an Emulator?

      -

      An emulator is a software program that mimics the functionality of a console or system, allowing you to play games that are not compatible with your device. For example, if you want to play ROM Fire Red on your PC or mobile device, you need an emulator that can run Game Boy Advance games.

      -

      There are many emulators available for different platforms and devices, but not all of them are equally good. Some of them may have better performance, compatibility, features, and user interface than others. Some of them may also require more technical skills and knowledge to set up and use.

      -

      Therefore, you should choose an emulator that suits your needs and preferences. You should consider factors such as:

      -
        -
      • The device and operating system that you are using
      • -
      • The quality and speed of the emulation
      • -
      • The compatibility and support for ROM Fire Red and other games
      • -
      • The ease of installation and configuration
      • -
      • The availability of options and settings
      • -
      • The legality and safety of the emulator
      • -
      -

      One emulator that often comes up is DeSmuME, a free and open-source emulator, but it emulates the Nintendo DS rather than the Game Boy Advance, so it is not the right tool for ROM Fire Red. For GBA games, a better free and open-source choice is mGBA, which runs on Windows, macOS, and Linux, is also available as a core in RetroArch (including on Android), and offers features such as save states, cheat codes, screenshots, and fast-forward. You can download mGBA from its official website.

      -

      Another good emulator for playing ROM Fire Red is Visual Boy Advance, which is one of the most popular and widely used emulators for Game Boy Advance games. It works on Windows devices, and it has excellent compatibility and performance for ROM Fire Red and other games. It also has many features, such as save states, cheats, screenshots, video recording, link cable emulation, etc. You can download Visual Boy Advance from its official website or from other sources online.

      -

      How to Install ROM Fire Red on Your Emulator?

      -

      After you have downloaded ROM Fire Red and chosen an emulator, you need to install the game on your emulator so that you can play it. The process may vary depending on the emulator that you are using, but generally it involves these steps:

      -
        -
      1. Extract the ROM Fire Red file from the zip or rar archive that you downloaded. You should get a file with a .gba extension (see the sketch after this list for a scripted version of this step).
      2. -
      3. Launch your emulator and locate the ROM Fire Red file on your device.
      4. -
      5. Select the file and open it with your emulator.
      6. -
      7. Configure the settings of your emulator according to your preferences. You can change the controls, graphics, sound, speed, etc.
      8. -
      9. Enjoy playing ROM Fire Red on your emulator!
      10. -
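    As a small illustration of step 1, here is a minimal Python sketch (not part of the original article) that pulls the .gba file out of a downloaded zip archive. The archive name "fire_red.zip" and the "roms" output folder are placeholder assumptions, and only zip archives are handled; a rar archive would need a separate tool such as unrar.

```python
# Minimal sketch: extract the .gba ROM from a downloaded zip archive.
# "fire_red.zip" and the "roms" folder are placeholder names.
import zipfile
from pathlib import Path

archive = Path("fire_red.zip")   # the archive you downloaded (assumed name)
out_dir = Path("roms")           # where the extracted .gba should go
out_dir.mkdir(exist_ok=True)

with zipfile.ZipFile(archive) as zf:
    for name in zf.namelist():
        if name.lower().endswith(".gba"):
            zf.extract(name, out_dir)
            print(f"Extracted {name} into {out_dir}/")
```

    After that, open the extracted file in your emulator exactly as described in the steps above.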
      your emulator.
    4. -
    5. Select the multiplayer mode option and choose whether you want to join or host a server or a room.
    6. -
    7. If you want to join a server or a room, enter the IP address or the name of the server or the room that you want to join. You may also need to enter a password or a username.
    8. -
    9. If you want to host a server or a room, create a name and a password for your server or room. You may also need to configure some settings, such as the number of players, the game mode, the latency, etc.
    10. -
    11. Confirm your choice and wait for the emulator to connect you with other players.
    12. -
    13. To trade or battle with other players, access the in-game menu and select the option to link with other players. You can then choose whether you want to trade or battle, and select the Pokemon that you want to use.
    14. -
    -

    Conclusion

    -

    ROM Fire Red is a great game that you can play on your PC or mobile device using an emulator. It is a remake of the classic Pokemon Red for the Game Boy Advance, with improved graphics, sound, and gameplay. It also has some new features, such as a contextual help menu, a new region, and a wireless compatibility. By downloading ROM Fire Red and playing it on an emulator, you can enjoy this game anytime, anywhere, without needing a physical console or cartridge. You can also save your progress, use cheats and codes, and connect with other players online.

    -

    If you are interested in playing ROM Fire Red, you can follow the steps that we have outlined in this article. You need to find and download ROM Fire Red from a reliable source, choose and install an emulator that can run ROM Fire Red, and install the game on your emulator. You can then play the game and have fun with its features. We hope that this article has helped you learn how to download ROM Fire Red and play Pokemon on your PC or mobile device.

    -

    FAQs

    -

    Here are some frequently asked questions and answers about ROM Fire Red and emulators:

    -
      -
    • Q: Is ROM Fire Red legal?
    • -
    • A: ROM Fire Red is not legal in most countries, as it is a copy of the original game Pokemon Fire Red that is distributed without the permission of the game developers and publishers. However, some countries may have exceptions or loopholes that allow you to download ROM Fire Red for personal use or backup purposes. You should check the laws of your country before downloading ROM Fire Red.
    • -
    • Q: Is ROM Fire Red safe?
    • -
    • A: ROM Fire Red is safe if you download it from a reputable and trustworthy source, such as Reddit or Roms. However, some sources may contain viruses, malware, or unwanted software that can harm your device or compromise your privacy. You should always scan the file with an antivirus program before opening it, and avoid clicking on any suspicious links or ads that may appear on the website.
    • -
    • Q: Which emulator is best for ROM Fire Red?
    • -
    • A: The best emulator for ROM Fire Red depends on your device and preferences. However, some of the most popular and widely used emulators for ROM Fire Red are DeSmuME for Nintendo DS games and Visual Boy Advance for Game Boy Advance games. These emulators have high compatibility and performance for ROM Fire Red and other games, as well as many features and options.
    • -
    • Q: How do I update ROM Fire Red?
    • -
    • A: ROM Fire Red does not have any official updates from the game developers or publishers. However, some unofficial updates may be available from the ROM hackers or modders who created ROM Fire Red. These updates may fix some bugs or glitches, add some features or content, or improve some aspects of the game. You can find these updates online, such as on Reddit or Roms. However, you should be careful when downloading these updates, as they may not be compatible with your version of ROM Fire Red or your emulator.
    • -
    • Q: How do I delete ROM Fire Red?
    • -
    • A: To delete ROM Fire Red from your device, you need to delete the file that contains the game data. This file has a .gba extension and is usually located in the same folder as your emulator. You can also delete any save states or cheats that you have created for ROM Fire Red using your emulator's menu.
    • -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Maps PubG Mobile How to Access the New Aftermath Map and More.md b/spaces/congsaPfin/Manga-OCR/logs/Download Maps PubG Mobile How to Access the New Aftermath Map and More.md deleted file mode 100644 index ec7a34d4f6486ff65f1f58239a20556c816e3111..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Maps PubG Mobile How to Access the New Aftermath Map and More.md +++ /dev/null @@ -1,122 +0,0 @@ -
    -

    Download Maps PUBG Mobile: How to Get More Content and Fun

    -

    PUBG Mobile is one of the most popular and addictive games in the world. It is a battle royale game where you have to survive against 99 other players on a shrinking map. You can play solo, duo, or squad mode, and choose from different modes, such as classic, arcade, or arena. But did you know that you can also download additional maps and resources to make your game more diverse and enjoyable? In this article, we will show you how to download maps PUBG Mobile, as well as some tips and tricks to make the process easier. We will also tell you about the benefits of downloading maps PUBG Mobile, and why you should do it right now.

    -




    -

    Introduction

    -

    What is PUBG Mobile and why do you need to download maps?

    -

    PUBG Mobile is a mobile version of the popular PC game PlayerUnknown's Battlegrounds (PUBG). It is developed by Tencent Games and PUBG Corporation, and was released in 2018. The game has over 100 million downloads on Google Play Store, and has won several awards, such as the Best Game of 2018 by Google Play.

    -

    The game is based on the concept of battle royale, which is a genre of online multiplayer games where a large number of players compete against each other until only one remains. The game starts with 100 players parachuting onto an island, where they have to scavenge for weapons, armor, vehicles, and other items. The map gradually shrinks as a blue zone closes in, forcing the players to move towards a safe zone. The last player or team standing wins the match.

    -

    One of the main features of PUBG Mobile is that it offers different maps for different modes. The default map is Erangel, which is based on a fictional Russian island. Other maps include Miramar (a desert map), Sanhok (a jungle map), Vikendi (a snow map), Livik (a Nordic map), Karakin (a rocky map), and Metro Royale (a post-apocalyptic map). Each map has its own terrain, weather, loot, vehicles, and secrets.

    -

    However, not all maps are available by default when you install the game. To reduce the size of the app, some maps are separate downloads that you have to manually download from the game settings. This way, you can choose which maps you want to play, and save some storage space on your device. Downloading maps PUBG Mobile also allows you to access more content and features, such as new modes, events, missions, skins, outfits, weapons, vehicles, and more.

    -


    -

    How to download maps PUBG Mobile

    -

    Step 1: Go to settings and select download

    -

    The first step to download maps PUBG Mobile is to go to the game settings. You can do this by tapping the up arrow button on the bottom right corner of the main dashboard, and then selecting settings. In settings, select download from the right sidebar. Here you can see the resource packs that are available to download.

    -


    Step 2: Choose the maps and resources you want to download

    -

    The next step to download maps PUBG Mobile is to choose the maps and resources you want to download. You can see the size, description, and preview of each resource pack. You can also see the recommended packs based on your device performance and network speed. To select a resource pack, simply tap on the download button next to it. You can select multiple packs at once, or download them one by one.

    -

    Step 3: Wait for the download to complete and enjoy the game

    -

    The final step to download maps PUBG Mobile is to wait for the download to complete. You can see the progress of the download on the top of the screen. You can also pause or resume the download at any time. Once the download is complete, you can tap on the play button to start the game. You can now access the new maps and resources from the mode selection screen. Enjoy the game with more content and fun!

    -

    Tips and tricks for downloading maps PUBG Mobile

    -

    Tip 1: Make sure you have enough storage space and a stable Wi-Fi connection

    -

    One of the most important tips for downloading maps PUBG Mobile is to make sure you have enough storage space and a stable Wi-Fi connection. Some of the resource packs are quite large, ranging from 100 MB to 1 GB. Therefore, you need to have enough free space on your device to store them. You can check your storage space by going to settings > storage on your device. You can also delete some unwanted apps or files to free up some space.
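    To make the storage check concrete, here is a minimal Python sketch (an illustration, not an official PUBG Mobile tool) of the same idea for players who run the game through a PC emulator: it compares the free space on a drive against the rough size of the largest resource packs. The 1 GB threshold comes from the 100 MB to 1 GB range mentioned above, and the path is an assumption.

```python
# Minimal sketch: make sure there is enough free space before downloading
# a large resource pack. The threshold and the path are illustrative only.
import shutil

REQUIRED_BYTES = 1 * 1024 ** 3              # the largest packs are roughly 1 GB
total, used, free = shutil.disk_usage("/")  # use the drive the game lives on

if free < REQUIRED_BYTES:
    print(f"Only {free / 1024 ** 2:.0f} MB free - clear some space first.")
else:
    print(f"{free / 1024 ** 3:.1f} GB free - enough room for the download.")
```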

    -

    Another important tip is to use a stable Wi-Fi connection when downloading maps PUBG Mobile. This will ensure that your download is fast and smooth, and that you don't incur any extra data charges. You can also turn on the auto-download option in settings > download, which will automatically download new resource packs when you are connected to Wi-Fi.

    -

    Tip 2: Save the maps to your device and not on an SD card

    -

    Another tip for downloading maps PUBG Mobile is to save the maps to your device and not on an SD card. This will improve the performance and loading speed of the game, as well as prevent any corruption or loss of data. To save the maps to your device, go to settings > download > storage location, and select internal storage.

    -

    Tip 3: Update your device's operating system and the game regularly

    -

    The last tip for downloading maps PUBG Mobile is to update your device's operating system and the game regularly. This will ensure that your device is compatible with the latest version of the game, and that you can enjoy all the new features and bug fixes. To update your device's operating system, go to settings > system > system update, and check for any available updates.

    -

    To update the game, go to Google Play Store or App Store, and search for PUBG Mobile. Tap on update if there is a new version available.

    -

    Benefits of downloading maps PUBG Mobile

    -

    Benefit 1: Explore new locations and scenarios

    -

    One of the main benefits of downloading maps PUBG Mobile is that you can explore new locations and scenarios. Each map has its own unique features and challenges, such as different terrain, weather, loot, vehicles, and secrets. For example, you can experience a desert storm in Miramar, a snowstorm in Vikendi, a volcanic eruption in Livik, or a subway system in Metro Royale.

    -

    By exploring new locations and scenarios, you can discover new strategies and tactics, as well as have more fun and excitement with your friends.

    -

    Benefit 2: Enhance your gameplay and skills

    -

    Another benefit of downloading maps PUBG Mobile is that you can enhance your gameplay and skills. Each map requires different skills and techniques, such as sniping, close combat, stealth, driving, or survival. For example, you can practice your sniping skills in Miramar, your close combat skills in Sanhok, your stealth skills in Vikendi, your driving skills in Karakin, or your survival skills in Metro Royale.

    -

    By enhancing your gameplay and skills, you can improve your chances of winning matches, as well as earn more rewards and achievements.

    -

    Benefit 3: Have more fun and excitement with your friends

    -

    The last benefit of downloading maps PUBG Mobile is that you can have more fun and excitement with your friends. Each map offers different modes and events, such as classic, arcade, arena, or special modes. You can also customize your own matches with different settings, such as map, mode, team size, weather, and more. You can invite your friends to join your matches, or join other matches hosted by other players. You can also chat with your friends using voice or text messages, and share your highlights and screenshots with them.

    -

    By having more fun and excitement with your friends, you can strengthen your bond and teamwork, as well as make new friends and memories.

    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, downloading maps PUBG Mobile is a great way to get more content and fun from the game. You can download maps PUBG Mobile by following these simple steps:

    -
      -
    • Go to settings and select download
    • -
    • Choose the maps and resources you want to download
    • -
    • Wait for the download to complete and enjoy the game
    • -
    -

    You can also follow these tips and tricks to make the process easier:

    -
      -
    • Make sure you have enough storage space and a stable Wi-Fi connection
    • -
    • Save the maps to your device and not on an SD card
    • -
    • Update your device's operating system and the game regularly
    • -
    -

    By downloading maps PUBG Mobile, you can enjoy these benefits:

    -
      -
    • Explore new locations and scenarios
    • -
    • Enhance your gameplay and skills
    • -
    • Have more fun and excitement with your friends
    • -
    -

    Call to action and final thoughts

    -

    If you are a fan of PUBG Mobile, you should definitely download maps PUBG Mobile to get the most out of the game. You will not regret it, as you will discover new ways to play and have fun. You will also improve your performance and rank in the game, as well as impress your friends with your skills and knowledge.

    -

    So what are you waiting for? Download maps PUBG Mobile today and enjoy the ultimate battle royale experience!

    -

    Frequently Asked Questions (FAQs)

    -

    Q: How many maps are there in PUBG Mobile?

    -

    A: There are currently seven maps in PUBG Mobile: Erangel, Miramar, Sanhok, Vikendi, Livik, Karakin, and Metro Royale. Each map has its own size, terrain, weather, loot, vehicles, and secrets.

    -

    Q: How can I change the map in PUBG Mobile?

    -

    A: You can change the map in PUBG Mobile by tapping on the map icon on the top left corner of the mode selection screen. You can then select the map you want to play from the list of available maps. You can also select random map if you want to play on a random map.

    -

    Q: Which map is the best in PUBG Mobile?

    -

    A: There is no definitive answer to this question, as different maps suit different players' preferences and styles. However, some of the most popular maps in PUBG Mobile are Erangel (the original map), Sanhok (the smallest map), Vikendi (the snow map), and Livik (the newest map).

    -

    Q: How can I delete a map in PUBG Mobile?

    -

    A: You can delete a map in PUBG Mobile by going to settings > download > resource pack management. Here you can see the list of downloaded resource packs. To delete a resource pack, simply tap on the delete button next to it. You can also delete all resource packs at once by tapping on delete all.

    -

    Q: How often do new maps come out in PUBG Mobile?

    -

    A: There is no fixed schedule for new maps in PUBG Mobile, but usually there is a new map every few months. The latest map in PUBG Mobile is Karakin, which was released in April 2021.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Tekken 3 APK and Fight Your Way to Victory on Android.md b/spaces/congsaPfin/Manga-OCR/logs/Download Tekken 3 APK and Fight Your Way to Victory on Android.md deleted file mode 100644 index 57ce50c769e6a53b9e507e7101f4d62fd986a0a8..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Tekken 3 APK and Fight Your Way to Victory on Android.md +++ /dev/null @@ -1,138 +0,0 @@ - -

    Download Tekken 3 APK Game: The Best Fighting Game for Android

    -

    Do you love fighting games? Do you want to relive the classic arcade game that has captivated millions of players around the world? If yes, then you should download Tekken 3 APK game on your android device today. Tekken 3 is one of the best fighting games of all time, and now you can play it on your mobile phone with ease. In this article, we will tell you everything you need to know about Tekken 3, how to download and install it, what are its features and benefits, and how to master the game and win every fight. Let's get started!

    -

    How to Download and Install Tekken 3 APK on Your Android Device?

    -

    Tekken 3 was originally developed by Namco and released in arcades in 1997, with a PlayStation port following in 1998. It has never been officially released as a mobile app, but you can still get it via an APK file and easily play it with an external or built-in phone emulator app. An APK file is a compressed file that contains all the data and code of an android application. You can download it from various sources on the internet, such as APKCombo, AndroidAPKs, or Tekken. Here are the steps to download and install Tekken 3 APK on your android device:

    -




    -
      -
    • First, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    • -
    • Next, you need to download an emulator app that can run PlayStation games on your device. There are many options available, such as ePSXe, FPse, or RetroArch. You can find them on Google Play Store or other websites.
    • -
    • Then, you need to download the Tekken 3 APK file from one of the links mentioned above. Make sure you choose a reliable and safe source.
    • -
    • After that, you need to locate the downloaded file on your device and tap on it to install it.
    • -
    • Finally, you need to open the emulator app and load the Tekken 3 game from its menu. You may need to adjust some settings, such as graphics, sound, or controls, according to your preference.
    • -
    -

    Congratulations! You have successfully downloaded and installed Tekken 3 APK game on your android device. Now you can enjoy playing this amazing game anytime and anywhere.
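    Because the steps above stress choosing a reliable and safe source, it can help to verify what you actually downloaded before installing it. Below is a minimal Python sketch (not from the original article) that prints a file's SHA-256 checksum so you can compare it against a checksum published by a source you trust; the file name "tekken3.apk" is a placeholder.

```python
# Minimal sketch: print the SHA-256 of a downloaded file so it can be
# compared against a trusted, published checksum. The file name is a placeholder.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large downloads don't need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of(Path("tekken3.apk")))
```

    If the value does not match what the download page lists, delete the file and get it again from a source you trust.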

    -

    What are the Features and Benefits of Playing Tekken 3 on Your Mobile Phone?

    -

    Tekken 3 is not just a simple fighting game. It is a masterpiece that has many features and benefits that make it stand out from other games in its genre. Here are some of them:

    -
      -
    • Tekken 3 has a huge roster of characters, each with their own unique style, moves, and personality. You can choose from over 20 fighters such as Jin, Heihachi, Nina, Paul, Yoshimitsu, King, Lei, and many more. You can also unlock some secret characters, such as Dr. Bosconovitch, Gon, and Tiger.
    • -
    • Tekken 3 has a variety of game modes, such as Arcade, Versus, Team Battle, Time Attack, Survival, Practice, and Tekken Force. You can play solo or with a friend, and challenge yourself with different levels of difficulty and objectives.
    • -
    • Tekken 3 has a captivating story that follows the events of the previous games and introduces new characters and conflicts. You can learn more about the background and motivations of each fighter by completing their story mode.
    • -
    • Tekken 3 has a lot of fun and entertainment value that will keep you hooked for hours. You can enjoy the fast-paced and thrilling action, the stunning graphics and sound effects, the smooth and responsive controls, and the addictive gameplay.
    • -
    -

    As you can see, Tekken 3 is a game that has something for everyone. Whether you are a fan of fighting games or not, you will surely find something to love about this game.

    -

    Tekken 3 Gameplay: How to Select Characters and Enter Matches

    -

    Now that you have downloaded and installed Tekken 3 APK game on your android device, you may be wondering how to play it. Don't worry, we will guide you through the basics of the gameplay and help you get started. Here are some tips on how to select characters and enter matches:

    -
      -
    • To select a character, you need to go to the main menu and choose the game mode you want to play. Then, you will see a screen with all the available characters. You can scroll left or right to see more options. To select a character, simply tap on their portrait. You can also change their costume by tapping on the square button below their portrait.
    • -
    • To enter a match, you need to select your opponent after choosing your character. You can either play against the computer or another player. If you want to play against the computer, you can choose from four difficulty levels: Easy, Medium, Hard, or Very Hard. If you want to play against another player, you need to connect your device with another device via Bluetooth or Wi-Fi. Then, you can choose from two options: Single Match or Team Battle.
    • -
    • To start a match, you need to tap on the Start button after selecting your opponent. Then, you will see a loading screen with some tips and hints. After that, you will enter the match screen where you can see your character and your opponent's character on opposite sides of the screen. You can also see your health bars, your timer, and your score at the top of the screen.
    • -
    -

    That's it! You are now ready to play Tekken 3 on your android device. Have fun!

    The Controls and Interface of Tekken 3 on Android

    -

    One of the most important aspects of playing Tekken 3 on your android device is to learn the controls and interface of the game. You need to know how to move, attack, defend, and perform special moves with your character. You also need to know how to navigate the menus and options of the game. Here are some tips on how to use the controls and interface of Tekken 3 on android:

    -


    -
      -
    • To move your character, you need to use the directional pad on the left side of the screen. You can move up, down, left, or right by tapping or sliding on the pad. You can also dash or sidestep by double-tapping or sliding quickly on the pad.
    • -
    • To attack your opponent, you need to use the four buttons on the right side of the screen. Each button corresponds to a limb of your character: left punch, right punch, left kick, and right kick. You can tap or hold the buttons to perform different attacks. You can also combine the buttons to perform combos and special moves.
    • -
    • To defend yourself, you need to use the block button on the bottom center of the screen. You can tap or hold the button to block high or mid attacks from your opponent. You can also release the button and press down on the directional pad to block low attacks.
    • -
    • To perform special moves, you need to use specific combinations of buttons and directions. Each character has their own set of special moves that you can learn by checking their move list in the pause menu. Some special moves require timing, precision, and skill to execute.
    • -
    • To navigate the menus and options of the game, you need to use the touch screen or the back button on your device. You can swipe left or right to scroll through the menus and options. You can tap on an option to select it or go back. You can also access the pause menu by tapping on the pause button on the top right corner of the screen.
    • -
    -

    These are the basic controls and interface of Tekken 3 on android. You may need some practice and experimentation to get used to them and master them. You can also customize them according to your preference in the settings menu.

    The Speed and Smoothness of the Gameplay

    -

    One of the most impressive aspects of playing Tekken 3 on your android device is the speed and smoothness of the gameplay. You will be amazed by how fast and fluid the game runs on your phone, without any lag or glitches. You will feel like you are playing on a real arcade machine, with the same level of excitement and adrenaline. Here are some tips on how to enjoy the speed and smoothness of the gameplay:

    -
      -
    • To ensure the optimal performance of the game, you need to have a compatible device that meets the minimum requirements of the game. You can check the compatibility of your device by visiting the website of the emulator app that you are using. You can also adjust some settings, such as frame rate, resolution, or sound quality, to improve the speed and smoothness of the game.
    • -
    • To experience the full potential of the game, you need to play it in full screen mode. You can do this by tapping on the full screen button on the top left corner of the screen. This will remove any distractions and give you a better view of the game.
    • -
    • To appreciate the fast and fluid movements and reactions of the characters, you need to pay attention to their animations and expressions. You will notice how each character has their own unique style, personality, and emotion. You will also see how they react to different situations, such as getting hit, winning, or losing. You will feel more immersed and connected to the game.
    • -
    -

    These are some tips on how to enjoy the speed and smoothness of the gameplay. You will be surprised by how much fun and satisfaction you can get from playing Tekken 3 on your android device.

    Tekken 3 Graphics: How to Enjoy Arcade Quality Graphics on Your Phone

    -

    Another amazing aspect of playing Tekken 3 on your android device is the graphics. You will be blown away by the arcade quality graphics that the game offers, even on a small screen. You will see every detail and color of the characters, the stages, and the effects. You will hear every sound and music of the game, creating a realistic and immersive atmosphere. Here are some tips on how to enjoy the graphics of Tekken 3 on your phone:

    -
      -
    • To experience the best graphics of the game, you need to have a device that has a high-resolution screen and a powerful processor. You can check the specifications of your device by going to Settings > About Phone. You can also enhance the graphics of the game by changing some settings, such as texture quality, anti-aliasing, or filtering, in the emulator app that you are using.
    • -
    • To appreciate the beauty and diversity of the game, you need to explore the different stages and backgrounds that the game offers. You will see a variety of locations, such as temples, forests, cities, islands, and more. You will also see different weather and lighting effects, such as rain, snow, day, or night. You will feel like you are traveling around the world with your favorite characters.
    • -
    • To enjoy the no-add-on required feature of the game, you need to know that Tekken 3 does not require any additional files or data to run on your device. You only need to download and install the APK file and the emulator app, and you are good to go. You do not need to worry about any compatibility issues or storage space problems.
    • -
    -

    These are some tips on how to enjoy the graphics of Tekken 3 on your phone. You will be amazed by how much quality and variety the game delivers, even on a mobile platform.

    The Amazing Visual Experience of Watching Someone Else Play the Game

    -

    One of the most enjoyable aspects of playing Tekken 3 on your android device is the amazing visual experience of watching someone else play the game. You will be entertained and impressed by the skills and strategies of other players, whether they are your friends or strangers. You will also learn a lot from their moves and mistakes, and improve your own game. Here are some tips on how to watch someone else play Tekken 3 on your phone:

    -
      -
    • To watch someone else play the game, you need to have a device that has a screen mirroring or casting feature. You can check if your device has this feature by going to Settings > Display > Cast or Screen Mirroring. You can also use an external device, such as a Chromecast, a Roku, or an Apple TV, to connect your phone to a bigger screen, such as a TV or a monitor.
    • -
    • To find someone to watch, you need to use the online multiplayer option of the game. You can do this by tapping on the online button on the main menu of the game. Then, you can choose from two options: Join or Host. If you want to join an existing match, you can browse through the list of available matches and tap on one to enter. If you want to host your own match, you can create a room and invite other players to join.
    • -
    • To enjoy watching someone else play the game, you need to pay attention to their actions and reactions. You will see how they choose their characters, how they enter the matches, how they fight their opponents, and how they win or lose. You will also hear their comments and feedback, if they have a microphone or a chat feature enabled. You will feel like you are watching a live show or a tournament.
    • -
    -

    These are some tips on how to watch someone else play Tekken 3 on your phone. You will be amazed by how much fun and excitement you can get from watching other players play this awesome game.

    Tekken 3 Tips and Tricks: How to Master the Game and Win Every Fight

    -

    The last aspect of playing Tekken 3 on your android device is the tips and tricks. You will want to know how to master the game and win every fight, whether you are a beginner or an expert. You will want to learn the best characters and combos, the hidden modes and secrets, and the online multiplayer option. Here are some tips and tricks on how to master Tekken 3 and win every fight:

    -
      -
    • To choose the best characters and combos, you need to experiment with different fighters and find the ones that suit your style and preference. You can also check their move list and practice their moves in the practice mode. Some of the most popular and powerful characters in Tekken 3 are Jin, Heihachi, Paul, Nina, Law, Hwoarang, and Eddy. Some of the most effective and easy combos in Tekken 3 are Jin's 1,2,3,4, Paul's qcf+2, Nina's df+1,2, Law's b+2,1, Hwoarang's 3~4,4,4,4, and Eddy's b+1+2.
    • -
    • To unlock the hidden modes and secrets, you need to complete certain tasks or achievements in the game. You can unlock new characters by beating them in the arcade mode or by playing a certain number of matches. You can unlock new modes by completing the arcade mode or by playing the Tekken Force mode. Some of the hidden modes and secrets in Tekken 3 are Tekken Ball mode, Theater mode, Gon mode, Tiger mode, and Alternate costumes.
    • -
    • To play online multiplayer, you need to have a stable internet connection and a compatible device. You can play online multiplayer by tapping on the online button on the main menu of the game. Then, you can choose from two options: Join or Host. If you want to join an existing match, you can browse through the list of available matches and tap on one to enter. If you want to host your own match, you can create a room and invite other players to join. You can also chat with other players and send them messages or emojis.
    • -
    -

    These are some tips and tricks on how to master Tekken 3 and win every fight. You will be amazed by how much skill and strategy you can develop by playing this awesome game.

    -

    Conclusion: Why You Should Download Tekken 3 APK Game Today

    -

    In conclusion, Tekken 3 is one of the best fighting games of all time, and you should download it on your android device today. You will be able to enjoy playing this classic arcade game on your mobile phone with ease. You will be able to experience the following benefits:

    -
      -
    • You will be able to download and install Tekken 3 APK game on your android device with simple steps.
    • -
    • You will be able to play Tekken 3 with a huge roster of characters, a variety of game modes, a captivating story, and a lot of fun and entertainment value.
    • -
    • You will be able to enjoy Tekken 3 with arcade quality graphics, speed and smoothness of gameplay, and amazing visual experience of watching someone else play the game.
    • -
    • You will be able to master Tekken 3 with tips and tricks on how to choose the best characters and combos, unlock hidden modes and secrets, and play online multiplayer.
    • -
    -

    So what are you waiting for? Download Tekken 3 APK game today and enjoy playing this amazing game anytime and anywhere. You will not regret it!

    -

    FAQs

    -

    Here are some frequently asked questions about Tekken 3 APK game:

    -
      -
    • Q: Is Tekken 3 APK game safe to download?
    • -
    • A: Yes, Tekken 3 APK game is safe to download as long as you choose a reliable and trusted source. You can also scan the file with an antivirus app before installing it.
    • -
    • Q: Is Tekken 3 APK game compatible with all android devices?
    • -
    • A: No, Tekken 3 APK game may not be compatible with some android devices that do not meet the minimum requirements of the game or the emulator app. You can check the compatibility of your device by visiting the website of the emulator app that you are using.
    • -
    • Q: Is Tekken 3 APK game free to play?
    • -
    • A: Yes, Tekken 3 APK game is free to play as long as you have downloaded and installed it on your device. You do not need to pay any fees or charges to play it.
    • -
    • Q: Is Tekken 3 APK game legal to play?
    • -
• A: It depends on the laws and regulations of your country or region. Tekken 3 is a copyrighted game that belongs to Namco and Sony, so you may need a license or permission to play it legally, and you may also need to own the original game or console. If you are unsure, consult a legal expert before playing Tekken 3 APK game.
    • -
    • Q: How can I contact the developer or the support team of Tekken 3 APK game?
    • -
    • A: You can contact the developer or the support team of Tekken 3 APK game by visiting their official website or social media pages. You can also send them an email or a message with your feedback, suggestions, or complaints.
    • -
    -

    I hope you found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. I would love to hear from you. Thank you for reading and happy gaming!

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download TurboMiner - BTC Cloud Mining APK and Start Earning Crypto.md b/spaces/congsaPfin/Manga-OCR/logs/Download TurboMiner - BTC Cloud Mining APK and Start Earning Crypto.md deleted file mode 100644 index 17027efa49d4589417751fef22ccd1687614ae6b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download TurboMiner - BTC Cloud Mining APK and Start Earning Crypto.md +++ /dev/null @@ -1,185 +0,0 @@ - -

    Bitcoin Mining Mod APK Download: What You Need to Know

    -

    Bitcoin mining is one of the most popular and profitable ways to earn cryptocurrency. However, it is also a complex and resource-intensive process that requires specialized hardware, software, and electricity. That's why some people resort to using mod APKs, which are modified versions of Android applications that claim to offer enhanced features and functions for Bitcoin mining. But are these mod APKs safe and effective? How can you download and install them on your device? And what are the best ones available in the market? In this article, we will answer these questions and more, as we explore everything you need to know about Bitcoin mining mod APK download.

    -

    What is Bitcoin Mining and How Does It Work?

    -

Before we dive into the topic of mod APKs, let's first understand what Bitcoin mining is and how it works. Bitcoin mining is the process of adding transaction records to Bitcoin's public ledger of past transactions, known as the blockchain because it is literally a chain of blocks. The blockchain is what confirms to the rest of the network that those transactions have taken place.

    -

    bitcoin mining mod apk download


    Download File ✔✔✔ https://urlca.com/2uO8nc



    -

    The Basics of Bitcoin Mining

    -

Bitcoin mining is performed using specialized hardware that races to solve an extremely difficult computational puzzle. The first machine to find a valid solution gets to add the next block to the chain and collects the block reward in newly issued bitcoins, and then the race starts over. Cryptocurrency mining is painstaking, costly, and only sporadically rewarding. Nonetheless, it has a magnetic appeal for many investors interested in cryptocurrency, because miners are paid for their work in crypto tokens.
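To make the "computational puzzle" concrete, here is a minimal, hypothetical Python sketch of the proof-of-work idea: keep hashing the block data with a changing nonce until the hash starts with enough zeros. Real Bitcoin mining double-hashes an 80-byte binary block header against a numeric target, so the names and the difficulty value below are purely illustrative.

```python
import hashlib

def mine_block(block_data: str, difficulty: int):
    """Toy proof-of-work: find a nonce whose SHA-256 hash starts with `difficulty` zeros."""
    prefix = "0" * difficulty
    nonce = 0
    while True:
        digest = hashlib.sha256(f"{block_data}{nonce}".encode()).hexdigest()
        if digest.startswith(prefix):
            return nonce, digest  # a valid "block" has been found
        nonce += 1  # otherwise, try the next candidate

# A small difficulty so the search finishes in seconds on an ordinary laptop.
nonce, digest = mine_block("example transactions", difficulty=5)
print(f"nonce={nonce} hash={digest}")
```

Each extra leading zero multiplies the expected number of attempts by sixteen, which is essentially the same lever the real network pulls when its difficulty rises.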

    -

    The Challenges and Rewards of Bitcoin Mining

    -

Bitcoin mining is not easy, as it requires a lot of computing power, time, and energy. The difficulty of the puzzle adjusts automatically, roughly every two weeks (every 2,016 blocks), based on how much mining power is active, so that the network keeps producing about one block every 10 minutes. As more miners join the network, the difficulty increases, making it harder to find a valid solution; as fewer miners participate, the difficulty decreases, making it easier to mine bitcoins.

    -

    The reward for finding a new block also changes over time, as part of Bitcoin's protocol. When Bitcoin was first launched in 2009, the reward was 50 bitcoins per block. However, every 210,000 blocks (approximately every four years), the reward halves. This means that in 2020, the reward was reduced from 12.5 bitcoins to 6.25 bitcoins per block. This process will continue until the maximum supply of 21 million bitcoins is reached, which is expected to happen around the year 2140.
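The halving arithmetic above is easy to verify yourself. The sketch below is a back-of-the-envelope check, not consensus code: the real protocol counts integer satoshis and rounds down at each halving, which is why the true cap is actually slightly below 21 million.

```python
# Rough supply check: 210,000 blocks per era, and the block reward halves each era.
blocks_per_era = 210_000
reward = 50.0   # BTC per block when the network launched in 2009
total = 0.0
for era in range(33):   # after ~33 halvings the reward effectively reaches zero
    total += blocks_per_era * reward
    reward /= 2
print(f"approximate maximum supply: {total:,.0f} BTC")   # prints roughly 21,000,000
```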

    -

Despite these challenges, many people are still attracted to Bitcoin mining because of its potential rewards: each block pays out the current block reward plus the transaction fees it contains, which can be worth a substantial amount when the price of Bitcoin is high.

    What is a Mod APK and Why Do People Use It?

    -

    Now that we have a basic understanding of Bitcoin mining, let's move on to the topic of mod APKs. A mod APK is a modified version of an original Android application that has been altered by a third-party developer to provide additional features and functions that are not available in the official version. For example, a mod APK may offer unlimited coins, gems, lives, or other resources in a game, or remove ads, unlock premium content, or bypass restrictions in an app.

    -

    The Definition and Purpose of a Mod APK

    -

    An APK (short for Android Package Kit) is the file format used by Android devices to distribute and install applications. It contains all the elements that an app needs to run on your device, such as the code, resources, assets, certificates, and manifest. A mod APK is simply an APK that has been modified by someone other than the original developer, usually for the purpose of enhancing the user experience or gaining an unfair advantage in a game or app.
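Since an APK is just a ZIP archive with a conventional layout, you can inspect one with a few lines of standard-library Python. This is a minimal sketch: the file name is a placeholder, but the AndroidManifest.xml, the .dex compiled code, the res/ folder, and the META-INF/ signature entries are standard parts of any APK, modded or not.

```python
import zipfile

# An APK is a ZIP archive, so the standard library can list its contents
# without installing anything. "app.apk" is a placeholder file name.
with zipfile.ZipFile("app.apk") as apk:
    names = apk.namelist()

print("manifest present:", "AndroidManifest.xml" in names)
print("compiled code:", [n for n in names if n.endswith(".dex")])
print("resource files:", sum(1 for n in names if n.startswith("res/")))
print("signature entries:", [n for n in names if n.startswith("META-INF/")][:5])
```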

    -

    The Risks and Benefits of Using a Mod APK

    -

    Using a mod APK may seem tempting, especially if you want to enjoy more features and functions in your favorite apps or games. However, there are also some risks and drawbacks that you should be aware of before downloading and installing a mod APK on your device. Here are some of the pros and cons of using a mod APK:

    -
      -
    • Pros: -
        -
      • You can access more features and functions that are not available in the official version of the app or game.
      • -
      • You can save money by getting free resources, premium content, or in-app purchases without spending real money.
      • -
      • You can customize your app or game according to your preferences and needs.
      • -
      -
    • -
    • Cons: -
        -
      • You may expose your device to malware, viruses, spyware, or other harmful software that can damage your device or steal your personal information.
      • -
      • You may violate the terms and conditions of the original app or game developer and risk getting banned or suspended from using their services.
      • -
      • You may compromise the quality and performance of the app or game, as the mod APK may not be compatible with your device or the latest updates.
      • -
      -
    • -

    How to Download and Install a Bitcoin Mining Mod APK

    -

    If you have decided to use a Bitcoin mining mod APK, you need to know how to download and install it on your device. There are many websites and platforms that offer mod APKs for various apps and games, but not all of them are trustworthy and reliable. Some of them may contain malware or viruses that can harm your device or steal your data. Therefore, you need to be careful and cautious when choosing a source for your mod APK download.

    -

    The Steps to Download a Bitcoin Mining Mod APK

    -

    Here are the general steps to download a Bitcoin mining mod APK from a reputable website:

    -

    bitcoin mining simulator mod apk download
    -bitcoin mining farm mod apk download
    -bitcoin mining game mod apk download
    -bitcoin mining tycoon mod apk download
    -bitcoin mining hack mod apk download
    -bitcoin mining software mod apk download
    -bitcoin mining machine mod apk download
    -bitcoin mining pool mod apk download
    -bitcoin mining robot mod apk download
    -bitcoin mining online mod apk download
    -bitcoin mining pro mod apk download
    -bitcoin mining free mod apk download
    -bitcoin mining generator mod apk download
    -bitcoin mining booster mod apk download
    -bitcoin mining manager mod apk download
    -bitcoin mining master mod apk download
    -bitcoin mining idle mod apk download
    -bitcoin mining clicker mod apk download
    -bitcoin mining adventure mod apk download
    -bitcoin mining empire mod apk download
    -bitcoin mining cloud mod apk download
    -bitcoin mining calculator mod apk download
    -bitcoin mining wallet mod apk download
    -bitcoin mining reward mod apk download
    -bitcoin mining network mod apk download
    -bitcoin mining device mod apk download
    -bitcoin mining system mod apk download
    -bitcoin mining server mod apk download
    -bitcoin mining platform mod apk download
    -bitcoin mining tool mod apk download
    -bitcoin mining app mod apk download
    -bitcoin mining website mod apk download
    -bitcoin mining guide mod apk download
    -bitcoin mining tutorial mod apk download
    -bitcoin mining strategy mod apk download
    -bitcoin mining tips mod apk download
    -bitcoin mining tricks mod apk download
    -bitcoin mining secrets mod apk download
    -bitcoin mining cheats mod apk download
    -bitcoin mining reviews mod apk download

    -
      -
    1. Find a website that offers Bitcoin mining mod APKs and has positive reviews and ratings from other users.
    2. -
    3. Select the mod APK that you want to download and check its features, compatibility, and requirements.
    4. -
    5. Click on the download button or link and wait for the file to be downloaded on your device.
    6. -
    7. Locate the downloaded file in your device's file manager or downloads folder and tap on it to open it.
    8. -
    9. Allow the installation of unknown sources if prompted by your device's security settings.
    10. -
    11. Follow the instructions on the screen to install the mod APK on your device.
    12. -
    13. Launch the app or game and enjoy the modded features and functions.
    14. -
    -

    The Precautions to Take Before Installing a Bitcoin Mining Mod APK

    -

    Before you install a Bitcoin mining mod APK on your device, you should take some precautions to ensure your safety and security. Here are some tips to follow before installing a mod APK:

    -
      -
    • Backup your device's data in case something goes wrong during the installation process or the mod APK causes any issues or errors.
    • -
• Scan the mod APK file with an antivirus or anti-malware tool to detect and remove any potential threats or infections, and, if the download page publishes a checksum for the file, verify it as well (a minimal verification sketch follows this list).
    • -
    • Read the permissions and access requests that the mod APK asks for and only grant them if they are necessary and reasonable for the app or game.
    • -
    • Disable any automatic updates or syncs for the app or game that you are modding, as they may overwrite or delete the modded features and functions.
    • -
    • Use a VPN or proxy service to hide your IP address and location when using the mod APK, as some apps or games may track your online activity and ban you from their services.
    • -
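As a concrete companion to the scanning advice above, you can also check that the file you received is byte-for-byte the one the site claims to host, whenever the download page publishes a SHA-256 checksum. This is a minimal sketch with placeholder names; a matching checksum does not prove the APK is safe, only that it was not corrupted or swapped in transit.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Both values below are placeholders: use the real file path and the checksum
# published by the download page, if it provides one.
published = "expected-sha256-from-the-download-page"
actual = sha256_of("mod_game.apk")
print("checksum matches:", actual == published)
```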

    What are the Best Bitcoin Mining Mod APKs Available?

    -

    Now that you know how to download and install a Bitcoin mining mod APK, you may be wondering which one to choose from the many options available. There are hundreds of mod APKs that claim to offer Bitcoin mining features and functions, but not all of them are reliable and effective. Some of them may not work properly, have bugs or glitches, or even contain malware or viruses. Therefore, you need to do some research and comparison before deciding on the best Bitcoin mining mod APK for your device.

    -

    A List of Top Bitcoin Mining Mod APKs with Features and Ratings

    -

    To help you with your decision, we have compiled a list of some of the top Bitcoin mining mod APKs that you can download and use on your device. These mod APKs have been tested and verified by other users and have positive reviews and ratings. They also offer various features and functions that can enhance your Bitcoin mining experience and performance. Here are some of the best Bitcoin mining mod APKs that you can try:

    -
      -
    • Bitcoin Miner - Earn Free BTC Mod APK: This mod APK allows you to mine bitcoins on your device without any special hardware or software. It uses your device's CPU and GPU power to solve complex math problems and earn bitcoins. You can also withdraw your earnings to your Bitcoin wallet or exchange them for other cryptocurrencies. The mod APK also offers features such as daily bonuses, referral rewards, cloud mining, and offline mining. You can download it from .
    • -
    • Bitcoin Mining Simulator Mod APK: This mod APK is a fun and educational game that simulates the process of Bitcoin mining. You can create your own virtual mining farm, upgrade your equipment, hire workers, and compete with other players. You can also earn bitcoins by completing tasks, watching ads, or inviting friends. The mod APK also offers features such as realistic graphics, sound effects, leaderboards, and achievements. You can download it from .
    • -
    • Bitcoin Billionaire Mod APK: This mod APK is a casual and idle game that lets you become a Bitcoin billionaire. You can tap on the screen to mine bitcoins, invest in various businesses, upgrade your technology, and customize your character. You can also travel through time, meet famous characters, and unlock new achievements. The mod APK also offers features such as unlimited money, hyperbits, gems, and auto-miner. You can download it from .
    • -
    -

    A Comparison Table of the Best Bitcoin Mining Mod APKs

    -

    To make it easier for you to compare the features and functions of the best Bitcoin mining mod APKs, we have created a table that summarizes the main aspects of each mod APK. You can use this table to decide which mod APK suits your needs and preferences the best. Here is the comparison table of the best Bitcoin mining mod APKs:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Mod APKTypeFeaturesRatings
    Bitcoin Miner - Earn Free BTC Mod APKReal Bitcoin Mining App- Mine bitcoins on your device
    - Withdraw or exchange your earnings
    - Get daily bonuses and referral rewards
    - Use cloud mining and offline mining
    4.5/5 stars on Google Play Store
    Bitcoin Mining Simulator Mod APKSimulation Game- Create your own virtual mining farm
    - Upgrade your equipment and hire workers
    - Compete with other players
    - Earn bitcoins by completing tasks, watching ads, or inviting friends
    4.2/5 stars on Google Play Store
    Bitcoin Billionaire Mod APKCasual Idle Game- Tap on the screen to mine bitcoins
    - Invest in various businesses and upgrade your technology
    - Customize your character and travel through time
    - Unlock new achievements and meet famous characters
    - Get unlimited money, hyperbits, gems, and auto-miner
    4.6/5 stars on Google Play Store

    What are the Alternatives to Bitcoin Mining Mod APKs?

    -

    While Bitcoin mining mod APKs may seem like an easy and convenient way to earn bitcoins, they are not the only option available. In fact, there are some alternatives that may be more eco-friendly and legitimate than using mod APKs. These alternatives may not offer the same features and functions as mod APKs, but they may also have some advantages and benefits that mod APKs do not. Let's take a look at some of the alternatives to Bitcoin mining mod APKs and how they compare.

    -

    The Drawbacks of Bitcoin Mining Mod APKs

    -

    Before we explore the alternatives, let's first review some of the drawbacks of using Bitcoin mining mod APKs. As we have discussed earlier, using mod APKs can pose some risks and challenges, such as:

    -
      -
    • Malware or virus infection: Some mod APKs may contain malicious software that can damage your device or steal your data. You may also expose your device to hackers or cybercriminals who can access your personal information or Bitcoin wallet.
    • -
    • Ban or suspension: Some mod APKs may violate the terms and conditions of the original app or game developer and risk getting banned or suspended from using their services. You may also lose your progress, achievements, or rewards if you get caught using a mod APK.
    • -
    • Performance or compatibility issues: Some mod APKs may not work properly, have bugs or glitches, or even crash your device. They may also not be compatible with your device or the latest updates of the app or game.
    • -
    • Environmental impact: Bitcoin mining mod APKs use a lot of electricity and generate a lot of heat, which can contribute to global warming and climate change. According to a study by Cambridge University, Bitcoin mining consumes more electricity than some countries, such as Argentina, Sweden, or Pakistan.
    • -
    -

    The Eco-Friendly and Legitimate Options for Bitcoin Mining

    -

    If you want to avoid these drawbacks and still earn bitcoins, you may want to consider some of the eco-friendly and legitimate options for Bitcoin mining. These options may not be as easy or convenient as using mod APKs, but they may also offer some benefits and advantages that mod APKs do not. Here are some of the eco-friendly and legitimate options for Bitcoin mining:

    -
      -
    • Cloud mining: Cloud mining is a service that allows you to rent computing power from a remote data center that mines bitcoins on your behalf. You do not need to buy or maintain any hardware or software, as everything is done by the cloud mining provider. You only need to pay a fee for the service and receive your share of the mined bitcoins. Cloud mining is more eco-friendly than using mod APKs, as it reduces the energy consumption and heat generation of your device. However, cloud mining also has some drawbacks, such as high fees, low profits, fraud risks, and lack of control.
    • -
    • Green energy mining: Green energy mining is a method that uses renewable energy sources, such as solar, wind, hydro, or geothermal power, to mine bitcoins. This way, you can reduce the environmental impact of Bitcoin mining and save money on electricity bills. You can either buy or build your own green energy system, or join a green energy mining pool that shares the costs and rewards of using renewable energy. Green energy mining is more eco-friendly than using mod APKs, as it reduces the carbon footprint and greenhouse gas emissions of Bitcoin mining. However, green energy mining also has some drawbacks, such as high initial costs, technical difficulties, weather dependence, and location limitations.
    • -
    • Faucets: Faucets are websites or apps that give away small amounts of bitcoins for free in exchange for completing tasks, watching ads, or playing games. You do not need to mine bitcoins yourself, as you only need to visit the faucet website or app and claim your reward. Faucets are more eco-friendly than using mod APKs, as they do not use any electricity or generate any heat from your device. However, faucets also have some drawbacks, such as low payouts, time limits, spam risks, and scam risks.
    • -
    -

    Conclusion

    -

    Bitcoin mining is one of the most popular and profitable ways to earn cryptocurrency. However, it is also a complex and resource-intensive process that requires specialized hardware, software, and electricity. That's why some people resort to using mod APKs, which are modified versions of Android applications that claim to offer enhanced features and functions for Bitcoin mining.

    -

    A Summary of the Main Points of the Article

    -

In this article, we have covered everything you need to know about Bitcoin mining mod APK download. We have explained what Bitcoin mining is and how it works, what a mod APK is and why people use it, how to download and install a Bitcoin mining mod APK, which Bitcoin mining mod APKs are the best available, and what the alternatives to Bitcoin mining mod APKs are. We have also provided a list of top Bitcoin mining mod APKs with features and ratings, a comparison table of the best Bitcoin mining mod APKs, and some precautions to take before installing a Bitcoin mining mod APK.

    -

    A Call to Action for the Readers

    -

    We hope that this article has been helpful and informative for you. If you are interested in trying out Bitcoin mining mod APKs, we suggest that you follow the steps and tips that we have provided in this article. However, we also advise that you be careful and cautious when using mod APKs, as they may pose some risks and challenges for your device and your security. You may also want to consider some of the eco-friendly and legitimate options for Bitcoin mining that we have discussed in this article.

    -

    If you have any questions, comments, or feedback about this article or Bitcoin mining mod APKs, please feel free to leave them below. We would love to hear from you and help you with your Bitcoin mining journey. Thank you for reading and happy mining!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Bitcoin mining mod APKs and their answers:

    -

    Q1. Is Bitcoin mining legal?

    -

    A1. The legality of Bitcoin mining depends on your location and the laws and regulations of your country or region. In general, Bitcoin mining is legal in most countries, as long as you follow the rules and pay the taxes. However, some countries or regions may prohibit or restrict Bitcoin mining, such as China, Iran, Venezuela, or Russia. Therefore, you should check the legal status of Bitcoin mining in your area before engaging in it.

    -

    Q2. How much can I earn from Bitcoin mining mod APKs?

    -

    A2. The amount of money that you can earn from Bitcoin mining mod APKs depends on several factors, such as the difficulty of the math problem, the reward for finding a new block, the price of Bitcoin, the fees that you pay for the service or the electricity, and the features and functions of the mod APK. In general, Bitcoin mining mod APKs may not offer a lot of earnings, as they may have low performance, high fees, or limited resources. You may also lose money if the mod APK is fraudulent or infected with malware.
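If you want to sanity-check any earnings claim, the arithmetic is simple: your share of the network's hash rate times the daily block rewards, minus electricity. The sketch below uses made-up example numbers purely to show the calculation; a phone running a mod APK has a hash rate many orders of magnitude below the ASIC assumed here, so its expected revenue effectively rounds to zero.

```python
# Back-of-the-envelope profitability check. Every input here is an example
# value, not a current figure: plug in real numbers before trusting the result.
my_hashrate = 100e12            # 100 TH/s, roughly one modern ASIC (assumption)
network_hashrate = 400e18       # 400 EH/s total network hash rate (assumption)
block_reward_btc = 6.25         # block reward at the time this article was written
blocks_per_day = 144            # about one block every 10 minutes
btc_price_usd = 30_000          # placeholder price (assumption)
power_watts = 3_000             # ASIC power draw (assumption)
electricity_per_kwh = 0.10      # placeholder electricity rate (assumption)

daily_btc = my_hashrate / network_hashrate * block_reward_btc * blocks_per_day
revenue = daily_btc * btc_price_usd
power_cost = power_watts / 1000 * 24 * electricity_per_kwh
print(f"revenue ~ ${revenue:.2f}/day, power ~ ${power_cost:.2f}/day, "
      f"profit ~ ${revenue - power_cost:.2f}/day")
```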

    -

    Q3. How can I tell if a Bitcoin mining mod APK is safe and reliable?

    -

    A3. There is no definitive way to tell if a Bitcoin mining mod APK is safe and reliable, as some of them may look legitimate but contain hidden threats or defects. However, there are some signs and indicators that you can look for to determine the quality and credibility of a mod APK, such as:

    -
      -
    • The source: You should only download mod APKs from reputable and trustworthy websites or platforms that have positive reviews and ratings from other users.
    • -
    • The file: You should scan the mod APK file with an antivirus or anti-malware software to detect and remove any potential threats or infections.
    • -
    • The permissions: You should read the permissions and access requests that the mod APK asks for and only grant them if they are necessary and reasonable for the app or game.
    • -
    • The feedback: You should check the feedback and comments from other users who have used the mod APK and see if they have encountered any issues or problems.
    • -
    -

    Q4. How can I optimize my Bitcoin mining performance and efficiency?

    -

    A4. There are some tips and tricks that you can use to optimize your Bitcoin mining performance and efficiency, such as:

    -
      -
    • Choose a suitable mod APK: You should choose a mod APK that matches your device's specifications, compatibility, and requirements.
    • -
    • Update your device: You should update your device's software, firmware, drivers, and security patches to ensure its optimal performance and security.
    • -
    • Close other apps: You should close other apps or programs that are running in the background or consuming your device's resources.
    • -
    • Clean your device: You should clean your device's cache, memory, storage, and battery to improve its speed and durability.
    • -
    • Cool your device: You should cool your device's temperature by using a fan, a cooler, or an air conditioner to prevent overheating or damage.
    • -
    -

    Q5. What are the future trends and developments in Bitcoin mining?

    -

    A5. Bitcoin mining is constantly evolving and changing as new technologies, innovations, and challenges emerge. Some of the future trends and developments in Bitcoin mining are:

    -
      -
    • Halving: The next halving event is expected to happen in 2024, when the reward for finding a new block will be reduced from 6.25 bitcoins to 3.125 bitcoins. This will affect the profitability and difficulty of Bitcoin mining, as well as the supply and demand of bitcoins.
    • -
    • ASICs: ASICs (short for Application-Specific Integrated Circuits) are specialized hardware devices that are designed to perform Bitcoin mining faster and more efficiently than general-purpose computers. ASICs are constantly being improved and upgraded by manufacturers and miners, resulting in higher performance and lower costs. However, ASICs also pose some challenges, such as centralization, environmental impact, and compatibility issues.
    • -
    • Lightning Network: The Lightning Network is a second-layer solution that aims to improve the scalability, speed, and efficiency of Bitcoin transactions. It allows users to create and use payment channels that bypass the blockchain and enable instant and low-cost transactions. The Lightning Network can also benefit Bitcoin mining, as it can reduce the congestion and fees on the blockchain, increase the transaction volume and revenue for miners, and enable new mining opportunities and services.
    • -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/VERIFIED Download 80 39s 90 39s Music.md b/spaces/congsaPfin/Manga-OCR/logs/VERIFIED Download 80 39s 90 39s Music.md deleted file mode 100644 index 60a8251aef0356b8ec44387c38a58788c9647653..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/VERIFIED Download 80 39s 90 39s Music.md +++ /dev/null @@ -1,37 +0,0 @@ -
    -

    How to Download 80's 90's Music for Free

    -

    If you are a fan of 80's 90's music, you might want to download some of your favorite songs and albums to enjoy them offline. Whether you want to relive the nostalgia, discover new artists, or create your own playlists, downloading 80's 90's music can be a fun and rewarding experience. However, you might also face some challenges, such as finding reliable sources, avoiding malware and viruses, and respecting the rights of the creators. In this article, we will show you how to download 80's 90's music for free from various websites and apps, as well as how to do it safely and legally.

    -

download 80's 90's music


Download: https://urlca.com/2uO4HJ



    -

    The Best Websites for Downloading Free Music Online

    -

    One of the easiest ways to download 80's 90's music for free is to use websites that offer free music downloads. There are many websites that allow you to search, listen, and download music from different genres, artists, and eras. However, not all of them are trustworthy or legal. Here are some of the best websites that we recommend for downloading free music online:

    -

    Bandcamp

    -

    Bandcamp is a website that allows independent artists to upload their music and set their own prices. You can find a lot of 80's 90's music from various genres, such as rock, pop, synthwave, metal, punk, and more. To download free music from Bandcamp, you can search for tags like "Free" and "Free Download". You can also browse by genre or by curated collections. Some artists may ask you to share your email address or pay what you want for their music. You can also support them by buying their physical albums or merchandise.

    -

    DatPiff

    -

DatPiff is a website that specializes in hip-hop and rap music. It offers free downloads of mixtapes uploaded by both emerging and well-known artists. Although most of its catalog is modern, you can also find plenty of 90's-style hip-hop and rap alongside tapes from artists like Drake, Future, Lil Wayne, Meek Mill, The Weeknd, and Wiz Khalifa. To download free mixtapes from DatPiff, you can browse by featured, newest, most popular, or most listened, or search by artist name or keyword. You may need to create an account or log in with Facebook or Twitter to access some downloads.

    -

    -

    Free Music Archive

    -

Free Music Archive is a website that provides free access to music that can be downloaded and used in other creative works under Creative Commons licenses. You can find a variety of music from different genres and sources on Free Music Archive, including some 80's and 90's material, and you can search by curated collections, specific genres, or trending music.

YouTube Music

YouTube Music has a large library of music videos and playlists from different genres, artists, and eras. You can find a lot of 80's 90's music on YouTube Music, such as videos by Whitney Houston, Guns N' Roses, Mariah Carey, or Backstreet Boys. To download music from YouTube Music, you need to create an account and use the app; any song or playlist with a download button next to it can be saved for offline listening, but downloading requires a YouTube Premium subscription. You can also stream online for free or create your own playlists.

    -

    The Best Tips for Downloading Free Music Safely and Legally

    -

    While downloading 80's 90's music for free can be fun and rewarding, it can also be risky and illegal. You might encounter malware and viruses that can harm your device or steal your personal information. You might also violate the copyright laws and the rights of the creators. Here are some of the best tips for downloading free music safely and legally:

    -

    Check the license and terms of use of the music before downloading

    -

    Before you download any music from any website or app, you should check the license and terms of use of the music. Some music may be free to download for personal use only, while others may require attribution or permission from the creator. Some music may be under Creative Commons licenses, which allow you to use the music for certain purposes as long as you follow the conditions. You can check the license and terms of use of the music by looking for icons, links, or descriptions on the website or app.

    -

    Use a reliable antivirus software and a VPN service to protect your device and privacy

    -

When you download music from any website or app, you should use reliable antivirus software and a VPN service to protect your device and your privacy. Antivirus software can scan the downloaded files and remove any malware or viruses that come with them. A VPN service encrypts your internet connection and hides your IP address, so that no one can track or spy on your online activity. There are many antivirus programs and VPN services available online, but make sure to choose ones that are trustworthy and reputable.

    -

    Support the artists by buying their music or donating to them if you like their work

    -

    When you download any music from any website or app, you should support the artists by buying their music or donating to them if you like their work. Downloading free music does not mean that you own the music or that you can use it for any purpose. The artists still deserve recognition and compensation for their work. You can support them by buying their physical albums or digital downloads, subscribing to their streaming services or channels, attending their live shows or events, or donating to their websites or platforms.

    -

    Conclusion

    -

    Downloading 80's 90's music for free can be a great way to enjoy your favorite songs and albums offline. You can find a lot of 80's 90's music from various websites and apps that offer free music downloads. However, you should also be careful and respectful when downloading free music. You should check the license and terms of use of the music before downloading, use a reliable antivirus software and a VPN service to protect your device and privacy, and support the artists by buying their music or donating to them if you like their work. By following these tips, you can download 80's 90's music for free safely and legally.

    -

    Frequently Asked Questions

    -

    Here are some of the frequently asked questions about downloading 80's 90's music for free:

    -

    Q: What is the best format for downloading free music?

    -

A: The best format for downloading free music depends on your preference and device. Common formats include MP3, AAC, OGG, and WMA, which are lossy (smaller files, slightly reduced quality), and WAV and FLAC, which are lossless (full quality, much larger files). MP3 is the most popular choice because it is compatible with nearly every device and keeps file sizes small; FLAC or WAV are better if you want the highest sound quality and have the storage space for it.

    -

    Q: How can I convert downloaded music files to different formats?

    -

    A: You can convert downloaded music files to different formats by using online tools or software. Online tools are websites that allow you to upload your files and choose the output format. Some examples are Online Audio Converter, Zamzar, CloudConvert, and Convertio. Software are programs that you can install on your device and use offline. Some examples are Audacity, VLC Media Player, Freemake Audio Converter, and Any Audio Converter.
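If you are comfortable with a small script, you can also convert files locally instead of uploading them to a web converter. The sketch below assumes the pydub library and FFmpeg are installed; the file names are placeholders.

```python
from pydub import AudioSegment  # pip install pydub; requires FFmpeg on the system

# Load an MP3 and re-export it in two other formats. pydub delegates the
# actual decoding and encoding work to FFmpeg.
song = AudioSegment.from_file("track.mp3", format="mp3")
song.export("track.flac", format="flac")                    # lossless copy of the decoded audio
song.export("track_128.mp3", format="mp3", bitrate="128k")  # smaller, lower-bitrate MP3
```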

    -

    Q: How can I transfer downloaded music files to my smartphone?

    -

    A: You can transfer downloaded music files to your smartphone by using a USB cable, a Bluetooth connection, or a cloud service. A USB cable is a cord that connects your device to your computer. You can use it to copy and paste your files from your computer to your device. A Bluetooth connection is a wireless connection that allows you to send and receive files from your device to another device. You can use it to pair your device with another device and share your files. A cloud service is a website or app that allows you to store and access your files online. You can use it to upload your files from your computer to the cloud and download them from the cloud to your device.

    -

    Q: How can I edit downloaded music files to make my own remixes or mashups?

    -

    A: You can edit downloaded music files to make your own remixes or mashups by using online tools or software. Online tools are websites that allow you to upload your files and edit them online. Some examples are Soundation, AudioTool, Soundtrap, and Audiotool. Software are programs that you can install on your device and use offline. Some examples are GarageBand, FL Studio, Ableton Live, and Mixcraft.

    -

    Q: How can I find more 80's 90's music to download for free?

    -

    A: You can find more 80's 90's music to download for free by using search engines, social media, blogs, forums, or podcasts. Search engines are websites that allow you to search for keywords or phrases related to 80's 90's music. Some examples are Google, Bing, Yahoo, and DuckDuckGo. Social media are websites or apps that allow you to connect with other people who share your interests in 80's 90's music. Some examples are Facebook, Twitter, Instagram, and TikTok. Blogs are websites that provide information, opinions, or reviews about 80's 90's music. Some examples are Pitchfork, Rolling Stone, Stereogum, and NME. Forums are websites that allow you to discuss topics related to 80's 90's music with other users. Some examples are Reddit, Quora, Stack Exchange, and MusicBanter. Podcasts are audio or video programs that talk about 80's 90's music with guests or hosts. Some examples are The 80s Music Podcast, The 90s Music Podcast, The Throwback Podcast, and The Retro Hour Podcast.

    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Ch4 Sex Story Fifty Shades Of Grey PDTV XviD AC3 Diddls Geburtstagsgr 2021.md b/spaces/contluForse/HuggingGPT/assets/Ch4 Sex Story Fifty Shades Of Grey PDTV XviD AC3 Diddls Geburtstagsgr 2021.md deleted file mode 100644 index f84ef44cb462671eafac63d2941db1f121400f80..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ch4 Sex Story Fifty Shades Of Grey PDTV XviD AC3 Diddls Geburtstagsgr 2021.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ch4 Sex Story Fifty Shades Of Grey PDTV XviD AC3 diddls geburtstagsgr


    Download File ⚙⚙⚙ https://ssurll.com/2uzxkL



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/adaptive_avgmax_pool.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/adaptive_avgmax_pool.py deleted file mode 100644 index ebc6ada8c5b28c7eac5785b0cc2933eb01a15d46..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/adaptive_avgmax_pool.py +++ /dev/null @@ -1,118 +0,0 @@ -""" PyTorch selectable adaptive pooling -Adaptive pooling with the ability to select the type of pooling from: - * 'avg' - Average pooling - * 'max' - Max pooling - * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 - * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim - -Both a functional and a nn.Module version of the pooling is provided. - -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def adaptive_pool_feat_mult(pool_type='avg'): - if pool_type == 'catavgmax': - return 2 - else: - return 1 - - -def adaptive_avgmax_pool2d(x, output_size=1): - x_avg = F.adaptive_avg_pool2d(x, output_size) - x_max = F.adaptive_max_pool2d(x, output_size) - return 0.5 * (x_avg + x_max) - - -def adaptive_catavgmax_pool2d(x, output_size=1): - x_avg = F.adaptive_avg_pool2d(x, output_size) - x_max = F.adaptive_max_pool2d(x, output_size) - return torch.cat((x_avg, x_max), 1) - - -def select_adaptive_pool2d(x, pool_type='avg', output_size=1): - """Selectable global pooling function with dynamic input kernel size - """ - if pool_type == 'avg': - x = F.adaptive_avg_pool2d(x, output_size) - elif pool_type == 'avgmax': - x = adaptive_avgmax_pool2d(x, output_size) - elif pool_type == 'catavgmax': - x = adaptive_catavgmax_pool2d(x, output_size) - elif pool_type == 'max': - x = F.adaptive_max_pool2d(x, output_size) - else: - assert False, 'Invalid pool type: %s' % pool_type - return x - - -class FastAdaptiveAvgPool2d(nn.Module): - def __init__(self, flatten=False): - super(FastAdaptiveAvgPool2d, self).__init__() - self.flatten = flatten - - def forward(self, x): - return x.mean((2, 3), keepdim=not self.flatten) - - -class AdaptiveAvgMaxPool2d(nn.Module): - def __init__(self, output_size=1): - super(AdaptiveAvgMaxPool2d, self).__init__() - self.output_size = output_size - - def forward(self, x): - return adaptive_avgmax_pool2d(x, self.output_size) - - -class AdaptiveCatAvgMaxPool2d(nn.Module): - def __init__(self, output_size=1): - super(AdaptiveCatAvgMaxPool2d, self).__init__() - self.output_size = output_size - - def forward(self, x): - return adaptive_catavgmax_pool2d(x, self.output_size) - - -class SelectAdaptivePool2d(nn.Module): - """Selectable global pooling layer with dynamic input kernel size - """ - def __init__(self, output_size=1, pool_type='fast', flatten=False): - super(SelectAdaptivePool2d, self).__init__() - self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing - self.flatten = nn.Flatten(1) if flatten else nn.Identity() - if pool_type == '': - self.pool = nn.Identity() # pass through - elif pool_type == 'fast': - assert output_size == 1 - self.pool = FastAdaptiveAvgPool2d(flatten) - self.flatten = nn.Identity() - elif pool_type == 'avg': - self.pool = nn.AdaptiveAvgPool2d(output_size) - elif pool_type == 'avgmax': - self.pool = AdaptiveAvgMaxPool2d(output_size) - elif pool_type == 'catavgmax': - self.pool = AdaptiveCatAvgMaxPool2d(output_size) - elif pool_type == 'max': - self.pool = nn.AdaptiveMaxPool2d(output_size) - else: - assert False, 
'Invalid pool type: %s' % pool_type - - def is_identity(self): - return not self.pool_type - - def forward(self, x): - x = self.pool(x) - x = self.flatten(x) - return x - - def feat_mult(self): - return adaptive_pool_feat_mult(self.pool_type) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + 'pool_type=' + self.pool_type \ - + ', flatten=' + str(self.flatten) + ')' - diff --git a/spaces/cooelf/Multimodal-CoT/timm/utils/cuda.py b/spaces/cooelf/Multimodal-CoT/timm/utils/cuda.py deleted file mode 100644 index 9e7bddf30463a7be7186c7def47c4e4dfb9993aa..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/utils/cuda.py +++ /dev/null @@ -1,55 +0,0 @@ -""" CUDA / AMP utils - -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch - -try: - from apex import amp - has_apex = True -except ImportError: - amp = None - has_apex = False - -from .clip_grad import dispatch_clip_grad - - -class ApexScaler: - state_dict_key = "amp" - - def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward(create_graph=create_graph) - if clip_grad is not None: - dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) - optimizer.step() - - def state_dict(self): - if 'state_dict' in amp.__dict__: - return amp.state_dict() - - def load_state_dict(self, state_dict): - if 'load_state_dict' in amp.__dict__: - amp.load_state_dict(state_dict) - - -class NativeScaler: - state_dict_key = "amp_scaler" - - def __init__(self): - self._scaler = torch.cuda.amp.GradScaler() - - def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): - self._scaler.scale(loss).backward(create_graph=create_graph) - if clip_grad is not None: - assert parameters is not None - self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place - dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) - self._scaler.step(optimizer) - self._scaler.update() - - def state_dict(self): - return self._scaler.state_dict() - - def load_state_dict(self, state_dict): - self._scaler.load_state_dict(state_dict) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/util.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/util.py deleted file mode 100644 index e0b217ef9adf92dd5b1fe0debcfb07d0f241a4cb..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/util.py +++ /dev/null @@ -1,98 +0,0 @@ -import random - -import numpy as np -import cv2 -import os - - -annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts') - - -def HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - - -def resize_image(input_image, resolution): - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) 
- return img - - -def nms(x, t, s): - x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) - - f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) - f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) - f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) - f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) - - y = np.zeros_like(x) - - for f in [f1, f2, f3, f4]: - np.putmask(y, cv2.dilate(x, kernel=f) == x, x) - - z = np.zeros_like(y, dtype=np.uint8) - z[y > t] = 255 - return z - - -def make_noise_disk(H, W, C, F): - noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C)) - noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC) - noise = noise[F: F + H, F: F + W] - noise -= np.min(noise) - noise /= np.max(noise) - if C == 1: - noise = noise[:, :, None] - return noise - - -def min_max_norm(x): - x -= np.min(x) - x /= np.maximum(np.max(x), 1e-5) - return x - - -def safe_step(x, step=2): - y = x.astype(np.float32) * float(step + 1) - y = y.astype(np.int32).astype(np.float32) / float(step) - return y - - -def img2mask(img, H, W, low=10, high=90): - assert img.ndim == 3 or img.ndim == 2 - assert img.dtype == np.uint8 - - if img.ndim == 3: - y = img[:, :, random.randrange(0, img.shape[2])] - else: - y = img - - y = cv2.resize(y, (W, H), interpolation=cv2.INTER_CUBIC) - - if random.uniform(0, 1) < 0.5: - y = 255 - y - - return y < np.percentile(y, random.randrange(low, high)) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java deleted file mode 100644 index 24ec573e7d184e7d64118a723d6645fd92d6e6d9..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java +++ /dev/null @@ -1,376 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -package org.tensorflow.lite.examples.classification.tflite; - -import static java.lang.Math.min; - -import android.app.Activity; -import android.graphics.Bitmap; -import android.graphics.RectF; -import android.os.SystemClock; -import android.os.Trace; -import android.util.Log; -import android.view.TextureView; -import android.view.ViewStub; - -import java.io.IOException; -import java.nio.MappedByteBuffer; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import org.tensorflow.lite.DataType; -import org.tensorflow.lite.Interpreter; -import org.tensorflow.lite.examples.classification.tflite.Classifier.Device; -import org.tensorflow.lite.gpu.GpuDelegate; -import org.tensorflow.lite.nnapi.NnApiDelegate; -import org.tensorflow.lite.support.common.FileUtil; -import org.tensorflow.lite.support.common.TensorOperator; -import org.tensorflow.lite.support.common.TensorProcessor; -import org.tensorflow.lite.support.image.ImageProcessor; -import org.tensorflow.lite.support.image.TensorImage; -import org.tensorflow.lite.support.image.ops.ResizeOp; -import org.tensorflow.lite.support.image.ops.ResizeOp.ResizeMethod; -import org.tensorflow.lite.support.image.ops.ResizeWithCropOrPadOp; -import org.tensorflow.lite.support.image.ops.Rot90Op; -import org.tensorflow.lite.support.label.TensorLabel; -import org.tensorflow.lite.support.tensorbuffer.TensorBuffer; - -/** A classifier specialized to label images using TensorFlow Lite. */ -public abstract class Classifier { - public static final String TAG = "ClassifierWithSupport"; - - /** The model type used for classification. */ - public enum Model { - FLOAT_MOBILENET, - QUANTIZED_MOBILENET, - QUANTIZED_EFFICIENTNET, - FLOAT_EFFICIENTNET - } - - /** The runtime device type used for executing classification. */ - public enum Device { - CPU, - NNAPI, - GPU - } - - /** Number of results to show in the UI. */ - private static final int MAX_RESULTS = 3; - - /** The loaded TensorFlow Lite model. */ - - /** Image size along the x axis. */ - private final int imageSizeX; - - /** Image size along the y axis. */ - private final int imageSizeY; - - /** Optional GPU delegate for accleration. */ - private GpuDelegate gpuDelegate = null; - - /** Optional NNAPI delegate for accleration. */ - private NnApiDelegate nnApiDelegate = null; - - /** An instance of the driver class to run model inference with Tensorflow Lite. */ - protected Interpreter tflite; - - /** Options for configuring the Interpreter. */ - private final Interpreter.Options tfliteOptions = new Interpreter.Options(); - - /** Labels corresponding to the output of the vision model. */ - private final List labels; - - /** Input image TensorBuffer. */ - private TensorImage inputImageBuffer; - - /** Output probability TensorBuffer. */ - private final TensorBuffer outputProbabilityBuffer; - - /** Processer to apply post processing of the output probability. */ - private final TensorProcessor probabilityProcessor; - - /** - * Creates a classifier with the provided configuration. - * - * @param activity The current Activity. - * @param model The model to use for classification. - * @param device The device to use for classification. - * @param numThreads The number of threads to use for classification. - * @return A classifier with the desired configuration. 
- */ - public static Classifier create(Activity activity, Model model, Device device, int numThreads) - throws IOException { - if (model == Model.QUANTIZED_MOBILENET) { - return new ClassifierQuantizedMobileNet(activity, device, numThreads); - } else if (model == Model.FLOAT_MOBILENET) { - return new ClassifierFloatMobileNet(activity, device, numThreads); - } else if (model == Model.FLOAT_EFFICIENTNET) { - return new ClassifierFloatEfficientNet(activity, device, numThreads); - } else if (model == Model.QUANTIZED_EFFICIENTNET) { - return new ClassifierQuantizedEfficientNet(activity, device, numThreads); - } else { - throw new UnsupportedOperationException(); - } - } - - /** An immutable result returned by a Classifier describing what was recognized. */ - public static class Recognition { - /** - * A unique identifier for what has been recognized. Specific to the class, not the instance of - * the object. - */ - private final String id; - - /** Display name for the recognition. */ - private final String title; - - /** - * A sortable score for how good the recognition is relative to others. Higher should be better. - */ - private final Float confidence; - - /** Optional location within the source image for the location of the recognized object. */ - private RectF location; - - public Recognition( - final String id, final String title, final Float confidence, final RectF location) { - this.id = id; - this.title = title; - this.confidence = confidence; - this.location = location; - } - - public String getId() { - return id; - } - - public String getTitle() { - return title; - } - - public Float getConfidence() { - return confidence; - } - - public RectF getLocation() { - return new RectF(location); - } - - public void setLocation(RectF location) { - this.location = location; - } - - @Override - public String toString() { - String resultString = ""; - if (id != null) { - resultString += "[" + id + "] "; - } - - if (title != null) { - resultString += title + " "; - } - - if (confidence != null) { - resultString += String.format("(%.1f%%) ", confidence * 100.0f); - } - - if (location != null) { - resultString += location + " "; - } - - return resultString.trim(); - } - } - - /** Initializes a {@code Classifier}. */ - protected Classifier(Activity activity, Device device, int numThreads) throws IOException { - MappedByteBuffer tfliteModel = FileUtil.loadMappedFile(activity, getModelPath()); - switch (device) { - case NNAPI: - nnApiDelegate = new NnApiDelegate(); - tfliteOptions.addDelegate(nnApiDelegate); - break; - case GPU: - gpuDelegate = new GpuDelegate(); - tfliteOptions.addDelegate(gpuDelegate); - break; - case CPU: - break; - } - tfliteOptions.setNumThreads(numThreads); - tflite = new Interpreter(tfliteModel, tfliteOptions); - - // Loads labels out from the label file. - labels = FileUtil.loadLabels(activity, getLabelPath()); - - // Reads type and shape of input and output tensors, respectively. 
- int imageTensorIndex = 0; - int[] imageShape = tflite.getInputTensor(imageTensorIndex).shape(); // {1, height, width, 3} - if(imageShape[1] != imageShape[2]) { - imageSizeY = imageShape[2]; - imageSizeX = imageShape[3]; - } else { - imageSizeY = imageShape[1]; - imageSizeX = imageShape[2]; - } - DataType imageDataType = tflite.getInputTensor(imageTensorIndex).dataType(); - int probabilityTensorIndex = 0; - int[] probabilityShape = - tflite.getOutputTensor(probabilityTensorIndex).shape(); // {1, NUM_CLASSES} - DataType probabilityDataType = tflite.getOutputTensor(probabilityTensorIndex).dataType(); - - // Creates the input tensor. - inputImageBuffer = new TensorImage(imageDataType); - - // Creates the output tensor and its processor. - outputProbabilityBuffer = TensorBuffer.createFixedSize(probabilityShape, probabilityDataType); - - // Creates the post processor for the output probability. - probabilityProcessor = new TensorProcessor.Builder().add(getPostprocessNormalizeOp()).build(); - - Log.d(TAG, "Created a Tensorflow Lite Image Classifier."); - } - - /** Runs inference and returns the classification results. */ - //public List recognizeImage(final Bitmap bitmap, int sensorOrientation) { - public float[] recognizeImage(final Bitmap bitmap, int sensorOrientation) { - // Logs this method so that it can be analyzed with systrace. - Trace.beginSection("recognizeImage"); - - Trace.beginSection("loadImage"); - long startTimeForLoadImage = SystemClock.uptimeMillis(); - inputImageBuffer = loadImage(bitmap, sensorOrientation); - long endTimeForLoadImage = SystemClock.uptimeMillis(); - Trace.endSection(); - Log.v(TAG, "Timecost to load the image: " + (endTimeForLoadImage - startTimeForLoadImage)); - - // Runs the inference call. - Trace.beginSection("runInference"); - long startTimeForReference = SystemClock.uptimeMillis(); - tflite.run(inputImageBuffer.getBuffer(), outputProbabilityBuffer.getBuffer().rewind()); - long endTimeForReference = SystemClock.uptimeMillis(); - Trace.endSection(); - Log.v(TAG, "Timecost to run model inference: " + (endTimeForReference - startTimeForReference)); - - float[] img_array = outputProbabilityBuffer.getFloatArray(); - - // Gets the map of label and probability. - //Map labeledProbability = - // new TensorLabel(labels, probabilityProcessor.process(outputProbabilityBuffer)) - // .getMapWithFloatValue(); - Trace.endSection(); - - // Gets top-k results. - return img_array;//getTopKProbability(labeledProbability); - } - - /** Closes the interpreter and model to release resources. */ - public void close() { - if (tflite != null) { - tflite.close(); - tflite = null; - } - if (gpuDelegate != null) { - gpuDelegate.close(); - gpuDelegate = null; - } - if (nnApiDelegate != null) { - nnApiDelegate.close(); - nnApiDelegate = null; - } - } - - /** Get the image size along the x axis. */ - public int getImageSizeX() { - return imageSizeX; - } - - /** Get the image size along the y axis. */ - public int getImageSizeY() { - return imageSizeY; - } - - /** Loads input image, and applies preprocessing. */ - private TensorImage loadImage(final Bitmap bitmap, int sensorOrientation) { - // Loads bitmap into a TensorImage. - inputImageBuffer.load(bitmap); - - // Creates processor for the TensorImage. - int cropSize = min(bitmap.getWidth(), bitmap.getHeight()); - int numRotation = sensorOrientation / 90; - // TODO(b/143564309): Fuse ops inside ImageProcessor. 
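    // The processor built below applies, in order: a center crop to a square of
    // min(width, height), a resize to the model's input resolution, a rotation in 90-degree
    // steps to undo the sensor orientation, and finally the model-specific normalization op.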
- ImageProcessor imageProcessor = - new ImageProcessor.Builder() - .add(new ResizeWithCropOrPadOp(cropSize, cropSize)) - // TODO(b/169379396): investigate the impact of the resize algorithm on accuracy. - // To get the same inference results as lib_task_api, which is built on top of the Task - // Library, use ResizeMethod.BILINEAR. - .add(new ResizeOp(imageSizeX, imageSizeY, ResizeMethod.NEAREST_NEIGHBOR)) - //.add(new ResizeOp(224, 224, ResizeMethod.NEAREST_NEIGHBOR)) - .add(new Rot90Op(numRotation)) - .add(getPreprocessNormalizeOp()) - .build(); - return imageProcessor.process(inputImageBuffer); - } - - /** Gets the top-k results. */ - private static List getTopKProbability(Map labelProb) { - // Find the best classifications. - PriorityQueue pq = - new PriorityQueue<>( - MAX_RESULTS, - new Comparator() { - @Override - public int compare(Recognition lhs, Recognition rhs) { - // Intentionally reversed to put high confidence at the head of the queue. - return Float.compare(rhs.getConfidence(), lhs.getConfidence()); - } - }); - - for (Map.Entry entry : labelProb.entrySet()) { - pq.add(new Recognition("" + entry.getKey(), entry.getKey(), entry.getValue(), null)); - } - - final ArrayList recognitions = new ArrayList<>(); - int recognitionsSize = min(pq.size(), MAX_RESULTS); - for (int i = 0; i < recognitionsSize; ++i) { - recognitions.add(pq.poll()); - } - return recognitions; - } - - /** Gets the name of the model file stored in Assets. */ - protected abstract String getModelPath(); - - /** Gets the name of the label file stored in Assets. */ - protected abstract String getLabelPath(); - - /** Gets the TensorOperator to nomalize the input image in preprocessing. */ - protected abstract TensorOperator getPreprocessNormalizeOp(); - - /** - * Gets the TensorOperator to dequantize the output probability in post processing. - * - *

    For quantized model, we need de-quantize the prediction with NormalizeOp (as they are all - * essentially linear transformation). For float model, de-quantize is not required. But to - * uniform the API, de-quantize is added to float model too. Mean and std are set to 0.0f and - * 1.0f, respectively. - */ - protected abstract TensorOperator getPostprocessNormalizeOp(); -} diff --git a/spaces/cowboyonmars/Linaqruf-animagine-xl/README.md b/spaces/cowboyonmars/Linaqruf-animagine-xl/README.md deleted file mode 100644 index e7bec700ae686b66f57983cc1c29ef1513a53992..0000000000000000000000000000000000000000 --- a/spaces/cowboyonmars/Linaqruf-animagine-xl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Linaqruf Animagine Xl -emoji: 👁 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/crystalai/EleutherAI-gpt-j-6b/app.py b/spaces/crystalai/EleutherAI-gpt-j-6b/app.py deleted file mode 100644 index 63843f9565f84472643a653354f8024857c03cf8..0000000000000000000000000000000000000000 --- a/spaces/crystalai/EleutherAI-gpt-j-6b/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/EleutherAI/gpt-j-6b").launch() \ No newline at end of file diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/eval/__init__.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/eval/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/danterivers/music-generation-samples/setup.py b/spaces/danterivers/music-generation-samples/setup.py deleted file mode 100644 index 78a172b7c90003b689bde40b49cc8fe1fb8107d4..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/setup.py +++ /dev/null @@ -1,65 +0,0 @@ -""" - Copyright (c) Meta Platforms, Inc. and affiliates. - All rights reserved. - - This source code is licensed under the license found in the - LICENSE file in the root directory of this source tree. 
- -""" - -from pathlib import Path - -from setuptools import setup, find_packages - - -NAME = 'audiocraft' -DESCRIPTION = 'Audio research library for PyTorch' - -URL = 'https://github.com/fairinternal/audiocraft' -AUTHOR = 'FAIR Speech & Audio' -EMAIL = 'defossez@meta.com' -REQUIRES_PYTHON = '>=3.8.0' - -for line in open('audiocraft/__init__.py'): - line = line.strip() - if '__version__' in line: - context = {} - exec(line, context) - VERSION = context['__version__'] - -HERE = Path(__file__).parent - -try: - with open(HERE / "README.md", encoding='utf-8') as f: - long_description = '\n' + f.read() -except FileNotFoundError: - long_description = DESCRIPTION - -REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')] - -setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - author_email=EMAIL, - long_description=long_description, - long_description_content_type='text/markdown', - author=AUTHOR, - url=URL, - python_requires=REQUIRES_PYTHON, - install_requires=REQUIRED, - extras_require={ - 'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'], - }, - packages=find_packages(), - package_data={'audiocraft': ['py.typed']}, - include_package_data=True, - license='MIT License', - classifiers=[ - # Trove classifiers - # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers - 'License :: OSI Approved :: MIT License', - 'Topic :: Multimedia :: Sound/Audio', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - ], -) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py deleted file mode 100644 index a88a907917dce5dace64fd1e38df86246c8e0305..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py +++ /dev/null @@ -1,225 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# - -## -# Image plugin for Palm pixmap images (output only). -## - -from . 
import Image, ImageFile -from ._binary import o8 -from ._binary import o16be as o16b - -# fmt: off -_Palm8BitColormapValues = ( - (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), - (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), - (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), - (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153), - (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255), - (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255), - (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204), - (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153), - (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153), - (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255), - (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204), - (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204), - (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153), - (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255), - (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255), - (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204), - (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153), - (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153), - (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255), - (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204), - (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204), - (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153), - (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255), - (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255), - (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204), - (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153), - (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153), - (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102), - (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51), - (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51), - (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0), - (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102), - (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102), - (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51), - (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0), - (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0), - (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102), - (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51), - (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51), - (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0), - (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102), - (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102), - (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51), - (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0), - (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0), - (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102), - (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51), - (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51), - (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0), - (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102), - (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102), - (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51), - (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0), - (0, 153, 0), (0, 
102, 0), (0, 51, 0), (17, 17, 17), - (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119), - (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221), - (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128), - (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), - (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) -# fmt: on - - -# so build a prototype image to be used for palette resampling -def build_prototype_image(): - image = Image.new("L", (1, len(_Palm8BitColormapValues))) - image.putdata(list(range(len(_Palm8BitColormapValues)))) - palettedata = () - for colormapValue in _Palm8BitColormapValues: - palettedata += colormapValue - palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues)) - image.putpalette(palettedata) - return image - - -Palm8BitColormapImage = build_prototype_image() - -# OK, we now have in Palm8BitColormapImage, -# a "P"-mode image with the right palette -# -# -------------------------------------------------------------------- - -_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000} - -_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00} - - -# -# -------------------------------------------------------------------- - -## -# (Internal) Image save plugin for the Palm format. - - -def _save(im, fp, filename): - if im.mode == "P": - # we assume this is a color Palm image with the standard colormap, - # unless the "info" dict has a "custom-colormap" field - - rawmode = "P" - bpp = 8 - version = 1 - - elif im.mode == "L": - if im.encoderinfo.get("bpp") in (1, 2, 4): - # this is 8-bit grayscale, so we shift it to get the high-order bits, - # and invert it because - # Palm does greyscale from white (0) to black (1) - bpp = im.encoderinfo["bpp"] - im = im.point( - lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift) - ) - elif im.info.get("bpp") in (1, 2, 4): - # here we assume that even though the inherent mode is 8-bit grayscale, - # only the lower bpp bits are significant. - # We invert them to match the Palm. 
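            # For example, with bpp=2 (maxval=3): a source value of 0 (black) maps to 3 and a
            # source value of 3 maps to 0, matching the Palm convention noted above where 0 is
            # white and maxval is black.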
- bpp = im.info["bpp"] - im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval)) - else: - msg = f"cannot write mode {im.mode} as Palm" - raise OSError(msg) - - # we ignore the palette here - im.mode = "P" - rawmode = "P;" + str(bpp) - version = 1 - - elif im.mode == "1": - # monochrome -- write it inverted, as is the Palm standard - rawmode = "1;I" - bpp = 1 - version = 0 - - else: - msg = f"cannot write mode {im.mode} as Palm" - raise OSError(msg) - - # - # make sure image data is available - im.load() - - # write header - - cols = im.size[0] - rows = im.size[1] - - rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2 - transparent_index = 0 - compression_type = _COMPRESSION_TYPES["none"] - - flags = 0 - if im.mode == "P" and "custom-colormap" in im.info: - flags = flags & _FLAGS["custom-colormap"] - colormapsize = 4 * 256 + 2 - colormapmode = im.palette.mode - colormap = im.getdata().getpalette() - else: - colormapsize = 0 - - if "offset" in im.info: - offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4 - else: - offset = 0 - - fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags)) - fp.write(o8(bpp)) - fp.write(o8(version)) - fp.write(o16b(offset)) - fp.write(o8(transparent_index)) - fp.write(o8(compression_type)) - fp.write(o16b(0)) # reserved by Palm - - # now write colormap if necessary - - if colormapsize > 0: - fp.write(o16b(256)) - for i in range(256): - fp.write(o8(i)) - if colormapmode == "RGB": - fp.write( - o8(colormap[3 * i]) - + o8(colormap[3 * i + 1]) - + o8(colormap[3 * i + 2]) - ) - elif colormapmode == "RGBA": - fp.write( - o8(colormap[4 * i]) - + o8(colormap[4 * i + 1]) - + o8(colormap[4 * i + 2]) - ) - - # now convert data to raw form - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))]) - - if hasattr(fp, "flush"): - fp.flush() - - -# -# -------------------------------------------------------------------- - -Image.register_save("Palm", _save) - -Image.register_extension("Palm", ".palm") - -Image.register_mime("Palm", "image/palm") diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Copy-92242405.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Copy-92242405.js deleted file mode 100644 index 9b3815f89fb096006fbbc39e7b6d406aeb3f9e76..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Copy-92242405.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as h,e as p,s as c,f as a,g as e,h as u,j as i,n as o,k as g}from"./index-9e76ffee.js";function v(l){let t,s;return{c(){t=a("svg"),s=a("polyline"),e(s,"points","20 6 9 17 4 12"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 24 24"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","3"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round")},m(n,r){u(n,t,r),i(t,s)},p:o,i:o,o,d(n){n&&g(t)}}}class m extends h{constructor(t){super(),p(this,t,null,v,c,{})}}function w(l){let t,s,n;return{c(){t=a("svg"),s=a("path"),n=a("path"),e(s,"fill","currentColor"),e(s,"d","M28 10v18H10V10h18m0-2H10a2 2 0 0 0-2 2v18a2 2 0 0 0 2 2h18a2 2 0 0 0 2-2V10a2 2 0 0 0-2-2Z"),e(n,"fill","currentColor"),e(n,"d","M4 18H2V4a2 2 0 0 1 2-2h14v2H4Z"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 32 
32")},m(r,d){u(r,t,d),i(t,s),i(t,n)},p:o,i:o,o,d(r){r&&g(t)}}}class x extends h{constructor(t){super(),p(this,t,null,w,c,{})}}export{x as C,m as a}; -//# sourceMappingURL=Copy-92242405.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-6c8affce.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-6c8affce.js deleted file mode 100644 index 64dc23cd5235fc224cd211139d923afb830e9bda..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Login-6c8affce.js +++ /dev/null @@ -1,3 +0,0 @@ -import{S as q,e as z,s as B,m as h,F as $,g as S,Y as F,h as c,G as v,w as x,u as w,k as p,H as k,o as g,t as j,j as G,x as P,N as L,O as N,T}from"./index-9e76ffee.js";import{S as Y}from"./StaticForm-775ac3c9.js";import{T as I}from"./index-85ff0bcb.js";import{a as A}from"./Button-30a08c0b.js";import{S as E}from"./StaticColumn-8964c3ef.js";import"./BlockTitle-af232cbc.js";import"./Info-77722665.js";import"./Copy-92242405.js";function D(l){let e,s;return{c(){e=h("p"),s=j(l[0]),S(e,"class","auth svelte-1ogxbi0")},m(a,o){c(a,e,o),G(e,s)},p(a,o){o&1&&P(s,a[0])},d(a){a&&p(e)}}}function H(l){let e;return{c(){e=h("p"),e.textContent=`If you are visiting a HuggingFace Space in Incognito mode, you must - enable third party cookies.`,S(e,"class","auth svelte-1ogxbi0")},m(s,a){c(s,e,a)},d(s){s&&p(e)}}}function O(l){let e;return{c(){e=h("p"),e.textContent="Incorrect Credentials",S(e,"class","creds svelte-1ogxbi0")},m(s,a){c(s,e,a)},d(s){s&&p(e)}}}function J(l){let e,s,a,o,r,m;function d(n){l[8](n)}let _={label:"username",lines:1,show_label:!0,max_lines:1,mode:"dynamic"};l[3]!==void 0&&(_.value=l[3]),e=new I({props:_}),L.push(()=>N(e,"value",d)),e.$on("submit",l[6]);function b(n){l[9](n)}let u={label:"password",lines:1,show_label:!0,max_lines:1,mode:"dynamic",type:"password"};return l[4]!==void 0&&(u.value=l[4]),o=new I({props:u}),L.push(()=>N(o,"value",b)),o.$on("submit",l[6]),{c(){$(e.$$.fragment),a=g(),$(o.$$.fragment)},m(n,f){v(e,n,f),c(n,a,f),v(o,n,f),m=!0},p(n,f){const t={};!s&&f&8&&(s=!0,t.value=n[3],T(()=>s=!1)),e.$set(t);const i={};!r&&f&16&&(r=!0,i.value=n[4],T(()=>r=!1)),o.$set(i)},i(n){m||(x(e.$$.fragment,n),x(o.$$.fragment,n),m=!0)},o(n){w(e.$$.fragment,n),w(o.$$.fragment,n),m=!1},d(n){n&&p(a),k(e,n),k(o,n)}}}function K(l){let e;return{c(){e=j("Login")},m(s,a){c(s,e,a)},d(s){s&&p(e)}}}function M(l){let e,s,a,o,r,m,d,_,b,u=l[0]&&D(l),n=l[2]&&H(),f=l[5]&&O();return m=new Y({props:{$$slots:{default:[J]},$$scope:{ctx:l}}}),_=new A({props:{size:"lg",variant:"primary",$$slots:{default:[K]},$$scope:{ctx:l}}}),_.$on("click",l[6]),{c(){e=h("h2"),e.textContent="Login",s=g(),u&&u.c(),a=g(),n&&n.c(),o=g(),f&&f.c(),r=g(),$(m.$$.fragment),d=g(),$(_.$$.fragment),S(e,"class","svelte-1ogxbi0")},m(t,i){c(t,e,i),c(t,s,i),u&&u.m(t,i),c(t,a,i),n&&n.m(t,i),c(t,o,i),f&&f.m(t,i),c(t,r,i),v(m,t,i),c(t,d,i),v(_,t,i),b=!0},p(t,i){t[0]?u?u.p(t,i):(u=D(t),u.c(),u.m(a.parentNode,a)):u&&(u.d(1),u=null),t[2]?n||(n=H(),n.c(),n.m(o.parentNode,o)):n&&(n.d(1),n=null),t[5]?f||(f=O(),f.c(),f.m(r.parentNode,r)):f&&(f.d(1),f=null);const C={};i&1048&&(C.$$scope={dirty:i,ctx:t}),m.$set(C);const 
y={};i&1024&&(y.$$scope={dirty:i,ctx:t}),_.$set(y)},i(t){b||(x(m.$$.fragment,t),x(_.$$.fragment,t),b=!0)},o(t){w(m.$$.fragment,t),w(_.$$.fragment,t),b=!1},d(t){t&&(p(e),p(s),p(a),p(o),p(r),p(d)),u&&u.d(t),n&&n.d(t),f&&f.d(t),k(m,t),k(_,t)}}}function Q(l){let e,s,a;return s=new E({props:{variant:"panel",min_width:480,$$slots:{default:[M]},$$scope:{ctx:l}}}),{c(){e=h("div"),$(s.$$.fragment),S(e,"class","wrap svelte-1ogxbi0"),F(e,"min-h-screen",l[1])},m(o,r){c(o,e,r),v(s,e,null),a=!0},p(o,[r]){const m={};r&1085&&(m.$$scope={dirty:r,ctx:o}),s.$set(m),(!a||r&2)&&F(e,"min-h-screen",o[1])},i(o){a||(x(s.$$.fragment,o),a=!0)},o(o){w(s.$$.fragment,o),a=!1},d(o){o&&p(e),k(s)}}}function R(l,e,s){let{root:a}=e,{auth_message:o}=e,{app_mode:r}=e,{space_id:m}=e,d="",_="",b=!1;const u=async()=>{const t=new FormData;t.append("username",d),t.append("password",_);let i=await fetch(a+"/login",{method:"POST",body:t});i.status===400?(s(5,b=!0),s(3,d=""),s(4,_="")):i.status==200&&location.reload()};function n(t){d=t,s(3,d)}function f(t){_=t,s(4,_)}return l.$$set=t=>{"root"in t&&s(7,a=t.root),"auth_message"in t&&s(0,o=t.auth_message),"app_mode"in t&&s(1,r=t.app_mode),"space_id"in t&&s(2,m=t.space_id)},[o,r,m,d,_,b,u,a,n,f]}class ne extends q{constructor(e){super(),z(this,e,R,Q,B,{root:7,auth_message:0,app_mode:1,space_id:2})}}export{ne as default}; -//# sourceMappingURL=Login-6c8affce.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticTabs-00db98ac.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticTabs-00db98ac.js deleted file mode 100644 index c2e02bb75be2e0778ce8ce0cd47ab16b662c1882..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticTabs-00db98ac.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as H,e as J,s as M,I as C,a9 as O,m as T,o as S,g,Y as I,h as v,j as k,J as R,ay as V,ab as P,ac as U,ad as Y,w as y,u as w,k as j,a7 as A,a1 as E,C as z,U as W,P as N,aA as B,t as K,p as X,x as L,N as Z,O as x,F as $,G as ee,T as te,H as le,E as D}from"./index-9e76ffee.js";function F(l,e,s){const t=l.slice();return t[14]=e[s],t[16]=s,t}function se(l){let e,s=l[14].name+"",t,_,o,n;function i(){return l[12](l[14],l[16])}return{c(){e=T("button"),t=K(s),_=S(),g(e,"class","svelte-kqij2n")},m(u,r){v(u,e,r),k(e,t),k(e,_),o||(n=X(e,"click",i),o=!0)},p(u,r){l=u,r&8&&s!==(s=l[14].name+"")&&L(t,s)},d(u){u&&j(e),o=!1,n()}}}function ne(l){let e,s=l[14].name+"",t,_;return{c(){e=T("button"),t=K(s),_=S(),g(e,"class","selected svelte-kqij2n")},m(o,n){v(o,e,n),k(e,t),k(e,_)},p(o,n){n&8&&s!==(s=o[14].name+"")&&L(t,s)},d(o){o&&j(e)}}}function G(l,e){let s,t;function _(i,u){return i[14].id===i[4]?ne:se}let o=_(e),n=o(e);return{key:l,first:null,c(){s=N(),n.c(),t=N(),this.first=s},m(i,u){v(i,s,u),n.m(i,u),v(i,t,u)},p(i,u){e=i,o===(o=_(e))&&n?n.p(e,u):(n.d(1),n=o(e),n&&(n.c(),n.m(t.parentNode,t)))},d(i){i&&(j(s),j(t)),n.d(i)}}}function ie(l){let e,s,t=[],_=new Map,o,n,i,u=C(l[3]);const r=a=>a[14].id;for(let a=0;as(4,_=f));const c=A(0);E(l,c,f=>s(13,t=f));const b=z();W(ae,{register_tab:f=>(d.push({name:f.name,id:f.id}),a.update(h=>h??f.id),s(3,d),d.length-1),unregister_tab:f=>{const h=d.findIndex(p=>p.id===f.id);d.splice(h,1),a.update(p=>p===f.id?d[h]?.id||d[d.length-1]?.id:p)},selected_tab:a,selected_tab_index:c});function q(f){s(9,m=f),B(a,_=f,_),B(c,t=d.findIndex(h=>h.id===f),t),b("change")}const 
Q=(f,h)=>{q(f.id),b("select",{value:f.name,index:h})};return l.$$set=f=>{"visible"in f&&s(0,i=f.visible),"elem_id"in f&&s(1,u=f.elem_id),"elem_classes"in f&&s(2,r=f.elem_classes),"selected"in f&&s(9,m=f.selected),"$$scope"in f&&s(10,n=f.$$scope)},l.$$.update=()=>{l.$$.dirty&512&&m!==null&&q(m)},[i,u,r,d,_,a,c,b,q,m,n,o,Q]}class _e extends H{constructor(e){super(),J(this,e,ce,ie,M,{visible:0,elem_id:1,elem_classes:2,selected:9})}}function ue(l){let e;const s=l[4].default,t=O(s,l,l[8],null);return{c(){t&&t.c()},m(_,o){t&&t.m(_,o),e=!0},p(_,o){t&&t.p&&(!e||o&256)&&P(t,s,_,_[8],e?Y(s,_[8],o,null):U(_[8]),null)},i(_){e||(y(t,_),e=!0)},o(_){w(t,_),e=!1},d(_){t&&t.d(_)}}}function oe(l){let e,s,t;function _(n){l[5](n)}let o={visible:l[1],elem_id:l[2],elem_classes:l[3],$$slots:{default:[ue]},$$scope:{ctx:l}};return l[0]!==void 0&&(o.selected=l[0]),e=new _e({props:o}),Z.push(()=>x(e,"selected",_)),e.$on("change",l[6]),e.$on("select",l[7]),{c(){$(e.$$.fragment)},m(n,i){ee(e,n,i),t=!0},p(n,[i]){const u={};i&2&&(u.visible=n[1]),i&4&&(u.elem_id=n[2]),i&8&&(u.elem_classes=n[3]),i&256&&(u.$$scope={dirty:i,ctx:n}),!s&&i&1&&(s=!0,u.selected=n[0],te(()=>s=!1)),e.$set(u)},i(n){t||(y(e.$$.fragment,n),t=!0)},o(n){w(e.$$.fragment,n),t=!1},d(n){le(e,n)}}}function fe(l,e,s){let{$$slots:t={},$$scope:_}=e;const o=z();let{visible:n=!0}=e,{elem_id:i=""}=e,{elem_classes:u=[]}=e,{selected:r}=e;function m(c){r=c,s(0,r)}function d(c){D.call(this,l,c)}function a(c){D.call(this,l,c)}return l.$$set=c=>{"visible"in c&&s(1,n=c.visible),"elem_id"in c&&s(2,i=c.elem_id),"elem_classes"in c&&s(3,u=c.elem_classes),"selected"in c&&s(0,r=c.selected),"$$scope"in c&&s(8,_=c.$$scope)},l.$$.update=()=>{l.$$.dirty&1&&o("prop_change",{selected:r})},[r,n,i,u,t,m,d,a,_]}class de extends H{constructor(e){super(),J(this,e,fe,oe,M,{visible:1,elem_id:2,elem_classes:3,selected:0})}}const me=de;export{me as S,ae as T}; -//# sourceMappingURL=StaticTabs-00db98ac.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-329f8260.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-329f8260.css deleted file mode 100644 index 3b53ee465e192f512a964e9050e9aab81384add8..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-329f8260.css +++ /dev/null @@ -1 +0,0 @@ -.min.svelte-1ybaih5{min-height:var(--size-24)}.hide.svelte-1ybaih5{display:none}div.svelte-1ed2p3z{transition:.15s}.pending.svelte-1ed2p3z{opacity:.2} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-a6bf714f.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-a6bf714f.js deleted file mode 100644 index 96dc15658d3281a855f1a7c81f39dbb8904a63b1..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-a6bf714f.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as g,e as v,s as d,a9 as r,m as q,g as o,as as h,Y as u,h as b,ab as S,ac as w,ad as R,w as j,u as C,k}from"./index-39fce9e2.js";function Y(i){let e,_,s;const f=i[6].default,a=r(f,i,i[5],null);return{c(){e=q("div"),a&&a.c(),o(e,"id",i[1]),o(e,"class",_=h(i[2].join(" "))+" 
svelte-15lo0d8"),u(e,"compact",i[4]==="compact"),u(e,"panel",i[4]==="panel"),u(e,"unequal-height",i[0]===!1),u(e,"stretch",i[0]),u(e,"hide",!i[3])},m(l,t){b(l,e,t),a&&a.m(e,null),s=!0},p(l,[t]){a&&a.p&&(!s||t&32)&&S(a,f,l,l[5],s?R(f,l[5],t,null):w(l[5]),null),(!s||t&2)&&o(e,"id",l[1]),(!s||t&4&&_!==(_=h(l[2].join(" "))+" svelte-15lo0d8"))&&o(e,"class",_),(!s||t&20)&&u(e,"compact",l[4]==="compact"),(!s||t&20)&&u(e,"panel",l[4]==="panel"),(!s||t&5)&&u(e,"unequal-height",l[0]===!1),(!s||t&5)&&u(e,"stretch",l[0]),(!s||t&12)&&u(e,"hide",!l[3])},i(l){s||(j(a,l),s=!0)},o(l){C(a,l),s=!1},d(l){l&&k(e),a&&a.d(l)}}}function z(i,e,_){let{$$slots:s={},$$scope:f}=e,{equal_height:a=!0}=e,{elem_id:l}=e,{elem_classes:t=[]}=e,{visible:c=!0}=e,{variant:m="default"}=e;return i.$$set=n=>{"equal_height"in n&&_(0,a=n.equal_height),"elem_id"in n&&_(1,l=n.elem_id),"elem_classes"in n&&_(2,t=n.elem_classes),"visible"in n&&_(3,c=n.visible),"variant"in n&&_(4,m=n.variant),"$$scope"in n&&_(5,f=n.$$scope)},[a,l,t,c,m,f,s]}class A extends g{constructor(e){super(),v(this,e,z,Y,d,{equal_height:0,elem_id:1,elem_classes:2,visible:3,variant:4})}}const D=A,E=["static"];export{D as Component,E as modes}; -//# sourceMappingURL=index-a6bf714f.js.map diff --git a/spaces/dddmiku/vits-uma-genshin-honkai/transforms.py b/spaces/dddmiku/vits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/dddmiku/vits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = 
inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one 
- 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/declare-lab/tango/diffusers/examples/community/multilingual_stable_diffusion.py b/spaces/declare-lab/tango/diffusers/examples/community/multilingual_stable_diffusion.py deleted file mode 100644 index f920c4cd59da117dcf2ba926f808d5fcb2ff0350..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/examples/community/multilingual_stable_diffusion.py +++ /dev/null @@ -1,436 +0,0 @@ -import inspect -from typing import Callable, List, Optional, Union - -import torch -from transformers import ( - CLIPImageProcessor, - CLIPTextModel, - CLIPTokenizer, - MBart50TokenizerFast, - MBartForConditionalGeneration, - pipeline, -) - -from diffusers import DiffusionPipeline -from diffusers.configuration_utils import FrozenDict -from diffusers.models import AutoencoderKL, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from diffusers.utils import deprecate, logging - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def detect_language(pipe, prompt, batch_size): - """helper function to detect language(s) of prompt""" - - if batch_size == 1: - preds = pipe(prompt, top_k=1, truncation=True, max_length=128) - return preds[0]["label"] - else: - detected_languages = [] - for p in prompt: - preds = pipe(p, top_k=1, truncation=True, max_length=128) - detected_languages.append(preds[0]["label"]) - - return detected_languages - - -def translate_prompt(prompt, translation_tokenizer, translation_model, device): - """helper function to translate prompt to English""" - - encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device) - generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000) - en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) - - return en_trans[0] - - -class MultilingualStableDiffusion(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion in different languages. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - detection_pipeline ([`pipeline`]): - Transformers pipeline to detect prompt's language. 
- translation_model ([`MBartForConditionalGeneration`]): - Model to translate prompt to English, if necessary. Please refer to the - [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details. - translation_tokenizer ([`MBart50TokenizerFast`]): - Tokenizer of the translation model. - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPImageProcessor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - - def __init__( - self, - detection_pipeline: pipeline, - translation_model: MBartForConditionalGeneration, - translation_tokenizer: MBart50TokenizerFast, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
- ) - - self.register_modules( - detection_pipeline=detection_pipeline, - translation_model=translation_model, - translation_tokenizer=translation_tokenizer, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. - """ - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - self.unet.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - height: int = 512, - width: int = 512, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. Can be in different languages. - height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. 
- eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - # detect language and translate if necessary - prompt_language = detect_language(self.detection_pipeline, prompt, batch_size) - if batch_size == 1 and prompt_language != "en": - prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device) - - if isinstance(prompt, list): - for index in range(batch_size): - if prompt_language[index] != "en": - p = translate_prompt( - prompt[index], self.translation_tokenizer, self.translation_model, self.device - ) - prompt[index] = p - - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - - if text_input_ids.shape[-1] > self.tokenizer.model_max_length: - removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] - text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - # detect language and translate it if necessary - negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size) - if negative_prompt_language != "en": - negative_prompt = translate_prompt( - negative_prompt, self.translation_tokenizer, self.translation_model, self.device - ) - if isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
- ) - else: - # detect language and translate it if necessary - if isinstance(negative_prompt, list): - negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size) - for index in range(batch_size): - if negative_prompt_languages[index] != "en": - p = translate_prompt( - negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device - ) - negative_prompt[index] = p - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - # get the initial random noise unless the user supplied it - - # Unlike in other pipelines, latents need to be generated in the target device - # for 1-to-1 results reproducibility with the CompVis implementation. - # However this currently doesn't work in `mps`. - latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8) - latents_dtype = text_embeddings.dtype - if latents is None: - if self.device.type == "mps": - # randn does not work reproducibly on mps - latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( - self.device - ) - else: - latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) - else: - if latents.shape != latents_shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") - latents = latents.to(self.device) - - # set timesteps - self.scheduler.set_timesteps(num_inference_steps) - - # Some schedulers like PNDM have timesteps as arrays - # It's more optimized to move all timesteps to correct device beforehand - timesteps_tensor = self.scheduler.timesteps.to(self.device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
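        # Rather than special-casing scheduler classes, the code below inspects the signature of
        # scheduler.step() and only forwards `eta` when the scheduler actually accepts it.
        # In the denoising loop further down, classifier-free guidance then combines the two
        # noise predictions as:
        #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)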
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - for i, t in enumerate(self.progress_bar(timesteps_tensor)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to( - self.device - ) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype) - ) - else: - has_nsfw_concept = None - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/declare-lab/tango/diffusers/scripts/convert_vq_diffusion_to_diffusers.py b/spaces/declare-lab/tango/diffusers/scripts/convert_vq_diffusion_to_diffusers.py deleted file mode 100644 index 58ed2d93d5df4bd486b7485e1dc5e3cd255f2d99..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/scripts/convert_vq_diffusion_to_diffusers.py +++ /dev/null @@ -1,925 +0,0 @@ -""" -This script ports models from VQ-diffusion (https://github.com/microsoft/VQ-Diffusion) to diffusers. - -It currently only supports porting the ITHQ dataset. - -ITHQ dataset: -```sh -# From the root directory of diffusers. 
- -# Download the VQVAE checkpoint -$ wget https://facevcstandard.blob.core.windows.net/v-zhictang/Improved-VQ-Diffusion_model_release/ithq_vqvae.pth?sv=2020-10-02&st=2022-05-30T15%3A17%3A18Z&se=2030-05-31T15%3A17%3A00Z&sr=b&sp=r&sig=1jVavHFPpUjDs%2FTO1V3PTezaNbPp2Nx8MxiWI7y6fEY%3D -O ithq_vqvae.pth - -# Download the VQVAE config -# NOTE that in VQ-diffusion the documented file is `configs/ithq.yaml` but the target class -# `image_synthesis.modeling.codecs.image_codec.ema_vqvae.PatchVQVAE` -# loads `OUTPUT/pretrained_model/taming_dvae/config.yaml` -$ wget https://raw.githubusercontent.com/microsoft/VQ-Diffusion/main/OUTPUT/pretrained_model/taming_dvae/config.yaml -O ithq_vqvae.yaml - -# Download the main model checkpoint -$ wget https://facevcstandard.blob.core.windows.net/v-zhictang/Improved-VQ-Diffusion_model_release/ithq_learnable.pth?sv=2020-10-02&st=2022-05-30T10%3A22%3A06Z&se=2030-05-31T10%3A22%3A00Z&sr=b&sp=r&sig=GOE%2Bza02%2FPnGxYVOOPtwrTR4RA3%2F5NVgMxdW4kjaEZ8%3D -O ithq_learnable.pth - -# Download the main model config -$ wget https://raw.githubusercontent.com/microsoft/VQ-Diffusion/main/configs/ithq.yaml -O ithq.yaml - -# run the convert script -$ python ./scripts/convert_vq_diffusion_to_diffusers.py \ - --checkpoint_path ./ithq_learnable.pth \ - --original_config_file ./ithq.yaml \ - --vqvae_checkpoint_path ./ithq_vqvae.pth \ - --vqvae_original_config_file ./ithq_vqvae.yaml \ - --dump_path -``` -""" - -import argparse -import tempfile - -import torch -import yaml -from accelerate import init_empty_weights, load_checkpoint_and_dispatch -from transformers import CLIPTextModel, CLIPTokenizer -from yaml.loader import FullLoader - -from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel -from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings - - -try: - from omegaconf import OmegaConf -except ImportError: - raise ImportError( - "OmegaConf is required to convert the VQ Diffusion checkpoints. Please install it with `pip install" - " OmegaConf`." - ) - -# vqvae model - -PORTED_VQVAES = ["image_synthesis.modeling.codecs.image_codec.patch_vqgan.PatchVQGAN"] - - -def vqvae_model_from_original_config(original_config): - assert original_config.target in PORTED_VQVAES, f"{original_config.target} has not yet been ported to diffusers." 
- - original_config = original_config.params - - original_encoder_config = original_config.encoder_config.params - original_decoder_config = original_config.decoder_config.params - - in_channels = original_encoder_config.in_channels - out_channels = original_decoder_config.out_ch - - down_block_types = get_down_block_types(original_encoder_config) - up_block_types = get_up_block_types(original_decoder_config) - - assert original_encoder_config.ch == original_decoder_config.ch - assert original_encoder_config.ch_mult == original_decoder_config.ch_mult - block_out_channels = tuple( - [original_encoder_config.ch * a_ch_mult for a_ch_mult in original_encoder_config.ch_mult] - ) - - assert original_encoder_config.num_res_blocks == original_decoder_config.num_res_blocks - layers_per_block = original_encoder_config.num_res_blocks - - assert original_encoder_config.z_channels == original_decoder_config.z_channels - latent_channels = original_encoder_config.z_channels - - num_vq_embeddings = original_config.n_embed - - # Hard coded value for ResnetBlock.GoupNorm(num_groups) in VQ-diffusion - norm_num_groups = 32 - - e_dim = original_config.embed_dim - - model = VQModel( - in_channels=in_channels, - out_channels=out_channels, - down_block_types=down_block_types, - up_block_types=up_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - latent_channels=latent_channels, - num_vq_embeddings=num_vq_embeddings, - norm_num_groups=norm_num_groups, - vq_embed_dim=e_dim, - ) - - return model - - -def get_down_block_types(original_encoder_config): - attn_resolutions = coerce_attn_resolutions(original_encoder_config.attn_resolutions) - num_resolutions = len(original_encoder_config.ch_mult) - resolution = coerce_resolution(original_encoder_config.resolution) - - curr_res = resolution - down_block_types = [] - - for _ in range(num_resolutions): - if curr_res in attn_resolutions: - down_block_type = "AttnDownEncoderBlock2D" - else: - down_block_type = "DownEncoderBlock2D" - - down_block_types.append(down_block_type) - - curr_res = [r // 2 for r in curr_res] - - return down_block_types - - -def get_up_block_types(original_decoder_config): - attn_resolutions = coerce_attn_resolutions(original_decoder_config.attn_resolutions) - num_resolutions = len(original_decoder_config.ch_mult) - resolution = coerce_resolution(original_decoder_config.resolution) - - curr_res = [r // 2 ** (num_resolutions - 1) for r in resolution] - up_block_types = [] - - for _ in reversed(range(num_resolutions)): - if curr_res in attn_resolutions: - up_block_type = "AttnUpDecoderBlock2D" - else: - up_block_type = "UpDecoderBlock2D" - - up_block_types.append(up_block_type) - - curr_res = [r * 2 for r in curr_res] - - return up_block_types - - -def coerce_attn_resolutions(attn_resolutions): - attn_resolutions = OmegaConf.to_object(attn_resolutions) - attn_resolutions_ = [] - for ar in attn_resolutions: - if isinstance(ar, (list, tuple)): - attn_resolutions_.append(list(ar)) - else: - attn_resolutions_.append([ar, ar]) - return attn_resolutions_ - - -def coerce_resolution(resolution): - resolution = OmegaConf.to_object(resolution) - if isinstance(resolution, int): - resolution = [resolution, resolution] # H, W - elif isinstance(resolution, (tuple, list)): - resolution = list(resolution) - else: - raise ValueError("Unknown type of resolution:", resolution) - return resolution - - -# done vqvae model - -# vqvae checkpoint - - -def vqvae_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): - 
diffusers_checkpoint = {} - - diffusers_checkpoint.update(vqvae_encoder_to_diffusers_checkpoint(model, checkpoint)) - - # quant_conv - - diffusers_checkpoint.update( - { - "quant_conv.weight": checkpoint["quant_conv.weight"], - "quant_conv.bias": checkpoint["quant_conv.bias"], - } - ) - - # quantize - diffusers_checkpoint.update({"quantize.embedding.weight": checkpoint["quantize.embedding"]}) - - # post_quant_conv - diffusers_checkpoint.update( - { - "post_quant_conv.weight": checkpoint["post_quant_conv.weight"], - "post_quant_conv.bias": checkpoint["post_quant_conv.bias"], - } - ) - - # decoder - diffusers_checkpoint.update(vqvae_decoder_to_diffusers_checkpoint(model, checkpoint)) - - return diffusers_checkpoint - - -def vqvae_encoder_to_diffusers_checkpoint(model, checkpoint): - diffusers_checkpoint = {} - - # conv_in - diffusers_checkpoint.update( - { - "encoder.conv_in.weight": checkpoint["encoder.conv_in.weight"], - "encoder.conv_in.bias": checkpoint["encoder.conv_in.bias"], - } - ) - - # down_blocks - for down_block_idx, down_block in enumerate(model.encoder.down_blocks): - diffusers_down_block_prefix = f"encoder.down_blocks.{down_block_idx}" - down_block_prefix = f"encoder.down.{down_block_idx}" - - # resnets - for resnet_idx, resnet in enumerate(down_block.resnets): - diffusers_resnet_prefix = f"{diffusers_down_block_prefix}.resnets.{resnet_idx}" - resnet_prefix = f"{down_block_prefix}.block.{resnet_idx}" - - diffusers_checkpoint.update( - vqvae_resnet_to_diffusers_checkpoint( - resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix - ) - ) - - # downsample - - # do not include the downsample when on the last down block - # There is no downsample on the last down block - if down_block_idx != len(model.encoder.down_blocks) - 1: - # There's a single downsample in the original checkpoint but a list of downsamples - # in the diffusers model. 
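            # (worked example for the first down block, derived from the prefixes assigned just below:
            #  the original key "encoder.down.0.downsample.conv.weight" is renamed to
            #  "encoder.down_blocks.0.downsamplers.0.conv.weight" in the diffusers checkpoint)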
- diffusers_downsample_prefix = f"{diffusers_down_block_prefix}.downsamplers.0.conv" - downsample_prefix = f"{down_block_prefix}.downsample.conv" - diffusers_checkpoint.update( - { - f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], - f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], - } - ) - - # attentions - - if hasattr(down_block, "attentions"): - for attention_idx, _ in enumerate(down_block.attentions): - diffusers_attention_prefix = f"{diffusers_down_block_prefix}.attentions.{attention_idx}" - attention_prefix = f"{down_block_prefix}.attn.{attention_idx}" - diffusers_checkpoint.update( - vqvae_attention_to_diffusers_checkpoint( - checkpoint, - diffusers_attention_prefix=diffusers_attention_prefix, - attention_prefix=attention_prefix, - ) - ) - - # mid block - - # mid block attentions - - # There is a single hardcoded attention block in the middle of the VQ-diffusion encoder - diffusers_attention_prefix = "encoder.mid_block.attentions.0" - attention_prefix = "encoder.mid.attn_1" - diffusers_checkpoint.update( - vqvae_attention_to_diffusers_checkpoint( - checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix - ) - ) - - # mid block resnets - - for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): - diffusers_resnet_prefix = f"encoder.mid_block.resnets.{diffusers_resnet_idx}" - - # the hardcoded prefixes to `block_` are 1 and 2 - orig_resnet_idx = diffusers_resnet_idx + 1 - # There are two hardcoded resnets in the middle of the VQ-diffusion encoder - resnet_prefix = f"encoder.mid.block_{orig_resnet_idx}" - - diffusers_checkpoint.update( - vqvae_resnet_to_diffusers_checkpoint( - resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix - ) - ) - - diffusers_checkpoint.update( - { - # conv_norm_out - "encoder.conv_norm_out.weight": checkpoint["encoder.norm_out.weight"], - "encoder.conv_norm_out.bias": checkpoint["encoder.norm_out.bias"], - # conv_out - "encoder.conv_out.weight": checkpoint["encoder.conv_out.weight"], - "encoder.conv_out.bias": checkpoint["encoder.conv_out.bias"], - } - ) - - return diffusers_checkpoint - - -def vqvae_decoder_to_diffusers_checkpoint(model, checkpoint): - diffusers_checkpoint = {} - - # conv in - diffusers_checkpoint.update( - { - "decoder.conv_in.weight": checkpoint["decoder.conv_in.weight"], - "decoder.conv_in.bias": checkpoint["decoder.conv_in.bias"], - } - ) - - # up_blocks - - for diffusers_up_block_idx, up_block in enumerate(model.decoder.up_blocks): - # up_blocks are stored in reverse order in the VQ-diffusion checkpoint - orig_up_block_idx = len(model.decoder.up_blocks) - 1 - diffusers_up_block_idx - - diffusers_up_block_prefix = f"decoder.up_blocks.{diffusers_up_block_idx}" - up_block_prefix = f"decoder.up.{orig_up_block_idx}" - - # resnets - for resnet_idx, resnet in enumerate(up_block.resnets): - diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}" - resnet_prefix = f"{up_block_prefix}.block.{resnet_idx}" - - diffusers_checkpoint.update( - vqvae_resnet_to_diffusers_checkpoint( - resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix - ) - ) - - # upsample - - # there is no up sample on the last up block - if diffusers_up_block_idx != len(model.decoder.up_blocks) - 1: - # There's a single upsample in the VQ-diffusion checkpoint but a list of downsamples - # in the diffusers model. 
- diffusers_downsample_prefix = f"{diffusers_up_block_prefix}.upsamplers.0.conv" - downsample_prefix = f"{up_block_prefix}.upsample.conv" - diffusers_checkpoint.update( - { - f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], - f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], - } - ) - - # attentions - - if hasattr(up_block, "attentions"): - for attention_idx, _ in enumerate(up_block.attentions): - diffusers_attention_prefix = f"{diffusers_up_block_prefix}.attentions.{attention_idx}" - attention_prefix = f"{up_block_prefix}.attn.{attention_idx}" - diffusers_checkpoint.update( - vqvae_attention_to_diffusers_checkpoint( - checkpoint, - diffusers_attention_prefix=diffusers_attention_prefix, - attention_prefix=attention_prefix, - ) - ) - - # mid block - - # mid block attentions - - # There is a single hardcoded attention block in the middle of the VQ-diffusion decoder - diffusers_attention_prefix = "decoder.mid_block.attentions.0" - attention_prefix = "decoder.mid.attn_1" - diffusers_checkpoint.update( - vqvae_attention_to_diffusers_checkpoint( - checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix - ) - ) - - # mid block resnets - - for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): - diffusers_resnet_prefix = f"decoder.mid_block.resnets.{diffusers_resnet_idx}" - - # the hardcoded prefixes to `block_` are 1 and 2 - orig_resnet_idx = diffusers_resnet_idx + 1 - # There are two hardcoded resnets in the middle of the VQ-diffusion decoder - resnet_prefix = f"decoder.mid.block_{orig_resnet_idx}" - - diffusers_checkpoint.update( - vqvae_resnet_to_diffusers_checkpoint( - resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix - ) - ) - - diffusers_checkpoint.update( - { - # conv_norm_out - "decoder.conv_norm_out.weight": checkpoint["decoder.norm_out.weight"], - "decoder.conv_norm_out.bias": checkpoint["decoder.norm_out.bias"], - # conv_out - "decoder.conv_out.weight": checkpoint["decoder.conv_out.weight"], - "decoder.conv_out.bias": checkpoint["decoder.conv_out.bias"], - } - ) - - return diffusers_checkpoint - - -def vqvae_resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): - rv = { - # norm1 - f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.norm1.weight"], - f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.norm1.bias"], - # conv1 - f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"], - f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"], - # norm2 - f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.norm2.weight"], - f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.norm2.bias"], - # conv2 - f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"], - f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"], - } - - if resnet.conv_shortcut is not None: - rv.update( - { - f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"], - f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"], - } - ) - - return rv - - -def vqvae_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix): - return { - # group_norm - 
f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], - f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], - # query - f"{diffusers_attention_prefix}.query.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0], - f"{diffusers_attention_prefix}.query.bias": checkpoint[f"{attention_prefix}.q.bias"], - # key - f"{diffusers_attention_prefix}.key.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0], - f"{diffusers_attention_prefix}.key.bias": checkpoint[f"{attention_prefix}.k.bias"], - # value - f"{diffusers_attention_prefix}.value.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0], - f"{diffusers_attention_prefix}.value.bias": checkpoint[f"{attention_prefix}.v.bias"], - # proj_attn - f"{diffusers_attention_prefix}.proj_attn.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][ - :, :, 0, 0 - ], - f"{diffusers_attention_prefix}.proj_attn.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], - } - - -# done vqvae checkpoint - -# transformer model - -PORTED_DIFFUSIONS = ["image_synthesis.modeling.transformers.diffusion_transformer.DiffusionTransformer"] -PORTED_TRANSFORMERS = ["image_synthesis.modeling.transformers.transformer_utils.Text2ImageTransformer"] -PORTED_CONTENT_EMBEDDINGS = ["image_synthesis.modeling.embeddings.dalle_mask_image_embedding.DalleMaskImageEmbedding"] - - -def transformer_model_from_original_config( - original_diffusion_config, original_transformer_config, original_content_embedding_config -): - assert ( - original_diffusion_config.target in PORTED_DIFFUSIONS - ), f"{original_diffusion_config.target} has not yet been ported to diffusers." - assert ( - original_transformer_config.target in PORTED_TRANSFORMERS - ), f"{original_transformer_config.target} has not yet been ported to diffusers." - assert ( - original_content_embedding_config.target in PORTED_CONTENT_EMBEDDINGS - ), f"{original_content_embedding_config.target} has not yet been ported to diffusers." - - original_diffusion_config = original_diffusion_config.params - original_transformer_config = original_transformer_config.params - original_content_embedding_config = original_content_embedding_config.params - - inner_dim = original_transformer_config["n_embd"] - - n_heads = original_transformer_config["n_head"] - - # VQ-Diffusion gives dimension of the multi-headed attention layers as the - # number of attention heads times the sequence length (the dimension) of a - # single head. We want to specify our attention blocks with those values - # specified separately - assert inner_dim % n_heads == 0 - d_head = inner_dim // n_heads - - depth = original_transformer_config["n_layer"] - context_dim = original_transformer_config["condition_dim"] - - num_embed = original_content_embedding_config["num_embed"] - # the number of embeddings in the transformer includes the mask embedding. - # the content embedding (the vqvae) does not include the mask embedding. 
- num_embed = num_embed + 1 - - height = original_transformer_config["content_spatial_size"][0] - width = original_transformer_config["content_spatial_size"][1] - - assert width == height, "width has to be equal to height" - dropout = original_transformer_config["resid_pdrop"] - num_embeds_ada_norm = original_diffusion_config["diffusion_step"] - - model_kwargs = { - "attention_bias": True, - "cross_attention_dim": context_dim, - "attention_head_dim": d_head, - "num_layers": depth, - "dropout": dropout, - "num_attention_heads": n_heads, - "num_vector_embeds": num_embed, - "num_embeds_ada_norm": num_embeds_ada_norm, - "norm_num_groups": 32, - "sample_size": width, - "activation_fn": "geglu-approximate", - } - - model = Transformer2DModel(**model_kwargs) - return model - - -# done transformer model - -# transformer checkpoint - - -def transformer_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): - diffusers_checkpoint = {} - - transformer_prefix = "transformer.transformer" - - diffusers_latent_image_embedding_prefix = "latent_image_embedding" - latent_image_embedding_prefix = f"{transformer_prefix}.content_emb" - - # DalleMaskImageEmbedding - diffusers_checkpoint.update( - { - f"{diffusers_latent_image_embedding_prefix}.emb.weight": checkpoint[ - f"{latent_image_embedding_prefix}.emb.weight" - ], - f"{diffusers_latent_image_embedding_prefix}.height_emb.weight": checkpoint[ - f"{latent_image_embedding_prefix}.height_emb.weight" - ], - f"{diffusers_latent_image_embedding_prefix}.width_emb.weight": checkpoint[ - f"{latent_image_embedding_prefix}.width_emb.weight" - ], - } - ) - - # transformer blocks - for transformer_block_idx, transformer_block in enumerate(model.transformer_blocks): - diffusers_transformer_block_prefix = f"transformer_blocks.{transformer_block_idx}" - transformer_block_prefix = f"{transformer_prefix}.blocks.{transformer_block_idx}" - - # ada norm block - diffusers_ada_norm_prefix = f"{diffusers_transformer_block_prefix}.norm1" - ada_norm_prefix = f"{transformer_block_prefix}.ln1" - - diffusers_checkpoint.update( - transformer_ada_norm_to_diffusers_checkpoint( - checkpoint, diffusers_ada_norm_prefix=diffusers_ada_norm_prefix, ada_norm_prefix=ada_norm_prefix - ) - ) - - # attention block - diffusers_attention_prefix = f"{diffusers_transformer_block_prefix}.attn1" - attention_prefix = f"{transformer_block_prefix}.attn1" - - diffusers_checkpoint.update( - transformer_attention_to_diffusers_checkpoint( - checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix - ) - ) - - # ada norm block - diffusers_ada_norm_prefix = f"{diffusers_transformer_block_prefix}.norm2" - ada_norm_prefix = f"{transformer_block_prefix}.ln1_1" - - diffusers_checkpoint.update( - transformer_ada_norm_to_diffusers_checkpoint( - checkpoint, diffusers_ada_norm_prefix=diffusers_ada_norm_prefix, ada_norm_prefix=ada_norm_prefix - ) - ) - - # attention block - diffusers_attention_prefix = f"{diffusers_transformer_block_prefix}.attn2" - attention_prefix = f"{transformer_block_prefix}.attn2" - - diffusers_checkpoint.update( - transformer_attention_to_diffusers_checkpoint( - checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix - ) - ) - - # norm block - diffusers_norm_block_prefix = f"{diffusers_transformer_block_prefix}.norm3" - norm_block_prefix = f"{transformer_block_prefix}.ln2" - - diffusers_checkpoint.update( - { - f"{diffusers_norm_block_prefix}.weight": checkpoint[f"{norm_block_prefix}.weight"], - 
f"{diffusers_norm_block_prefix}.bias": checkpoint[f"{norm_block_prefix}.bias"], - } - ) - - # feedforward block - diffusers_feedforward_prefix = f"{diffusers_transformer_block_prefix}.ff" - feedforward_prefix = f"{transformer_block_prefix}.mlp" - - diffusers_checkpoint.update( - transformer_feedforward_to_diffusers_checkpoint( - checkpoint, - diffusers_feedforward_prefix=diffusers_feedforward_prefix, - feedforward_prefix=feedforward_prefix, - ) - ) - - # to logits - - diffusers_norm_out_prefix = "norm_out" - norm_out_prefix = f"{transformer_prefix}.to_logits.0" - - diffusers_checkpoint.update( - { - f"{diffusers_norm_out_prefix}.weight": checkpoint[f"{norm_out_prefix}.weight"], - f"{diffusers_norm_out_prefix}.bias": checkpoint[f"{norm_out_prefix}.bias"], - } - ) - - diffusers_out_prefix = "out" - out_prefix = f"{transformer_prefix}.to_logits.1" - - diffusers_checkpoint.update( - { - f"{diffusers_out_prefix}.weight": checkpoint[f"{out_prefix}.weight"], - f"{diffusers_out_prefix}.bias": checkpoint[f"{out_prefix}.bias"], - } - ) - - return diffusers_checkpoint - - -def transformer_ada_norm_to_diffusers_checkpoint(checkpoint, *, diffusers_ada_norm_prefix, ada_norm_prefix): - return { - f"{diffusers_ada_norm_prefix}.emb.weight": checkpoint[f"{ada_norm_prefix}.emb.weight"], - f"{diffusers_ada_norm_prefix}.linear.weight": checkpoint[f"{ada_norm_prefix}.linear.weight"], - f"{diffusers_ada_norm_prefix}.linear.bias": checkpoint[f"{ada_norm_prefix}.linear.bias"], - } - - -def transformer_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix): - return { - # key - f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.key.weight"], - f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.key.bias"], - # query - f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.query.weight"], - f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.query.bias"], - # value - f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.value.weight"], - f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.value.bias"], - # linear out - f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj.weight"], - f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj.bias"], - } - - -def transformer_feedforward_to_diffusers_checkpoint(checkpoint, *, diffusers_feedforward_prefix, feedforward_prefix): - return { - f"{diffusers_feedforward_prefix}.net.0.proj.weight": checkpoint[f"{feedforward_prefix}.0.weight"], - f"{diffusers_feedforward_prefix}.net.0.proj.bias": checkpoint[f"{feedforward_prefix}.0.bias"], - f"{diffusers_feedforward_prefix}.net.2.weight": checkpoint[f"{feedforward_prefix}.2.weight"], - f"{diffusers_feedforward_prefix}.net.2.bias": checkpoint[f"{feedforward_prefix}.2.bias"], - } - - -# done transformer checkpoint - - -def read_config_file(filename): - # The yaml file contains annotations that certain values should - # loaded as tuples. By default, OmegaConf will panic when reading - # these. Instead, we can manually read the yaml with the FullLoader and then - # construct the OmegaConf object. - with open(filename) as f: - original_config = yaml.load(f, FullLoader) - - return OmegaConf.create(original_config) - - -# We take separate arguments for the vqvae because the ITHQ vqvae config file -# is separate from the config file for the rest of the model. 
-if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--vqvae_checkpoint_path", - default=None, - type=str, - required=True, - help="Path to the vqvae checkpoint to convert.", - ) - - parser.add_argument( - "--vqvae_original_config_file", - default=None, - type=str, - required=True, - help="The YAML config file corresponding to the original architecture for the vqvae.", - ) - - parser.add_argument( - "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." - ) - - parser.add_argument( - "--original_config_file", - default=None, - type=str, - required=True, - help="The YAML config file corresponding to the original architecture.", - ) - - parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") - - parser.add_argument( - "--checkpoint_load_device", - default="cpu", - type=str, - required=False, - help="The device passed to `map_location` when loading checkpoints.", - ) - - # See link for how ema weights are always selected - # https://github.com/microsoft/VQ-Diffusion/blob/3c98e77f721db7c787b76304fa2c96a36c7b00af/inference_VQ_Diffusion.py#L65 - parser.add_argument( - "--no_use_ema", - action="store_true", - required=False, - help=( - "Set to not use the ema weights from the original VQ-Diffusion checkpoint. You probably do not want to set" - " it as the original VQ-Diffusion always uses the ema weights when loading models." - ), - ) - - args = parser.parse_args() - - use_ema = not args.no_use_ema - - print(f"loading checkpoints to {args.checkpoint_load_device}") - - checkpoint_map_location = torch.device(args.checkpoint_load_device) - - # vqvae_model - - print(f"loading vqvae, config: {args.vqvae_original_config_file}, checkpoint: {args.vqvae_checkpoint_path}") - - vqvae_original_config = read_config_file(args.vqvae_original_config_file).model - vqvae_checkpoint = torch.load(args.vqvae_checkpoint_path, map_location=checkpoint_map_location)["model"] - - with init_empty_weights(): - vqvae_model = vqvae_model_from_original_config(vqvae_original_config) - - vqvae_diffusers_checkpoint = vqvae_original_checkpoint_to_diffusers_checkpoint(vqvae_model, vqvae_checkpoint) - - with tempfile.NamedTemporaryFile() as vqvae_diffusers_checkpoint_file: - torch.save(vqvae_diffusers_checkpoint, vqvae_diffusers_checkpoint_file.name) - del vqvae_diffusers_checkpoint - del vqvae_checkpoint - load_checkpoint_and_dispatch(vqvae_model, vqvae_diffusers_checkpoint_file.name, device_map="auto") - - print("done loading vqvae") - - # done vqvae_model - - # transformer_model - - print( - f"loading transformer, config: {args.original_config_file}, checkpoint: {args.checkpoint_path}, use ema:" - f" {use_ema}" - ) - - original_config = read_config_file(args.original_config_file).model - - diffusion_config = original_config.params.diffusion_config - transformer_config = original_config.params.diffusion_config.params.transformer_config - content_embedding_config = original_config.params.diffusion_config.params.content_emb_config - - pre_checkpoint = torch.load(args.checkpoint_path, map_location=checkpoint_map_location) - - if use_ema: - if "ema" in pre_checkpoint: - checkpoint = {} - for k, v in pre_checkpoint["model"].items(): - checkpoint[k] = v - - for k, v in pre_checkpoint["ema"].items(): - # The ema weights are only used on the transformer. To mimic their key as if they came - # from the state_dict for the top level model, we prefix with an additional "transformer." 
- # See the source linked in the args.use_ema config for more information. - checkpoint[f"transformer.{k}"] = v - else: - print("attempted to load ema weights but no ema weights are specified in the loaded checkpoint.") - checkpoint = pre_checkpoint["model"] - else: - checkpoint = pre_checkpoint["model"] - - del pre_checkpoint - - with init_empty_weights(): - transformer_model = transformer_model_from_original_config( - diffusion_config, transformer_config, content_embedding_config - ) - - diffusers_transformer_checkpoint = transformer_original_checkpoint_to_diffusers_checkpoint( - transformer_model, checkpoint - ) - - # classifier free sampling embeddings interlude - - # The learned embeddings are stored on the transformer in the original VQ-diffusion. We store them on a separate - # model, so we pull them off the checkpoint before the checkpoint is deleted. - - learnable_classifier_free_sampling_embeddings = diffusion_config.params.learnable_cf - - if learnable_classifier_free_sampling_embeddings: - learned_classifier_free_sampling_embeddings_embeddings = checkpoint["transformer.empty_text_embed"] - else: - learned_classifier_free_sampling_embeddings_embeddings = None - - # done classifier free sampling embeddings interlude - - with tempfile.NamedTemporaryFile() as diffusers_transformer_checkpoint_file: - torch.save(diffusers_transformer_checkpoint, diffusers_transformer_checkpoint_file.name) - del diffusers_transformer_checkpoint - del checkpoint - load_checkpoint_and_dispatch(transformer_model, diffusers_transformer_checkpoint_file.name, device_map="auto") - - print("done loading transformer") - - # done transformer_model - - # text encoder - - print("loading CLIP text encoder") - - clip_name = "openai/clip-vit-base-patch32" - - # The original VQ-Diffusion specifies the pad value by the int used in the - # returned tokens. Each model uses `0` as the pad value. The transformers clip api - # specifies the pad value via the token before it has been tokenized. The `!` pad - # token is the same as padding with the `0` pad value. - pad_token = "!" 
- - tokenizer_model = CLIPTokenizer.from_pretrained(clip_name, pad_token=pad_token, device_map="auto") - - assert tokenizer_model.convert_tokens_to_ids(pad_token) == 0 - - text_encoder_model = CLIPTextModel.from_pretrained( - clip_name, - # `CLIPTextModel` does not support device_map="auto" - # device_map="auto" - ) - - print("done loading CLIP text encoder") - - # done text encoder - - # scheduler - - scheduler_model = VQDiffusionScheduler( - # the scheduler has the same number of embeddings as the transformer - num_vec_classes=transformer_model.num_vector_embeds - ) - - # done scheduler - - # learned classifier free sampling embeddings - - with init_empty_weights(): - learned_classifier_free_sampling_embeddings_model = LearnedClassifierFreeSamplingEmbeddings( - learnable_classifier_free_sampling_embeddings, - hidden_size=text_encoder_model.config.hidden_size, - length=tokenizer_model.model_max_length, - ) - - learned_classifier_free_sampling_checkpoint = { - "embeddings": learned_classifier_free_sampling_embeddings_embeddings.float() - } - - with tempfile.NamedTemporaryFile() as learned_classifier_free_sampling_checkpoint_file: - torch.save(learned_classifier_free_sampling_checkpoint, learned_classifier_free_sampling_checkpoint_file.name) - del learned_classifier_free_sampling_checkpoint - del learned_classifier_free_sampling_embeddings_embeddings - load_checkpoint_and_dispatch( - learned_classifier_free_sampling_embeddings_model, - learned_classifier_free_sampling_checkpoint_file.name, - device_map="auto", - ) - - # done learned classifier free sampling embeddings - - print(f"saving VQ diffusion model, path: {args.dump_path}") - - pipe = VQDiffusionPipeline( - vqvae=vqvae_model, - transformer=transformer_model, - tokenizer=tokenizer_model, - text_encoder=text_encoder_model, - learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings_model, - scheduler=scheduler_model, - ) - pipe.save_pretrained(args.dump_path) - - print("done writing VQ diffusion model") diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/.ipynb_checkpoints/__init__-checkpoint.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/.ipynb_checkpoints/__init__-checkpoint.py deleted file mode 100644 index 421099a6d746f072222567bbe5f313da5de36206..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/.ipynb_checkpoints/__init__-checkpoint.py +++ /dev/null @@ -1,139 +0,0 @@ -from ..utils import ( - OptionalDependencyNotAvailable, - is_flax_available, - is_k_diffusion_available, - is_librosa_available, - is_note_seq_available, - is_onnx_available, - is_torch_available, - is_transformers_available, -) - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_pt_objects import * # noqa F403 -else: - from .dance_diffusion import DanceDiffusionPipeline - from .ddim import DDIMPipeline - from .ddpm import DDPMPipeline - from .dit import DiTPipeline - from .latent_diffusion import LDMSuperResolutionPipeline - from .latent_diffusion_uncond import LDMPipeline - from .pipeline_utils import AudioPipelineOutput, DiffusionPipeline, ImagePipelineOutput - from .pndm import PNDMPipeline - from .repaint import RePaintPipeline - from .score_sde_ve import ScoreSdeVePipeline - from .stochastic_karras_ve import KarrasVePipeline - -try: - if not (is_torch_available() and is_librosa_available()): - raise OptionalDependencyNotAvailable() -except 
OptionalDependencyNotAvailable: - from ..utils.dummy_torch_and_librosa_objects import * # noqa F403 -else: - from .audio_diffusion import AudioDiffusionPipeline, Mel - -try: - if not (is_torch_available() and is_transformers_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_torch_and_transformers_objects import * # noqa F403 -else: - from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline - from .audioldm import AudioLDMPipeline - from .latent_diffusion import LDMTextToImagePipeline - from .paint_by_example import PaintByExamplePipeline - from .semantic_stable_diffusion import SemanticStableDiffusionPipeline - from .stable_diffusion import ( - CycleDiffusionPipeline, - StableDiffusionAttendAndExcitePipeline, - StableDiffusionControlNetPipeline, - StableDiffusionDepth2ImgPipeline, - StableDiffusionImageVariationPipeline, - StableDiffusionImg2ImgPipeline, - StableDiffusionInpaintPipeline, - StableDiffusionInpaintPipelineLegacy, - StableDiffusionInstructPix2PixPipeline, - StableDiffusionLatentUpscalePipeline, - StableDiffusionModelEditingPipeline, - StableDiffusionPanoramaPipeline, - StableDiffusionPipeline, - StableDiffusionPix2PixZeroPipeline, - StableDiffusionSAGPipeline, - StableDiffusionUpscalePipeline, - StableUnCLIPImg2ImgPipeline, - StableUnCLIPPipeline, - ) - from .stable_diffusion_safe import StableDiffusionPipelineSafe - from .text_to_video_synthesis import TextToVideoSDPipeline - from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline - from .versatile_diffusion import ( - VersatileDiffusionDualGuidedPipeline, - VersatileDiffusionImageVariationPipeline, - VersatileDiffusionPipeline, - VersatileDiffusionTextToImagePipeline, - ) - from .vq_diffusion import VQDiffusionPipeline - -try: - if not is_onnx_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_onnx_objects import * # noqa F403 -else: - from .onnx_utils import OnnxRuntimeModel - -try: - if not (is_torch_available() and is_transformers_available() and is_onnx_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 -else: - from .stable_diffusion import ( - OnnxStableDiffusionImg2ImgPipeline, - OnnxStableDiffusionInpaintPipeline, - OnnxStableDiffusionInpaintPipelineLegacy, - OnnxStableDiffusionPipeline, - OnnxStableDiffusionUpscalePipeline, - StableDiffusionOnnxPipeline, - ) - -try: - if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 -else: - from .stable_diffusion import StableDiffusionKDiffusionPipeline - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_flax_objects import * # noqa F403 -else: - from .pipeline_flax_utils import FlaxDiffusionPipeline - - -try: - if not (is_flax_available() and is_transformers_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_flax_and_transformers_objects import * # noqa F403 -else: - from .stable_diffusion import ( - FlaxStableDiffusionControlNetPipeline, - FlaxStableDiffusionImg2ImgPipeline, - FlaxStableDiffusionInpaintPipeline, - 
FlaxStableDiffusionPipeline, - ) -try: - if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 -else: - from .spectrogram_diffusion import MidiProcessor, SpectrogramDiffusionPipeline diff --git a/spaces/diacanFperku/AutoGPT/Call Of Duty Black Ops 2 Code Pre Gfx. 2021.md b/spaces/diacanFperku/AutoGPT/Call Of Duty Black Ops 2 Code Pre Gfx. 2021.md deleted file mode 100644 index 10b75a02edc5938b5a9bf301b8afab358fe6385f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Call Of Duty Black Ops 2 Code Pre Gfx. 2021.md +++ /dev/null @@ -1,6 +0,0 @@ -

    call of duty black ops 2 code pre gfx.


    Download https://gohhs.com/2uFU4u



    - -Best mouse settings for Black Ops Cold War (Picture: Treyarch) There are really two areas ... Jan 03, 2010 · Call of Duty: Modern Warfare 2 transmits and receives network traffic on : ports ... Code Injections [Exclusion] and add the entire Call of Duty: Modern Warfare game folder to ... Set Maximum pre-rendered frames to 1. 1fdad05405
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Macdrive 10 Standard Serial Number.md b/spaces/diacanFperku/AutoGPT/Macdrive 10 Standard Serial Number.md deleted file mode 100644 index 446592237eeda56b7ec9837f462bf1e0127d1060..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Macdrive 10 Standard Serial Number.md +++ /dev/null @@ -1,6 +0,0 @@ -

    macdrive 10 standard serial number


    Download ★★★ https://gohhs.com/2uFVoT



    - -Windows XP / Vista / Windows 7 / Windows 8 / Windows 10 / XP64 / Vista64 / Windows 7 ... At that point introduce MacDrive 10.5.0 Product key. 1fdad05405
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/PESEdit 2013 Patch 6.0 Crack LINK Free.md b/spaces/diacanFperku/AutoGPT/PESEdit 2013 Patch 6.0 Crack LINK Free.md deleted file mode 100644 index afbdb94390464b7091baf1cee66de6f5ac7878b2..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/PESEdit 2013 Patch 6.0 Crack LINK Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

    PESEdit 2013 Patch 6.0 crack free


    Download Zip > https://gohhs.com/2uFUsy



    -
    -Winter Transfer 2015 · PES 2013 Update Winter Transfers For PESEdit 6.0 ... Season 1516 A crack is a program, set of instructions or patch used to remove ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Parks And Recreation 720p Season 1l [EXCLUSIVE].md b/spaces/diacanFperku/AutoGPT/Parks And Recreation 720p Season 1l [EXCLUSIVE].md deleted file mode 100644 index c8ca312835e047381b9ea272ee4b494773294aad..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Parks And Recreation 720p Season 1l [EXCLUSIVE].md +++ /dev/null @@ -1,10 +0,0 @@ -

    Parks And Recreation 720p Season 1l


    Download Zip >>> https://gohhs.com/2uFVqE



    - -heleen van royen self made pdf 52 Prachanda Ravana Kannada Movie Downloadinstmank copertina tetta sala Parks And Recreation 720p Season 1l . Kannada Movie Songs, Kannada Songs Download, Kannada Songs mp3 and Free Download, Kannada Songs Videos Latest Punjabi Songs Download, Online Video Songs, Kannada Songs video mp4 download, Kannada songs video download kannada song of the moment, Kannada Songs Movie Download, Kannada Mp3 Songs, Videos, Mp3s, Mp3 Songs, Mp3 Videos, Mp3, Albums, Ringtones, Music. -Ji Karthik Ghosh Kannada Movie Songs Download. -Chalchana Kannada Movie Songs New. -Songs Chalcana. -Prem Kannada Movie Songs Download. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Pocket Tanks Deluxe - With All Weapon Packs! Generator Online.md b/spaces/diacanFperku/AutoGPT/Pocket Tanks Deluxe - With All Weapon Packs! Generator Online.md deleted file mode 100644 index 3c14b53397c671a12fbaf9f829b1e3c3cc8a547b..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Pocket Tanks Deluxe - With All Weapon Packs! Generator Online.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    The P-52 is a unique, rocket-powered concept aircraft, built by American scientist Dr. Smith. It is capable of lifting off from conventional runways by mounting its propulsion unit to the underside of its wing, and can ascend into the stratosphere by firing a rocket into an engine installed in the rear portion of its fuselage. The P-52 is commonly used for reconnaissance, but can also be equipped with machine guns for local defense. This can be done by attaching machine guns to its wing. The P-52 is very vulnerable to space mines, taking around 200 damage from all types of them. It can take any weapon of its time in 50 bursts, but is rather weak against fully modern weapons, requiring at least 600 damage from them. The P-52 is well-protected against small arms, taking 10 hits from any type of weapon to hit, and being completely vulnerable to small arms fire (50 shots needed to kill). The P-52 can be damaged while flying, and exploding air bombs can burn it. On the long run, it is possible to repair the P-52, but it will return to a semi-functional condition.

    -

    The P-43 is a new Soviet experimental aircraft. Made in the early '60s, it showed impressive performances in numerous tests, but it was never mass-produced. The P-43 has excellent maneuverability thanks to its unique configuration (the P-43 itself has an advantage over any other plane in the game - it cannot even be shot down by any weapon) and its massive aerodynamic efficiency. Because of its performance, the P-43 became a testbed for many Soviet experimental aircraft, including the Ka-52 and the Kamov Ka-226. The P-43 is very vulnerable to ground-based fire and missiles, taking anywhere from 200 to 1,000 damage from any weapon and 1,000-1,500 damage from missile attacks. The P-43 is very vulnerable against its own propellers, taking a maximum of 300 points of damage from the propellers, and is even more vulnerable when landing. It can take the full firepower of any of its time's weapons, with the weakest SMG taking away only 1,100 points of damage. It is also quite powerful against missiles, taking only a few hits from missiles to be destroyed.

    -

    Pocket Tanks Deluxe - With All Weapon Packs! generator online


    Download File ····· https://gohhs.com/2uFT3B



    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/monotonic_align/core.py b/spaces/digitalxingtong/Nailv-Bert-Vits2/monotonic_align/core.py deleted file mode 100644 index 5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 \ No newline at end of file diff --git a/spaces/dnth/webdemo-fridge-detection/README.md b/spaces/dnth/webdemo-fridge-detection/README.md deleted file mode 100644 index 3eb70b41ee5258d61f143d82eeeae58e66b70020..0000000000000000000000000000000000000000 --- a/spaces/dnth/webdemo-fridge-detection/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: webdemo-fridge-detection -emoji: 🍿 -colorFrom: red -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/dorkai/SINGPT-Temporary/api-example.py b/spaces/dorkai/SINGPT-Temporary/api-example.py deleted file mode 100644 index 0306b7ab8a3fa3d6f57d8474ad74d67f13557b6d..0000000000000000000000000000000000000000 --- a/spaces/dorkai/SINGPT-Temporary/api-example.py +++ /dev/null @@ -1,59 +0,0 @@ -''' - -This is an example on how to use the API for oobabooga/text-generation-webui. - -Make sure to start the web UI with the following flags: - -python server.py --model MODEL --listen --no-stream - -Optionally, you can also add the --share flag to generate a public gradio URL, -allowing you to use the API remotely. 
- -''' -import requests - -# Server address -server = "127.0.0.1" - -# Generation parameters -# Reference: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig -params = { - 'max_new_tokens': 200, - 'do_sample': True, - 'temperature': 0.5, - 'top_p': 0.9, - 'typical_p': 1, - 'repetition_penalty': 1.05, - 'top_k': 0, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': False, -} - -# Input prompt -prompt = "What I would like to say is the following: " - -response = requests.post(f"http://{server}:7860/run/textgen", json={ - "data": [ - prompt, - params['max_new_tokens'], - params['do_sample'], - params['temperature'], - params['top_p'], - params['typical_p'], - params['repetition_penalty'], - params['top_k'], - params['min_length'], - params['no_repeat_ngram_size'], - params['num_beams'], - params['penalty_alpha'], - params['length_penalty'], - params['early_stopping'], - ] -}).json() - -reply = response["data"][0] -print(reply) diff --git a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/make_ass_files.py b/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/make_ass_files.py deleted file mode 100644 index 62a28b48ee70250f4ca2fb69bab6a60ebafb7d98..0000000000000000000000000000000000000000 --- a/spaces/erastorgueva-nv/NeMo-Forced-Aligner/utils/make_ass_files.py +++ /dev/null @@ -1,522 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This file contains functions for make ASS-format subtitle files based on the generated alignment. -ASS files can be generated highlighting token-level alignments or word-level alignments. -In both cases, 'segment' boundaries will be used to determine which parts of the text will appear -at the same time. -For the token-level ASS files, the text will be highlighted token-by-token, with the timings determined -by the NFA alignments. -For the word-level ASS files, the text will be highlighted word-by-word, with the timings determined -by the NFA alignemtns. 
-""" - -import math -import os -import soundfile as sf - -from utils.constants import BLANK_TOKEN, SPACE_TOKEN -from utils.data_prep import Segment, Token, Word - -PLAYERRESX = 384 -PLAYERRESY = 288 -MARGINL = 10 -MARGINR = 10 -MARGINV = 20 - - -def seconds_to_ass_format(seconds_float): - seconds_float = float(seconds_float) - mm, ss_decimals = divmod(seconds_float, 60) - hh, mm = divmod(mm, 60) - - hh = str(round(hh)) - if len(hh) == 1: - hh = '0' + hh - - mm = str(round(mm)) - if len(mm) == 1: - mm = '0' + mm - - ss_decimals = f"{ss_decimals:.2f}" - if len(ss_decimals.split(".")[0]) == 1: - ss_decimals = "0" + ss_decimals - - srt_format_time = f"{hh}:{mm}:{ss_decimals}" - - return srt_format_time - - -def rgb_list_to_hex_bgr(rgb_list): - r, g, b = rgb_list - return f"{b:x}{g:x}{r:x}" - - -def make_ass_files( - utt_obj, output_dir_root, ass_file_config, -): - - # don't try to make files if utt_obj.segments_and_tokens is empty, which will happen - # in the case of the ground truth text being empty or the number of tokens being too large vs audio duration - if not utt_obj.segments_and_tokens: - return utt_obj - - if ass_file_config.resegment_text_to_fill_space: - utt_obj = resegment_utt_obj(utt_obj, ass_file_config) - - # get duration of the utterance, so we know the final timestamp of the final set of subtitles, - # which we will keep showing until the end - with sf.SoundFile(utt_obj.audio_filepath) as f: - audio_dur = f.frames / f.samplerate - - utt_obj = make_word_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur) - utt_obj = make_token_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur) - - return utt_obj - - -def _get_word_n_chars(word): - n_chars = 0 - for token in word.tokens: - if token.text != BLANK_TOKEN: - n_chars += len(token.text) - return n_chars - - -def _get_segment_n_chars(segment): - n_chars = 0 - for word_or_token in segment.words_and_tokens: - if word_or_token.text == SPACE_TOKEN: - n_chars += 1 - elif word_or_token.text != BLANK_TOKEN: - n_chars += len(word_or_token.text) - return n_chars - - -def resegment_utt_obj(utt_obj, ass_file_config): - - # get list of just all words and tokens - all_words_and_tokens = [] - for segment_or_token in utt_obj.segments_and_tokens: - if type(segment_or_token) is Segment: - all_words_and_tokens.extend(segment_or_token.words_and_tokens) - else: - all_words_and_tokens.append(segment_or_token) - - # figure out how many chars will fit into one 'slide' and thus should be the max - # size of a segment - approx_chars_per_line = (PLAYERRESX - MARGINL - MARGINR) / ( - ass_file_config.fontsize * 0.6 - ) # assume chars 0.6 as wide as they are tall - approx_lines_per_segment = (PLAYERRESY - MARGINV) / ( - ass_file_config.fontsize * 1.15 - ) # assume line spacing is 1.15 - if approx_lines_per_segment > ass_file_config.max_lines_per_segment: - approx_lines_per_segment = ass_file_config.max_lines_per_segment - - max_chars_per_segment = int(approx_chars_per_line * approx_lines_per_segment) - - new_segments_and_tokens = [] - all_words_and_tokens_pointer = 0 - for word_or_token in all_words_and_tokens: - if type(word_or_token) is Token: - new_segments_and_tokens.append(word_or_token) - all_words_and_tokens_pointer += 1 - else: - break - - new_segments_and_tokens.append(Segment()) - - while all_words_and_tokens_pointer < len(all_words_and_tokens): - word_or_token = all_words_and_tokens[all_words_and_tokens_pointer] - if type(word_or_token) is Word: - - # if this is going to be the first word in the segment, we definitely 
want - # to add it to the segment - if not new_segments_and_tokens[-1].words_and_tokens: - new_segments_and_tokens[-1].words_and_tokens.append(word_or_token) - - else: - # if not the first word, check what the new length of the segment will be - # if short enough - add this word to this segment; - # if too long - add to a new segment - this_word_n_chars = _get_word_n_chars(word_or_token) - segment_so_far_n_chars = _get_segment_n_chars(new_segments_and_tokens[-1]) - if this_word_n_chars + segment_so_far_n_chars < max_chars_per_segment: - new_segments_and_tokens[-1].words_and_tokens.append(word_or_token) - else: - new_segments_and_tokens.append(Segment()) - new_segments_and_tokens[-1].words_and_tokens.append(word_or_token) - - else: # i.e. word_or_token is a token - # currently this breaks the convention of tokens at the end/beginning - # of segments being listed as separate tokens in segment.word_and_tokens - # TODO: change code so we follow this convention - new_segments_and_tokens[-1].words_and_tokens.append(word_or_token) - - all_words_and_tokens_pointer += 1 - - utt_obj.segments_and_tokens = new_segments_and_tokens - - return utt_obj - - -def make_word_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur): - - default_style_dict = { - "Name": "Default", - "Fontname": "Arial", - "Fontsize": str(ass_file_config.fontsize), - "PrimaryColour": "&Hffffff", - "SecondaryColour": "&Hffffff", - "OutlineColour": "&H0", - "BackColour": "&H0", - "Bold": "0", - "Italic": "0", - "Underline": "0", - "StrikeOut": "0", - "ScaleX": "100", - "ScaleY": "100", - "Spacing": "0", - "Angle": "0", - "BorderStyle": "1", - "Outline": "1", - "Shadow": "0", - "Alignment": None, # will specify below - "MarginL": str(MARGINL), - "MarginR": str(MARGINR), - "MarginV": str(MARGINV), - "Encoding": "0", - } - - if ass_file_config.vertical_alignment == "top": - default_style_dict["Alignment"] = "8" # text will be 'center-justified' and in the top of the screen - elif ass_file_config.vertical_alignment == "center": - default_style_dict["Alignment"] = "5" # text will be 'center-justified' and in the middle of the screen - elif ass_file_config.vertical_alignment == "bottom": - default_style_dict["Alignment"] = "2" # text will be 'center-justified' and in the bottom of the screen - else: - raise ValueError(f"got an unexpected value for ass_file_config.vertical_alignment") - - output_dir = os.path.join(output_dir_root, "ass", "words") - os.makedirs(output_dir, exist_ok=True) - output_file = os.path.join(output_dir, f"{utt_obj.utt_id}.ass") - - already_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_already_spoken_rgb) + r"&}" - being_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_being_spoken_rgb) + r"&}" - not_yet_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_not_yet_spoken_rgb) + r"&}" - - with open(output_file, 'w') as f: - default_style_top_line = "Format: " + ", ".join(default_style_dict.keys()) - default_style_bottom_line = "Style: " + ",".join(default_style_dict.values()) - - f.write( - ( - "[Script Info]\n" - "ScriptType: v4.00+\n" - f"PlayResX: {PLAYERRESX}\n" - f"PlayResY: {PLAYERRESY}\n" - "\n" - "[V4+ Styles]\n" - f"{default_style_top_line}\n" - f"{default_style_bottom_line}\n" - "\n" - "[Events]\n" - "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n" - ) - ) - - # write first set of subtitles for text before speech starts to be spoken - words_in_first_segment = [] - for segment_or_token in 
utt_obj.segments_and_tokens: - if type(segment_or_token) is Segment: - first_segment = segment_or_token - - for word_or_token in first_segment.words_and_tokens: - if type(word_or_token) is Word: - words_in_first_segment.append(word_or_token) - break - - text_before_speech = not_yet_spoken_color_code + " ".join([x.text for x in words_in_first_segment]) + r"{\r}" - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(0)},{seconds_to_ass_format(words_in_first_segment[0].t_start)},Default,,0,0,0,," - + text_before_speech.rstrip() - ) - - f.write(subtitle_text + '\n') - - for segment_or_token in utt_obj.segments_and_tokens: - if type(segment_or_token) is Segment: - segment = segment_or_token - - words_in_segment = [] - for word_or_token in segment.words_and_tokens: - if type(word_or_token) is Word: - words_in_segment.append(word_or_token) - - for word_i, word in enumerate(words_in_segment): - - text_before = " ".join([x.text for x in words_in_segment[:word_i]]) - if text_before != "": - text_before += " " - text_before = already_spoken_color_code + text_before + r"{\r}" - - if word_i < len(words_in_segment) - 1: - text_after = " " + " ".join([x.text for x in words_in_segment[word_i + 1 :]]) - else: - text_after = "" - text_after = not_yet_spoken_color_code + text_after + r"{\r}" - - aligned_text = being_spoken_color_code + word.text + r"{\r}" - aligned_text_off = already_spoken_color_code + word.text + r"{\r}" - - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(word.t_start)},{seconds_to_ass_format(word.t_end)},Default,,0,0,0,," - + text_before - + aligned_text - + text_after.rstrip() - ) - f.write(subtitle_text + '\n') - - # add subtitles without word-highlighting for when words are not being spoken - if word_i < len(words_in_segment) - 1: - last_word_end = float(words_in_segment[word_i].t_end) - next_word_start = float(words_in_segment[word_i + 1].t_start) - if next_word_start - last_word_end > 0.001: - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(last_word_end)},{seconds_to_ass_format(next_word_start)},Default,,0,0,0,," - + text_before - + aligned_text_off - + text_after.rstrip() - ) - f.write(subtitle_text + '\n') - - # write final set of subtitles for text after speech has been spoken - words_in_final_segment = [] - for segment_or_token in utt_obj.segments_and_tokens[::-1]: - if type(segment_or_token) is Segment: - final_segment = segment_or_token - - for word_or_token in final_segment.words_and_tokens: - if type(word_or_token) is Word: - words_in_final_segment.append(word_or_token) - break - - text_after_speech = already_spoken_color_code + " ".join([x.text for x in words_in_final_segment]) + r"{\r}" - # note: for now doing some extra padding with math.ceil(audio_dur)+1) to account for the fact that the video with subtitles can become - # longer than the original audio during the MP4 creation stage. 
- subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(words_in_final_segment[-1].t_end)},{seconds_to_ass_format(math.ceil(audio_dur)+1)},Default,,0,0,0,," - + text_after_speech.rstrip() - ) - - f.write(subtitle_text + '\n') - - utt_obj.saved_output_files[f"words_level_ass_filepath"] = output_file - - return utt_obj - - -def make_token_level_ass_file(utt_obj, output_dir_root, ass_file_config, audio_dur): - - default_style_dict = { - "Name": "Default", - "Fontname": "Arial", - "Fontsize": str(ass_file_config.fontsize), - "PrimaryColour": "&Hffffff", - "SecondaryColour": "&Hffffff", - "OutlineColour": "&H0", - "BackColour": "&H0", - "Bold": "0", - "Italic": "0", - "Underline": "0", - "StrikeOut": "0", - "ScaleX": "100", - "ScaleY": "100", - "Spacing": "0", - "Angle": "0", - "BorderStyle": "1", - "Outline": "1", - "Shadow": "0", - "Alignment": None, # will specify below - "MarginL": str(MARGINL), - "MarginR": str(MARGINR), - "MarginV": str(MARGINV), - "Encoding": "0", - } - - if ass_file_config.vertical_alignment == "top": - default_style_dict["Alignment"] = "8" # text will be 'center-justified' and in the top of the screen - elif ass_file_config.vertical_alignment == "center": - default_style_dict["Alignment"] = "5" # text will be 'center-justified' and in the middle of the screen - elif ass_file_config.vertical_alignment == "bottom": - default_style_dict["Alignment"] = "2" # text will be 'center-justified' and in the bottom of the screen - else: - raise ValueError(f"got an unexpected value for ass_file_config.vertical_alignment") - - output_dir = os.path.join(output_dir_root, "ass", "tokens") - os.makedirs(output_dir, exist_ok=True) - output_file = os.path.join(output_dir, f"{utt_obj.utt_id}.ass") - - already_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_already_spoken_rgb) + r"&}" - being_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_being_spoken_rgb) + r"&}" - not_yet_spoken_color_code = r"{\c&H" + rgb_list_to_hex_bgr(ass_file_config.text_not_yet_spoken_rgb) + r"&}" - - with open(output_file, 'w') as f: - default_style_top_line = "Format: " + ", ".join(default_style_dict.keys()) - default_style_bottom_line = "Style: " + ",".join(default_style_dict.values()) - - f.write( - ( - "[Script Info]\n" - "ScriptType: v4.00+\n" - f"PlayResX: {PLAYERRESX}\n" - f"PlayResY: {PLAYERRESY}\n" - "ScaledBorderAndShadow: yes\n" - "\n" - "[V4+ Styles]\n" - f"{default_style_top_line}\n" - f"{default_style_bottom_line}\n" - "\n" - "[Events]\n" - "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n" - ) - ) - - # write first set of subtitles for text before speech starts to be spoken - tokens_in_first_segment = [] - for segment_or_token in utt_obj.segments_and_tokens: - if type(segment_or_token) is Segment: - for word_or_token in segment_or_token.words_and_tokens: - if type(word_or_token) is Token: - if word_or_token.text != BLANK_TOKEN: - tokens_in_first_segment.append(word_or_token) - else: - for token in word_or_token.tokens: - if token.text != BLANK_TOKEN: - tokens_in_first_segment.append(token) - - break - - for token in tokens_in_first_segment: - token.text_cased = token.text_cased.replace( - "▁", " " - ) # replace underscores used in subword tokens with spaces - token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space - - text_before_speech = ( - not_yet_spoken_color_code + "".join([x.text_cased for x in tokens_in_first_segment]) + r"{\r}" - ) - subtitle_text = ( - f"Dialogue: 
0,{seconds_to_ass_format(0)},{seconds_to_ass_format(tokens_in_first_segment[0].t_start)},Default,,0,0,0,," - + text_before_speech.rstrip() - ) - - f.write(subtitle_text + '\n') - - for segment_or_token in utt_obj.segments_and_tokens: - if type(segment_or_token) is Segment: - segment = segment_or_token - - tokens_in_segment = [] # make list of (non-blank) tokens - for word_or_token in segment.words_and_tokens: - if type(word_or_token) is Token: - if word_or_token.text != BLANK_TOKEN: - tokens_in_segment.append(word_or_token) - else: - for token in word_or_token.tokens: - if token.text != BLANK_TOKEN: - tokens_in_segment.append(token) - - for token in tokens_in_segment: - token.text_cased = token.text_cased.replace( - "▁", " " - ) # replace underscores used in subword tokens with spaces - token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space - - for token_i, token in enumerate(tokens_in_segment): - - text_before = "".join([x.text_cased for x in tokens_in_segment[:token_i]]) - text_before = already_spoken_color_code + text_before + r"{\r}" - - if token_i < len(tokens_in_segment) - 1: - text_after = "".join([x.text_cased for x in tokens_in_segment[token_i + 1 :]]) - else: - text_after = "" - text_after = not_yet_spoken_color_code + text_after + r"{\r}" - - aligned_text = being_spoken_color_code + token.text_cased + r"{\r}" - aligned_text_off = already_spoken_color_code + token.text_cased + r"{\r}" - - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(token.t_start)},{seconds_to_ass_format(token.t_end)},Default,,0,0,0,," - + text_before - + aligned_text - + text_after.rstrip() - ) - f.write(subtitle_text + '\n') - - # add subtitles without word-highlighting for when words are not being spoken - if token_i < len(tokens_in_segment) - 1: - last_token_end = float(tokens_in_segment[token_i].t_end) - next_token_start = float(tokens_in_segment[token_i + 1].t_start) - if next_token_start - last_token_end > 0.001: - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(last_token_end)},{seconds_to_ass_format(next_token_start)},Default,,0,0,0,," - + text_before - + aligned_text_off - + text_after.rstrip() - ) - f.write(subtitle_text + '\n') - - # Write final set of subtitles for text after speech has been spoken. - # To do this, we need to collect 'tokens_in_final_segment' so that we know what the final line is. - tokens_in_final_segment = [] - for segment_or_token in utt_obj.segments_and_tokens[::-1]: - # Collect tokens from final segment - will 'break' so we only look at the final one. 
- if type(segment_or_token) is Segment: - # 'segment_or_token' is known to be Segment, which has attribute 'words_and_tokens' - for word_or_token in segment_or_token.words_and_tokens: - if type(word_or_token) is Token: - if word_or_token.text != BLANK_TOKEN: - tokens_in_final_segment.append(word_or_token) - else: - # 'word_or_token' is known to be a Word, which has attribute 'tokens' - for token in word_or_token.tokens: - if token.text != BLANK_TOKEN: - tokens_in_final_segment.append(token) - break - - for token in tokens_in_final_segment: - token.text_cased = token.text_cased.replace( - "▁", " " - ) # replace underscores used in subword tokens with spaces - token.text_cased = token.text_cased.replace(SPACE_TOKEN, " ") # space token with actual space - - text_after_speech = ( - already_spoken_color_code + "".join([x.text_cased for x in tokens_in_final_segment]) + r"{\r}" - ) - # note: for now doing some extra padding with math.ceil(audio_dur)+1) to account for the fact that the video with subtitles can become - # longer than the original audio during the MP4 creation stage. - subtitle_text = ( - f"Dialogue: 0,{seconds_to_ass_format(tokens_in_final_segment[-1].t_end)},{seconds_to_ass_format(math.ceil(audio_dur)+1)},Default,,0,0,0,," - + text_after_speech.rstrip() - ) - - f.write(subtitle_text + '\n') - - utt_obj.saved_output_files[f"tokens_level_ass_filepath"] = output_file - - return utt_obj diff --git a/spaces/facebook/MusicGen/audiocraft/utils/export_legacy.py b/spaces/facebook/MusicGen/audiocraft/utils/export_legacy.py deleted file mode 100644 index 367c3f3c9f95ae59a95edbb60b470e03cc842fbb..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/utils/export_legacy.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Legacy functions used at the time of the first release, kept for referencd. -""" - -from pathlib import Path -import typing as tp - -from omegaconf import OmegaConf, DictConfig -import torch - -from audiocraft import __version__ - - -def _clean_lm_cfg(cfg: DictConfig): - OmegaConf.set_struct(cfg, False) - # This used to be set automatically in the LM solver, need a more robust solution - # for the future. - cfg['transformer_lm']['card'] = 2048 - n_q = 4 - stereo_cfg = getattr(cfg, 'interleave_stereo_codebooks', None) - if stereo_cfg is not None and stereo_cfg.use: - if 'downsample' in stereo_cfg: - del stereo_cfg['downsample'] - n_q = 8 - cfg['transformer_lm']['n_q'] = n_q - # Experimental params no longer supported. - bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', - 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] - for name in bad_params: - del cfg['transformer_lm'][name] - OmegaConf.set_struct(cfg, True) - return cfg - - -def export_encodec(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]): - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['ema']['state']['model'], - 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), - # The following params were NOT exported for the first release of MusicGen. 
- 'version': __version__, - 'exported': True, - } - Path(out_file).parent.mkdir(exist_ok=True, parents=True) - torch.save(new_pkg, out_file) - return out_file - - -def export_lm(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]): - pkg = torch.load(checkpoint_path, 'cpu') - if pkg['fsdp_best_state']: - best_state = pkg['fsdp_best_state']['model'] - else: - best_state = pkg['best_state']['model'] - new_pkg = { - 'best_state': best_state, - 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])), - # The following params were NOT exported for the first release of MusicGen. - 'version': __version__, - 'exported': True, - } - Path(out_file).parent.mkdir(exist_ok=True, parents=True) - torch.save(new_pkg, out_file) - return out_file diff --git a/spaces/facebook/MusicGen/audiocraft/utils/notebook.py b/spaces/facebook/MusicGen/audiocraft/utils/notebook.py deleted file mode 100644 index 019b9d19e5bef976bedddf428fd25da42a8a9726..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/utils/notebook.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -try: - import IPython.display as ipd # type: ignore -except ImportError: - # Note in a notebook... - pass - - -import torch - - -def display_audio(samples: torch.Tensor, sample_rate: int): - """Renders an audio player for the given audio samples. - - Args: - samples (torch.Tensor): a Tensor of decoded audio samples - with shapes [B, C, T] or [C, T] - sample_rate (int): sample rate audio should be displayed with. - """ - assert samples.dim() == 2 or samples.dim() == 3 - - samples = samples.detach().cpu() - if samples.dim() == 2: - samples = samples[None, ...] - - for audio in samples: - ipd.display(ipd.Audio(audio, rate=sample_rate)) diff --git a/spaces/facebook/MusicGen/docs/METRICS.md b/spaces/facebook/MusicGen/docs/METRICS.md deleted file mode 100644 index 506ce35db708967bb9de6edf9c46df2564f0b0fd..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/docs/METRICS.md +++ /dev/null @@ -1,131 +0,0 @@ -# AudioCraft objective metrics - -In addition to training losses, AudioCraft provides a set of objective metrics -for audio synthesis and audio generation. As these metrics may require -extra dependencies and can be costly to train, they are often disabled by default. -This section provides guidance for setting up and using these metrics in -the AudioCraft training pipelines. - -## Available metrics - -### Audio synthesis quality metrics - -#### SI-SNR - -We provide an implementation of the Scale-Invariant Signal-to-Noise Ratio in PyTorch. -No specific requirement is needed for this metric. Please activate the metric at the -evaluation stage with the appropriate flag: - -**Warning:** We report the opposite of the SI-SNR, e.g. multiplied by -1. This is due to internal - details where the SI-SNR score can also be used as a training loss function, where lower - values should indicate better reconstruction. Negative values are such expected and a good sign! Those should be again multiplied by `-1` before publication :) - -```shell -dora run <...> evaluate.metrics.sisnr=true -``` - -#### ViSQOL - -We provide a Python wrapper around the ViSQOL [official implementation](https://github.com/google/visqol) -to conveniently run ViSQOL within the training pipelines. 
- -One must specify the path to the ViSQOL installation through the configuration in order -to enable ViSQOL computations in AudioCraft: - -```shell -# the first parameter is used to activate visqol computation while the second specify -# the path to visqol's library to be used by our python wrapper -dora run <...> evaluate.metrics.visqol=true metrics.visqol.bin= -``` - -See an example grid: [Compression with ViSQOL](../audiocraft/grids/compression/encodec_musicgen_32khz.py) - -To learn more about ViSQOL and how to build ViSQOL binary using bazel, please refer to the -instructions available in the [open source repository](https://github.com/google/visqol). - -### Audio generation metrics - -#### Frechet Audio Distance - -Similarly to ViSQOL, we use a Python wrapper around the Frechet Audio Distance -[official implementation](https://github.com/google-research/google-research/tree/master/frechet_audio_distance) -in TensorFlow. - -Note that we had to make several changes to the actual code in order to make it work. -Please refer to the [FrechetAudioDistanceMetric](../audiocraft/metrics/fad.py) class documentation -for more details. We do not plan to provide further support in obtaining a working setup for the -Frechet Audio Distance at this stage. - -```shell -# the first parameter is used to activate FAD metric computation while the second specify -# the path to FAD library to be used by our python wrapper -dora run <...> evaluate.metrics.fad=true metrics.fad.bin= -``` - -See an example grid: [Evaluation with FAD](../audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py) - -#### Kullback-Leibler Divergence - -We provide a PyTorch implementation of the Kullback-Leibler Divergence computed over the probabilities -of the labels obtained by a state-of-the-art audio classifier. We provide our implementation of the KLD -using the [PaSST classifier](https://github.com/kkoutini/PaSST). - -In order to use the KLD metric over PaSST, you must install the PaSST library as an extra dependency: -```shell -pip install 'git+https://github.com/kkoutini/passt_hear21@0.0.19#egg=hear21passt' -``` - -Then similarly, you can use the metric activating the corresponding flag: - -```shell -# one could extend the kld metric with additional audio classifier models that can then be picked through the configuration -dora run <...> evaluate.metrics.kld=true metrics.kld.model=passt -``` - -#### Text consistency - -We provide a text-consistency metric, similarly to the MuLan Cycle Consistency from -[MusicLM](https://arxiv.org/pdf/2301.11325.pdf) or the CLAP score used in -[Make-An-Audio](https://arxiv.org/pdf/2301.12661v1.pdf). -More specifically, we provide a PyTorch implementation of a Text consistency metric -relying on a pre-trained [Contrastive Language-Audio Pretraining (CLAP)](https://github.com/LAION-AI/CLAP). - -Please install the CLAP library as an extra dependency prior to using the metric: -```shell -pip install laion_clap -``` - -Then similarly, you can use the metric activating the corresponding flag: - -```shell -# one could extend the text consistency metric with additional audio classifier models that can then be picked through the configuration -dora run ... evaluate.metrics.text_consistency=true metrics.text_consistency.model=clap -``` - -Note that the text consistency metric based on CLAP will require the CLAP checkpoint to be -provided in the configuration. - -#### Chroma cosine similarity - -Finally, as introduced in MusicGen, we provide a Chroma Cosine Similarity metric in PyTorch. 
-No specific requirement is needed for this metric. Please activate the metric at the -evaluation stage with the appropriate flag: - -```shell -dora run ... evaluate.metrics.chroma_cosine=true -``` - -#### Comparing against reconstructed audio - -For all the above audio generation metrics, we offer the option to compute the metric on the reconstructed audio -fed in EnCodec instead of the generated sample using the flag `.use_gt=true`. - -## Example usage - -You will find example of configuration for the different metrics introduced above in: -* The [musicgen's default solver](../config/solver/musicgen/default.yaml) for all audio generation metrics -* The [compression's default solver](../config/solver/compression/default.yaml) for all audio synthesis metrics - -Similarly, we provide different examples in our grids: -* [Evaluation with ViSQOL](../audiocraft/grids/compression/encodec_musicgen_32khz.py) -* [Evaluation with FAD and others](../audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py) diff --git a/spaces/falterWliame/Face_Mask_Detection/AVG PC TuneUp 2020 Crack With License Key Free Download !NEW!.md b/spaces/falterWliame/Face_Mask_Detection/AVG PC TuneUp 2020 Crack With License Key Free Download !NEW!.md deleted file mode 100644 index 49bd2f1f5c409b4b3b666b0edc78d370bf976a5b..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/AVG PC TuneUp 2020 Crack With License Key Free Download !NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    AVG PC TuneUp 2020 Crack With License Key Free Download


    DOWNLOAD ✯✯✯ https://urlca.com/2uDcLc



    -
    -AVG Driver updater is eligible for download.you can also check out the .... AVG. PC TuneUp 2020 Crack + Product Key Free Download [Latest] . 1fdad05405
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Digital Signal Processing Sanjit Mitra 4th Edition Pdf.58.md b/spaces/falterWliame/Face_Mask_Detection/Digital Signal Processing Sanjit Mitra 4th Edition Pdf.58.md deleted file mode 100644 index bee69b2ba0a4e59326e402d8ac437f9d8c7701b3..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Digital Signal Processing Sanjit Mitra 4th Edition Pdf.58.md +++ /dev/null @@ -1,10 +0,0 @@ -

    digital signal processing sanjit mitra 4th edition pdf.58


    DOWNLOADhttps://urlca.com/2uDcvy



    -
    -This download is managed by Wrox Professional. Check the version of your software before downloading.. Sanjit Mitra 4th edition solution manual pdf Download free books from the database that cover. dl I download a whole bunch of books for free but the online registration is not. That I'm using the PDF on a Samsung. Hello, I have a question. I'm looking for a book on Signal Processing that has a chapter on the. or can someone tell me if I can download a free copy of this book online? sanjit k mitra free pdf download sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual. pdf download free book sanjit k mitra 4th edition solution manual. book sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual. sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual. pdf download free book sanjit k mitra 4th edition solution manual.. 5th Edition book sanjit k mitra 4th edition solution manual PDF download free sanjit k mitra 4th edition solution manual. Solution manual for Sanjit Mitra 4th Edition. Download free book sanjit k mitra 4th edition solution manual. Solution book pdf free download link or read online here in PDF. Download free sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual. How To Register For an Online Copy of Book Sanjit Mitra 4th Edition. Download pdf sanjit mitra 4th edition solution manual. This book is available for free download: sanjit mitra free pdf download sanjit mitra 4th edition solution manual. sanjit mitra free pdf download sanjit mitra 4th edition solution manual. pdf download free book sanjit k mitra 4th edition solution manual. Sanjit Mitra 4th Edition Solution Manual.. book sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual. how to download books free online sanjit k mitra 4th edition solution manual PDF. Download book sanjit k mitra free pdf download sanjit k mitra 4th edition solution manual.Q: - -Windows 2008 R2 2 NICs, one is VLAN tagged, the other is not. How do I tell which is which? - -In Windows Server 2008 R2, I have two NICs (virtual) which are connected to two physical NICs. One is VLAN 4fefd39f24
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Ewp Ewprod Hanging Asphyxia Lisa Carele Drowned Mpeg.md b/spaces/falterWliame/Face_Mask_Detection/Ewp Ewprod Hanging Asphyxia Lisa Carele Drowned Mpeg.md deleted file mode 100644 index 149321ac7229398aad205e38deb4af3e1c3b9a38..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Ewp Ewprod Hanging Asphyxia Lisa Carele Drowned Mpeg.md +++ /dev/null @@ -1,11 +0,0 @@ - -

    ewp ewprod hanging asphyxia lisa carele drowned 40.. ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-40.rar. en el cielo no hay cielo. ewp-ewprod-hanging-asphyxia-lisa-carele-drowned. inherencia explotacion de las personas.

    -

    Ewp Ewprod Hanging Asphyxia Lisa Carele Drowned Mpeg


    Download Ziphttps://urlca.com/2uDc6O



    -

    ellen ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-40-2.rar. ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-40-2. ewp-ewprod-hanging-asphyxia-lisa-carele-drowned. ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-40.

    -

    https://gametimereviews.com/ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-mpeg/. ewp - ewprod - hanging - asphyxia - lisa. name: 1318 snff ewprod com snuff siterip lisa carele drowned format: mpeg. ewprod com snuff siterip monika asphyxia slow strangling seeds of greed. size: 91.6 mb.

    -

    ewprod.com.. ewprod - asphyxia - slow - strangling seeds of greed.mpg downloadewprod ewprod.mpg. ewprod - hanging - asphyxia - on jekyll island. name: 1318 snff ewprod com snuff siterip lisa carele drowned format: mpeg. ewprod com snuff siterip monika asphyxia slow strangling seeds of greed. size: 91.6 mb.

    -

    -

    https://gametimereviews.com/ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-mpeg/. ewprod.com. ewprod - ewprod - hanging - asphyxia - lisa. name: 1318 snff ewprod com snuff siterip lisa carele drowned format: mpeg. ewprod com snuff siterip monika asphyxia slow strangling seeds of greed. size: 91.6 mb.

    -

    https://gametimereviews.com/ewp-ewprod-hanging-asphyxia-lisa-carele-drowned-mpeg/. all the 128 level ewp ewprod hanging asphyxia lisa carele drowned mpeg naruto shippuden. name: 1318 snff ewprod com snuff siterip lisa carele drowned format: mpeg. ewprod com snuff siterip monika asphyxia slow strangling seeds of greed. size: 91.62 mb.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Mapinfo Professional 11.5 Full Version Crack LINK And Patch.md b/spaces/falterWliame/Face_Mask_Detection/Mapinfo Professional 11.5 Full Version Crack LINK And Patch.md deleted file mode 100644 index 10ceb1196254e7c69a6b1b8016ff63375834aabb..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Mapinfo Professional 11.5 Full Version Crack LINK And Patch.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Mapinfo professional 11.5 full version crack and patch


    Download ✓✓✓ https://urlca.com/2uDcKG



    - -Nov 21, 2016 - MapInfo Professional 12.5 Crack Full Version with serial number permits users to visualize, analyze, edit, interpret, understand and output data, 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Download Film Cars 3 Sub Indo 1080p A Must-See for All Fans of the Franchise.md b/spaces/fatiXbelha/sd/Download Film Cars 3 Sub Indo 1080p A Must-See for All Fans of the Franchise.md deleted file mode 100644 index a2c701917c26a7f6780a6de8f4a44f1ede1412c6..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Film Cars 3 Sub Indo 1080p A Must-See for All Fans of the Franchise.md +++ /dev/null @@ -1,123 +0,0 @@ - -

    How to Download Film Cars 3 Sub Indo 1080p

    -

    If you are a fan of animated movies, you might have heard of Cars 3, the third installment of the popular Cars franchise. But did you know that you can download this film in sub indo with 1080p resolution? In this article, we will explain what sub indo and 1080p are, why they are desirable, and how you can download film Cars 3 sub indo 1080p legally and safely.

    -

    download film cars 3 sub indo 1080p


    Download ===== https://urllie.com/2uNCFD



    -

    What is Cars 3?

    -

    Cars 3 is a 2017 American computer-animated sports comedy-adventure film produced by Pixar Animation Studios for Walt Disney Pictures. It is the sequel to Cars 2 (2011) and the third installment of the Cars film series. The film follows the legendary race car Lightning McQueen as he tries to prove himself to a new generation of racers with the help of a young technician named Cruz Ramirez.

    -

    Plot summary

    -

    Blindsided by a new generation of blazing-fast racers led by arrogant hotshot Jackson Storm, the legendary Lightning McQueen is suddenly pushed out of the sport he loves. To get back in the game, he will need the help of an eager young racing technician with her own plan to win, inspiration from the late Fabulous Hudson Hornet, and a few unexpected twists and turns of fate. Proving that #95 isn't through yet will test the heart of a champion on Piston Cup Racing's biggest stage!

    -

    Cast and crew

    -

    The film features the voices of Owen Wilson as Lightning McQueen, Cristela Alonzo as Cruz Ramirez, Chris Cooper as Smokey, Nathan Fillion as Sterling, Larry the Cable Guy as Mater, Armie Hammer as Jackson Storm, and many others. The film was directed by Brian Fee (in his directorial debut) and produced by Kevin Reher and Andrea Warren, from a screenplay by Kiel Murray, Bob Peterson, and Mike Rich.

    -

    Reception and awards

    -

    The film received generally positive reviews from critics, who praised its animation, story, emotional depth, and voice acting. It grossed over $383 million worldwide against a budget of $175 million. It also won several awards, including Best Animated Feature at the Hollywood Film Awards and Best Animated Film at the National Board of Review.

    -

    What is Sub Indo?

    -

    Sub Indo is short for subtitle Indonesia, which means Indonesian subtitles. Subtitles are text that appear on the screen to translate or transcribe the dialogue or narration of a film or TV show. Sub Indo is usually used by Indonesian speakers who want to watch foreign films or TV shows in their native language.

    -


    -

    Definition and examples

    -

    A subtitle Indonesia file is usually a text file that contains the timing and text of the subtitles in Indonesian. The file can have various formats, such as SRT, SSA, ASS, or SUB. The file can be downloaded from various websites or created by fansubbers (fans who create subtitles). For example, here is a sample of a subtitle Indonesia file for Cars 3:

    -
    
1
00:00:01,000 --> 00:00:04,000
Ketika kamu adalah mobil balap tercepat di dunia,

2
00:00:04,000 --> 00:00:06,000
kamu hanya tahu satu hal.

3
00:00:06,000 --> 00:00:08,000
Kamu ingin tetap menjadi yang terbaik.

4
00:00:08,000 --> 00:00:10,000
Tapi ada hal-hal yang tidak bisa kamu kendalikan.

5
00:00:10,000 --> 00:00:12,000
Dan kadang-kadang,

6
00:00:12,000 --> 00:00:14,000
kamu harus menghadapi kenyataan.
    -
    -

    The subtitle Indonesia file can be played along with the video file using a media player that supports subtitles, such as VLC, KMPlayer, or GOM Player. The subtitles can be adjusted to match the video speed, size, font, color, and position.
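As a practical illustration, in VLC you can usually load the downloaded subtitle through the Subtitle menu (Add Subtitle File), or simply give the .srt file the same name as the video file and keep both in the same folder so the player picks it up automatically; KMPlayer and GOM Player have similar options for loading an external subtitle file.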

    -

    Benefits and drawbacks

    -

    Some of the benefits of watching film Cars 3 sub indo are:

    -
      -
    • You can enjoy the film in your native language and understand the dialogue and jokes better.
    • -
    • You can learn new words and phrases in English and improve your language skills.
    • -
    • You can appreciate the original voice acting and sound effects of the film.
    • -
    -

    Some of the drawbacks of watching film Cars 3 sub indo are:

    -
      -
    • You might miss some of the visual details and action scenes while reading the subtitles.
    • -
    • You might find some of the subtitles inaccurate, incomplete, or out of sync with the video.
    • -
    • You might encounter some technical issues or errors when downloading or playing the subtitle file.
    • -
    -

    What is 1080p?

    -

    1080p is a term that refers to the resolution or quality of a video. Resolution is the number of pixels (tiny dots) that make up an image on a screen. The more pixels, the sharper and clearer the image. 1080p means that the video has a resolution of 1920 x 1080 pixels, which is also known as Full HD or FHD.

    -

    Definition and examples

    -

    A 1080p video file is usually a compressed file that contains the video data in a format such as MP4, MKV, AVI, or MOV. The file can have various bitrates, which is the amount of data that is transferred per second. The higher the bitrate, the higher the quality and size of the file. For example, here is a comparison of different bitrates for a 1080p video file:

    - - - - - - - -
| Bitrate | Quality | Size (per minute) |
| --- | --- | --- |
| 1 Mbps | Poor | 7.5 MB |
| 5 Mbps | Fair | 37.5 MB |
| 10 Mbps | Good | 75 MB |
| 20 Mbps | Excellent | 150 MB |
| 50 Mbps | Pristine | 375 MB |
    -
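These sizes follow directly from the bitrate: megabits per second multiplied by 60 seconds and divided by 8 gives megabytes per minute, so a 5 Mbps file takes up about 5 x 60 / 8 = 37.5 MB for every minute of video, exactly as in the comparison above.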

    A 1080p video file can be downloaded from various websites or created by rippers (people who extract video from DVDs or Blu-rays). For example, here is a sample of a 1080p video file for Cars 3:

    -
    
General
Format : Matroska
Format version : Version 4
File size : 2.18 GiB
Duration : 1 h 42 min
Overall bit rate : 3 041 kb/s
Movie name : Cars.3.2017.1080p.BluRay.x264-[YTS.AG]

Video
ID : 1
Format : AVC
Format/Info : Advanced Video Codec
Format profile : High@L4.1
Format settings : CABAC / 4 Ref Frames
Format settings, CABAC : Yes
Format settings, Reference frames : 4 frames
Codec ID : V_MPEG4/ISO/AVC
Duration : 1 h 42 min
Bit rate : 2 850 kb/s
Width : 1 920 pixels
Height : 808 pixels
Display aspect ratio : 2.40:1
Frame rate mode : Constant
Frame rate : 23.976 (24000/1001) FPS
Color space : YUV
Chroma subsampling : 4:2:0
Bit depth : 8 bits
Scan type : Progressive
Bits/(Pixel*Frame) : 0.077
Stream size : 2.04 GiB (94%)
Writing mode : Yes
Title : Cars.3.2017.1080p.BluRay.x264-[YTS.AG]
Language : English
Default : Yes
Forced : No

Audio
ID : 2
Format : AAC LC
Format/Info : Advanced Audio Codec Low Complexity
Codec ID : A_AAC-2
Duration : 1 h 42 min
Bit rate : 192 kb/s
Channel(s) : 2 channels
Channel layout : L R
Sampling rate : 48.0 kHz
Frame rate : 46.875 FPS (1024 SPF)
Compression mode : Lossy
Stream size : 141 MiB (6%)
Title : Cars.3.2017.1080p.BluRay.x264-[YTS.AG]
Language : English
Default : Yes
Forced : No
    -

    Benefits and drawbacks

    -

    Some of the benefits of watching film Cars 3 in 1080p are:

    -
      -
    • You can enjoy the film in high definition and see the details and colors of the animation better.
    • -
    • You can experience the film in full screen and immerse yourself in the story and action.
    • -
    • You can appreciate the quality and clarity of the video and audio of the film.
    • -
    -

    Some of the drawbacks of watching film Cars 3 in 1080p are:

    -
      -
    • You might need a large storage space and a fast internet connection to download the file.
    • -
    • You might need a compatible device and a high-resolution screen to play the file.
    • -
    • You might encounter some technical issues or errors when downloading or playing the file.
    • -
    -

    How to Download Film Cars 3 Sub Indo 1080p

    -

    Now that you know what sub indo and 1080p are, you might be wondering how to download film Cars 3 sub indo 1080p. Before you do that, you should be aware of some legal and ethical issues, as well as some requirements and steps. Here are some tips and tricks to help you download film Cars 3 sub indo 1080p successfully.

    -

    Legal and ethical issues

    -

    Downloading film Cars 3 sub indo 1080p might seem like a convenient and cheap way to watch the film, but it is not without risks. Downloading film Cars 3 sub indo 1080p might violate the intellectual property rights of the creators and distributors of the film, as well as the laws of your country or region. Downloading film Cars 3 sub indo 1080p might also expose you to malware, viruses, or phishing scams that can harm your device or data. Downloading film Cars 3 sub indo 1080p might also deprive the creators and distributors of the film of their deserved revenue and recognition.

    -

    Therefore, before you download film Cars 3 sub indo 1080p, you should consider the following questions:

    -
      -
    • Is downloading film Cars 3 sub indo 1080p legal in your country or region?
    • -
    • Is downloading film Cars 3 sub indo 1080p ethical and respectful to the creators and distributors of the film?
    • -
    • Is downloading film Cars 3 sub indo 1080p safe and secure for your device and data?
    • -
    • Is downloading film Cars 3 sub indo 1080p worth the time and effort?
    • -
    -

    If you are unsure about any of these questions, you should consult a legal expert or a trusted source before you proceed. Alternatively, you can opt for other ways to watch film Cars 3 sub indo 1080p, such as renting or buying a DVD or Blu-ray, streaming it online from a licensed platform, or watching it in a cinema.

    -

    Requirements and steps

    -

    If you decide to download film Cars 3 sub indo 1080p, you will need some requirements and steps to do so. Here are some of them:

    -
      -
    1. A device that can access the internet and play video files, such as a computer, laptop, tablet, or smartphone.
    2. -
    3. A media player that supports subtitles and various video formats, such as VLC, KMPlayer, or GOM Player.
    4. -
    5. A reliable internet connection that can handle large file downloads.
    6. -
    7. A website that offers film Cars 3 sub indo 1080p for download, such as YTS.AG, Ganool.EE, or LayarKaca21.
    8. -
    9. A subtitle Indonesia file for film Cars 3 that matches the video file, such as Subscene.com, OpenSubtitles.org, or SubtitleSeeker.com.
    10. -
    -

The steps to download film Cars 3 sub indo 1080p are as follows:

    -
      -
    1. Go to the website that offers film Cars 3 sub indo 1080p for download and search for the film title. You might need to create an account or verify your identity to access the download link.
    2. -
    3. Select the video file that suits your preferences and click on the download button. You might need to use a torrent client or a download manager to download the file.
    4. -
    5. Go to the website that offers subtitle Indonesia file for film Cars 3 and search for the film title. Make sure that the subtitle file matches the video file in terms of format, duration, and framerate.
    6. -
    7. Select the subtitle file that suits your preferences and click on the download button. You might need to unzip or extract the file if it is compressed.
    8. -
    9. Open the video file with your media player and load the subtitle file from the menu or settings. You might need to adjust the subtitle timing, size, font, color, and position to match the video.
    10. -
    11. Enjoy watching film Cars 3 sub indo 1080p on your device.
    12. -
    -

    Tips and tricks

    -

    Here are some tips and tricks to enhance your experience of watching film Cars 3 sub indo 1080p:

    -
      -
    • Choose a reputable and secure website to download film Cars 3 sub indo 1080p and avoid clicking on suspicious links or ads that might contain malware or viruses.
    • -
    • Choose a high-quality video file with a reasonable bitrate and size to ensure a smooth and clear playback of film Cars 3 sub indo 1080p.
    • -
    • Choose a subtitle file that is well-translated and synchronized with the video file to avoid confusion and frustration while watching film Cars 3 sub indo 1080p.
    • -
    • Use a good pair of headphones or speakers to enjoy the sound effects and music of film Cars 3 sub indo 1080p.
    • -
    • Watch film Cars 3 sub indo 1080p with your friends or family and share your thoughts and feelings about the film.
    • -
    -

    Conclusion

    -

    In conclusion, downloading film Cars 3 sub indo 1080p is a way to watch this animated film in your native language and in high definition. However, you should be aware of the legal and ethical issues, as well as the requirements and steps involved in doing so. You should also follow some tips and tricks to enhance your experience of watching film Cars 3 sub indo 1080p. We hope that this article has helped you understand how to download film Cars 3 sub indo 1080p and enjoy this film.

    -

    FAQs

    -

    Here are some frequently asked questions about downloading film Cars 3 sub indo 1080p:

    -

    Q: Is downloading film Cars 3 sub indo 1080p illegal?

    -

    A: Downloading film Cars 3 sub indo 1080p might be illegal depending on the laws of your country or region. You should check with a legal expert or a trusted source before you download film Cars 3 sub indo 1080p.

    -

    Q: Is downloading film Cars 3 sub indo 1080p ethical?

    -

    A: Downloading film Cars 3 sub indo 1080p might be unethical depending on your personal values and beliefs. You should consider the impact of your actions on the creators and distributors of the film before you download film Cars 3 sub indo 1080p.

    -

    Q: Is downloading film Cars 3 sub indo 1080p safe?

    -

    A: Downloading film Cars 3 sub indo 1080p might be unsafe depending on the source and quality of the files. You should use a reputable and secure website to download film Cars 3 sub indo 1080p and avoid clicking on suspicious links or ads that might contain malware or viruses. You should also scan the files with an antivirus software before you open them.

    -

    Q: Is downloading film Cars 3 sub indo 1080p worth it?

    -

    A: Downloading film Cars 3 sub indo 1080p might be worth it depending on your preferences and expectations. You should weigh the pros and cons of downloading film Cars 3 sub indo 1080p and compare it with other options to watch the film. You should also consider the value and quality of the film itself.

    -

    Q: Where can I download film Cars 3 sub indo 1080p?

    -

    A: There are many websites that offer film Cars 3 sub indo 1080p for download, such as YTS.AG, Ganool.EE, or LayarKaca21. However, you should be careful and selective when choosing a website to download film Cars 3 sub indo 1080p, as some of them might be illegal, unethical, or unsafe.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download My Talking Tom 2 and Enjoy Mini Games and Puzzles.md b/spaces/fatiXbelha/sd/Download My Talking Tom 2 and Enjoy Mini Games and Puzzles.md deleted file mode 100644 index 47fd0ef57bf7b0d143609bafb16369bcbe1a409c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download My Talking Tom 2 and Enjoy Mini Games and Puzzles.md +++ /dev/null @@ -1,150 +0,0 @@ -
    -

    My Talking Tom 2: A Fun and Engaging Virtual Pet Game

    -

    Do you love cats? Do you wish you could have a cute and cuddly cat that talks to you, plays with you, and responds to your actions? If you answered yes to any of these questions, then you should definitely check out My Talking Tom 2, a virtual pet game that will keep you entertained for hours. In this game, you can adopt a baby Tom, a funny cat that can repeat what you say, make funny noises, and express his emotions. You can also take care of him by feeding him, bathing him, taking him to the toilet, curing his boo-boos, dressing him up, decorating his house, flying to different locations, playing mini-games, and collecting memories. In this article, we will tell you everything you need to know about this game, including what it is, how to play it, why you should play it, some tips and tricks for playing it, and some reviews from users and critics.

    -

    my talking tom 2 download


    DOWNLOADhttps://urllie.com/2uNFGx



    -

    What is My Talking Tom 2?

    -

    My Talking Tom 2 is a game where you can have your own virtual pet cat that you can interact with in various ways. It is the sequel to the popular My Talking Tom game that was released in 2013 by Outfit7 Limited, a company that specializes in creating games featuring animated characters. The game is available on Android and iOS devices for free, with optional in-app purchases.

    -

    The sequel to the popular My Talking Tom game

    -

    If you have played My Talking Tom before, you will find many familiar features in My Talking Tom 2. You can still talk to Tom and he will repeat what you say in a funny voice. You can still pet him, poke him, tickle him, or annoy him. You can still watch videos of other talking characters on YouTube. You can still play mini-games with him and earn coins. You can still customize his appearance and his house.

    -

    A virtual pet simulator with a cute and cuddly talking cat

    -

    If you are new to My Talking Tom 2, you will be amazed by how realistic and adorable Tom is. He has a range of emotions that are shown by his facial expressions and reactions. He can be happy, sad, angry, bored, hungry, sleepy, sick, or injured. He can also get dirty or have a booger hanging from his nose. He needs your attention and care to stay healthy and happy.

    -


    -

    A game with many features and activities to keep Tom happy and healthy

    -

My Talking Tom 2 is a game that offers many features and activities to keep Tom happy and healthy. You can do the following things with Tom:

    -

    How to Play My Talking Tom 2?

    -

    Playing My Talking Tom 2 is easy and fun. You just need to tap on the screen to interact with Tom and his surroundings. You can also swipe, drag, or tilt your device to perform different actions. Here are some of the things you can do with Tom:

    -

    Interact with Tom in different ways

    -

    You can talk to Tom and he will repeat what you say in a funny voice. You can also make him laugh, sneeze, fart, burp, or hiccup by tapping on his mouth, nose, belly, or butt. You can also pet him, poke him, tickle him, or annoy him by tapping on different parts of his body. You can also play with his toys, such as a ball, a guitar, a skateboard, or a drone. You can also take pictures of him and share them with your friends.

    -

    Feed Tom, bathe him, take him to the toilet, and cure his boo-boos

    -

    You can feed Tom by dragging food items from the fridge to his mouth. You can choose from different types of food, such as fruits, vegetables, meat, fish, sweets, or drinks. Some food items will make him happy, while others will make him sick. You can also buy more food items from the shop with coins. You can bathe Tom by dragging soap bubbles from the bathtub to his body. You can choose from different types of soap, such as shampoo, shower gel, bubble bath, or slime. Some soap will make him clean, while others will make him dirty. You can also buy more soap from the shop with coins. You can take Tom to the toilet by tapping on the toilet icon. You can choose from different types of toilet paper, such as plain, patterned, scented, or spicy. Some toilet paper will make him comfortable, while others will make him uncomfortable. You can also buy more toilet paper from the shop with coins. You can cure Tom's boo-boos by tapping on the medicine cabinet icon. You can choose from different types of remedies, such as bandages, ice packs, syringes, or pills. Some remedies will heal him quickly, while others will make him worse. You can also buy more remedies from the shop with coins.

    -

    Play mini-games and puzzles with Tom and earn coins and rewards

    -

    You can play mini-games and puzzles with Tom by tapping on the game console icon. You can choose from different types of games, such as action, arcade, puzzle, or strategy. Some games will test your skills and reflexes, while others will challenge your logic and memory. You can also buy more games from the shop with coins. You can earn coins and rewards by playing games and completing puzzles. Coins can be used to buy more items from the shop. Rewards can be used to unlock new features and locations.

    -

    Customize Tom's appearance and his house with various outfits and furniture

    -

    You can customize Tom's appearance by tapping on the wardrobe icon. You can choose from different types of outfits, such as hats, glasses, shirts, pants, shoes, or accessories. Some outfits will make him look cool , while others will make him look funny. You can also buy more outfits from the shop with coins. You can customize Tom's house by tapping on the furniture icon. You can choose from different types of furniture, such as beds, sofas, tables, chairs, lamps, or paintings. Some furniture will make his house look cozy, while others will make it look crazy. You can also buy more furniture from the shop with coins.

    -

    Fly to different locations with Tom's plane and collect souvenirs

    -

    You can fly to different locations with Tom's plane by tapping on the plane icon. You can choose from different types of locations, such as cities, islands, mountains, or deserts. Some locations will have beautiful scenery, while others will have hidden surprises. You can also buy more locations from the shop with coins. You can collect souvenirs from each location by tapping on the camera icon. You can choose from different types of souvenirs, such as postcards, magnets, stickers, or statues. Some souvenirs will have interesting facts, while others will have funny jokes. You can also buy more souvenirs from the shop with coins.

    -

    Why You Should Play My Talking Tom 2?

    -

    My Talking Tom 2 is a game that has many benefits for you and your family. Here are some of the reasons why you should play this game:

    -

    It's free, family-friendly, and fun

    -

    My Talking Tom 2 is a game that you can download and play for free on your Android or iOS device. You don't need to pay anything to enjoy this game, unless you want to buy some extra items with real money. The game is also family-friendly and suitable for all ages. You can play this game with your kids, your parents, your friends, or anyone else who loves cats and games. The game is also fun and addictive. You will never get bored of playing this game, as there are always new things to do and discover with Tom.

    -

    It's entertaining, engaging, and educational

    -

    My Talking Tom 2 is a game that will keep you entertained for hours. You will have a lot of fun interacting with Tom and his environment. You will also be engaged by the challenges and rewards that the game offers. You will want to play more and more to level up Tom and unlock new features and locations. The game is also educational and can help you improve your skills and knowledge. You can learn new words and phrases by talking to Tom and listening to him repeat them. You can also learn new facts and jokes by collecting souvenirs from different locations.

    -

    It's creative, colorful, and charming

    -

    My Talking Tom 2 is a game that will stimulate your creativity and imagination. You can create your own unique Tom by choosing his outfits and furniture. You can also create your own stories and adventures by flying to different locations with Tom's plane. The game is also colorful and charming in its graphics and sound effects. You will love the bright and vivid colors that the game uses to depict Tom and his world. You will also love the cute and funny sounds that Tom makes when he talks or reacts to your actions.

    -

    Tips and Tricks for Playing My Talking Tom 2

    -

    If you want to play My Talking Tom 2 like a pro, you should follow these tips and tricks:

    -

    Keep Tom happy by playing with him regularly

    -

    Tom's happiness level is shown by the smiley face icon at the top of the screen. If Tom is happy, he will be more energetic and playful. If Tom is unhappy, he will be more tired and grumpy. To keep Tom happy, you should play with him regularly by tapping on him or his toys. You should also avoid doing things that annoy him, such as poking him too much or leaving him alone for too long.

    -

    Buy food that gives you more value for your money

    -

    Tom's hunger level is shown by the fork and knife icon at the top of the screen. If Tom is hungry, he will be more weak and sickly. If Tom is full, he will be more strong and healthy. To feed Tom, you should buy food that gives you more value for your money. For example, a banana costs 5 coins but fills up 10% of Tom's hunger level, while a cake costs 10 coins but fills up only 5% of Tom's hunger level.

    -

    Experiment with different soaps and remedies for Tom's hygiene and health

    -

    Tom's hygiene level is shown by the soap bubble icon at the top of the screen. If Tom is dirty, he will be more prone to infections and diseases. If Tom is clean, he will be more resistant to germs and viruses. To bathe Tom, you should experiment with different soaps and see how they affect his hygiene level. For example, a shampoo soap will make him clean and shiny, while a slime soap will make him dirty and slimy. Tom's health level is shown by the heart icon at the top of the screen. If Tom is sick or injured, he will be more sad and in pain. If Tom is healthy, he will be more happy and comfortable. To cure Tom's boo-boos, you should experiment with different remedies and see how they affect his health level. For example, a bandage remedy will heal his cuts and bruises, while a syringe remedy will make him dizzy and nauseous.

    -

    Level up Tom to unlock new features and celebrate his birthday

    -

    Tom's level is shown by the star icon at the top of the screen. As you play with Tom and take care of him, you will earn stars that will help you level up Tom. Each time you level up Tom, you will unlock new features and items for him, such as new toys, new games, new outfits, new furniture, or new locations. You will also celebrate his birthday every 10 levels and get a special gift from him.

    -

    Spend your stars wisely on items that you like

    -

    Stars are the currency that you can use to buy items from the shop. You can earn stars by playing games, completing puzzles, flying to locations, or leveling up Tom. You can also buy stars with real money if you want to. You can spend your stars on items that you like or need for Tom, such as food, soap, toilet paper, remedies, outfits, furniture, games, locations, or souvenirs. However, you should spend your stars wisely and not waste them on items that you don't like or use.

    -

    Reviews of My Talking Tom 2

    -

    My Talking Tom 2 is a game that has received many reviews from users and critics. Here are some of the reviews that the game has received:

    -

    Positive reviews from users and critics

    -

    Many users and critics have praised My Talking Tom 2 for being a fun and engaging game that offers many features and activities for players of all ages. Some of the positive reviews are:

| User/Critic | Review |
| --- | --- |
| A Google user | "This game is awesome! I love how you can do so many things with Tom and his friends. The graphics are amazing and the sounds are hilarious. I play this game every day and I never get bored." |
| An App Store user | "This game is so cute and addictive! I love how Tom grows up and changes his appearance. The mini-games are fun and challenging. The locations are beautiful and interesting. I recommend this game to everyone who loves cats." |
| A reviewer from Android Authority | "My Talking Tom 2 is a game that improves on its predecessor in every way. It has more content, more customization, more interactivity, and more fun. It is a game that appeals to both kids and adults alike." |
| A reviewer from Pocket Gamer | "My Talking Tom 2 is a game that combines the best elements of virtual pet simulators, casual games, and social games. It is a game that offers a lot of variety, creativity, and humor." |
    -

    Negative reviews from users and critics

    -

    Some users and critics have criticized My Talking Tom 2 for being a repetitive and boring game that has too many ads and in-app purchases. Some of the negative reviews are:

| User/Critic | Review |
| --- | --- |
| A Google user | "This game is boring! It's the same thing over and over again. You just feed Tom, bathe him, play with him, etc. There's nothing new or exciting about it." |
| An App Store user | "This game is annoying! It has too many ads that pop up every few seconds. It also has too many in-app purchases that make you spend real money to get more items." |
| A reviewer from TechRadar | "My Talking Tom 2 is a game that fails to innovate or impress. It is a game that relies on its cute and funny character to attract players, but it lacks depth, challenge, and originality. It is a game that is more suitable for younger audiences who don't mind the simplicity and repetition." |
| A reviewer from IGN | "My Talking Tom 2 is a game that is a disappointment compared to its predecessor. It is a game that has fewer features, less interactivity, and more bugs. It is a game that feels like a cash grab rather than a quality product." |
    -

    Conclusion

    -

My Talking Tom 2 can be enjoyed by anyone who loves cats and games. It gives you your own virtual pet cat to talk to, play with, and take care of, and it offers plenty of features and activities to keep you entertained for hours. The game is free, family-friendly, fun, engaging, educational, creative, colorful, and charming. On the other hand, it can also feel repetitive, boring, simplistic, and unoriginal, and its many ads and in-app purchases can spoil the experience. It may not suit everyone's taste. Ultimately, it is up to you to decide whether to play it. If you are interested in trying it out, you can download it for free from the Google Play Store or the App Store.

    -

    FAQs

    -

    Here are some of the frequently asked questions about My Talking Tom 2:

    -

    Q: How can I get more coins and stars in the game?

    -

    A: You can get more coins and stars by playing mini-games, completing puzzles, flying to locations, leveling up Tom, or watching ads. You can also buy more coins and stars with real money if you want to.

    -

    Q: How can I get rid of the ads in the game?

    -

    A: You can get rid of the ads in the game by turning off your internet connection or by buying the ad-free version of the game with real money.

    -

    Q: How can I save my progress in the game?

    -

    A: You can save your progress in the game by connecting it to your Google Play or Game Center account. You can also sync your progress across multiple devices by using the same account.

    -

    Q: How can I play with other players in the game?

    -

    A: You can play with other players in the game by connecting it to your Facebook account. You can then visit your friends' houses, send them gifts, or chat with them.

    -

    Q: How can I contact the developers of the game?

    -

    A: You can contact the developers of the game by sending them an email at support@outfit7.com or by visiting their website at https://outfit7.com/.

    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Rune Games and Voice Chat APK and Support Indie Game Developers.md b/spaces/fatiXbelha/sd/Download Rune Games and Voice Chat APK and Support Indie Game Developers.md deleted file mode 100644 index aa399abbc4761426c13285d11daa79b52aea8960..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Rune Games and Voice Chat APK and Support Indie Game Developers.md +++ /dev/null @@ -1,154 +0,0 @@ - -

    How to Download Rune APK: A Guide for Android Users

    -

    If you are looking for a new way to play amazing games and chat with your friends, you might want to try Rune APK. Rune is an app that lets you find and play fun games with your friends, while also enjoying voice chat and social features. In this article, we will show you what Rune APK is, what are its features and benefits, and how to download and install it on your Android device. Let's get started!

    -

    What is Rune APK?

    -

    Rune APK is an app that allows you to play games and chat with your friends on your Android device. It is not available on the Google Play Store, so you need to download it from a third-party source. Rune APK is developed by Rune AI, a company that aims to create a social gaming platform for everyone.

    -

    download rune apk


    Download Zip →→→ https://urllie.com/2uNALH



    -

    Features of Rune APK

    -

    Some of the features of Rune APK are:

    -
      -
    • You can find and play amazing games with your friends, such as Among Us, Brawl Stars, Call of Duty Mobile, Fortnite, Minecraft, PUBG Mobile, Roblox, and more.
    • -
    • You can chat with other players using voice chat, text chat, or emojis.
    • -
    • You can create or join groups based on your interests, preferences, or game genres.
    • -
    • You can customize your profile with your name, photo, bio, and status.
    • -
    • You can adjust your settings such as notifications, sound effects, microphone volume, and language.
    • -
    -

    Benefits of Rune APK

    -

    Some of the benefits of Rune APK are:

    -
      -
    • You can enjoy a seamless gaming experience without any lag or interruption.
    • -
    • You can make new friends and socialize with people who share your passion for gaming.
    • -
    • You can discover new games and genres that you might not have tried before.
    • -
    • You can improve your communication and teamwork skills by playing with others.
    • -
    • You can have fun and relax after a long day.
    • -
    -

    How to Download and Install Rune APK

    -

    Downloading and installing Rune APK is easy and fast. Just follow these simple steps:

    -

    download rune games and voice chat apk
    -how to download rune apk on android
    -download rune apk latest version
    -download rune apk for pc
    -download rune apk mod
    -download rune apk pure
    -download rune apk free
    -download rune apk offline
    -download rune apk no ads
    -download rune apk unlimited money
    -download rune apk from google play
    -download rune apk for ios
    -download rune apk old version
    -download rune apk hack
    -download rune apk full version
    -download rune apk without root
    -download rune apk mirror
    -download rune apk obb
    -download rune apk file
    -download rune apk cracked
    -download rune apk pro
    -download rune apk premium
    -download rune apk update
    -download rune apk beta
    -download rune apk 2023
    -download rune apk android 11
    -download rune apk android 10
    -download rune apk android 9
    -download rune apk android 8
    -download rune apk android 7
    -download rune apk android 6
    -download rune apk android 5
    -download rune apk android 4.4.2
    -download rune apk for samsung galaxy s21 ultra
    -download rune apk for samsung galaxy s20 fe
    -download rune apk for samsung galaxy note 20 ultra
    -download rune apk for samsung galaxy a52 5g
    -download rune apk for samsung galaxy a72 5g
    -download rune apk for samsung galaxy m51
    -download rune apk for samsung galaxy m31s
    -download rune apk for oneplus 9 pro
    -download rune apk for oneplus 9r
    -download rune apk for oneplus nord ce 5g
    -download rune apk for oneplus nord n10 5g
    -download rune apk for oneplus nord n100

    -

    Step 1: Enable Unknown Sources

    -

    Since Rune APK is not available on the Google Play Store, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the official store. To do this:

    -
      -
1. Go to your device's Settings.
2. Tap on Security or Privacy.
3. Find the option that says Unknown Sources or Install Unknown Apps.
4. Toggle it on or allow it for the browser or file manager that you will use to download the APK file.
    -

    Step 2: Download the APK File

    -

    Next, you need to download the APK file from a reliable source. You can use this link to download it directly from the official website of Rune AI. Alternatively, you can search for other sources online, but make sure they are safe and trustworthy. To download the APK file:

    -
      -
1. Open your browser or file manager and go to the link or source that you have chosen.
2. Tap on the Download button or icon.
3. Wait for the download to complete.
    -

    Step 3: Install the APK File

    -

    Once you have downloaded the APK file, you need to install it on your device. To do this:

    -
      -
1. Locate the downloaded file in your Downloads folder or notification bar.
2. Tap on the file and select Install.
3. Wait for the installation to finish. (If you downloaded the file on a computer instead, see the sketch below.)
    -
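If you ever download the APK on a computer rather than on the phone itself, one common alternative is to sideload it over ADB (Android Debug Bridge). This is only a rough sketch, not an official Rune instruction: it assumes the Android platform tools are installed, USB debugging is enabled on the phone, and the downloaded file is saved as rune.apk in your Downloads folder (that file name and path are hypothetical).

```python
import subprocess
from pathlib import Path

# Hypothetical name and location of the downloaded APK on your computer.
apk_path = Path.home() / "Downloads" / "rune.apk"

# "adb install -r" installs the package on the connected phone,
# replacing any previously installed version of the app.
subprocess.run(["adb", "install", "-r", str(apk_path)], check=True)
```

For most readers, installing directly on the phone as described in the steps above is simpler; the ADB route is mainly useful if your browser or file manager blocks APK installs.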

    Step 4: Launch the App and Enjoy

    -

    Finally, you can launch the app and start playing games and chatting with your friends. To do this:

    -
      -
1. Go to your device's home screen or app drawer.
2. Find the Rune icon and tap on it.
3. Sign up or log in with your email, phone number, or Facebook account.
4. Explore the app and have fun!
    -

    How to Use Rune APK

    -

    Using Rune APK is simple and intuitive. Here are some tips on how to use it:

    -

    How to Find and Play Games with Friends

    -

    To find and play games with your friends, you can either join an existing group or create your own. To join a group:

    -
      -
1. Tap on the Games tab at the bottom of the screen.
2. Browse through the list of games and genres that you are interested in.
3. Tap on the game that you want to play.
4. Tap on the Join button to join a group of players who are playing that game.
5. Wait for the group leader to start the game and enjoy!
    -

    To create a group:

    -
      -
1. Tap on the Games tab at the bottom of the screen.
2. Browse through the list of games and genres that you are interested in.
3. Tap on the game that you want to play.
4. Tap on the Create button to create your own group of players for that game.
5. Invite your friends or wait for other players to join your group.
6. Start the game and enjoy!
    -

    How to Chat with Other Players

    -

    To chat with other players, you can use voice chat, text chat, or emojis. To use voice chat:

    -
      -
1. Tap on the Voice Chat button at the top right corner of the screen.
2. Select the microphone icon to turn on your microphone.
3. Speak into your device's microphone and communicate with other players.
4. Select the speaker icon to adjust the volume of other players' voices.
5. Select the mute icon to mute or unmute yourself or other players.
    -

    To use text chat:

    -
      -
1. Tap on the Text Chat button at the top right corner of the screen.
2. Type your message in the text box at the bottom of the screen.
3. Tap on the send icon to send your message.
4. Read and reply to other players' messages.
    -

    To use emojis:

    -
      -
1. Tap on the Emoji button at the top right corner of the screen.
2. Select an emoji from the list that expresses your emotion or reaction.
3. Tap on the emoji to send it to other players.
    -

    How to Customize Your Profile and Settings

    -

    To customize your profile and settings, you can tap on the Profile tab at the bottom of the screen. Here, you can:

    -
- Edit your name, photo, bio, and status by tapping on them.
- View your stats, achievements, badges, and friends by tapping on them.
- Change your notifications, sound effects, microphone volume, and language by tapping on Settings.
- Log out or delete your account by tapping on Logout or Delete Account.
    -

    Conclusion

    -

    Rune APK is a great app for Android users who love gaming and socializing. It allows you to find and play amazing games with your friends, while also enjoying voice chat and social features. It is easy to download and install, and simple to use. If you are looking for a new way to have fun and relax, you should give Rune APK a try!

    -

    FAQs

    -

    Here are some frequently asked questions about Rune APK:

    -
- Q: Is Rune APK safe and legal? A: Yes, Rune APK is safe and legal. It does not contain any viruses or malware, and it does not violate any laws or regulations. However, you should always download it from a reliable source and enable unknown sources on your device.
- Q: What are the requirements for Rune APK? A: Rune APK requires an Android device with version 5.0 or higher, a stable internet connection, a microphone, and enough storage space.
- Q: How much does Rune APK cost? A: Rune APK is free to download and use. However, it may contain ads or in-app purchases that require real money.
- Q: Can I play Rune APK on PC or iOS devices? A: No, Rune APK is only compatible with Android devices. However, you can use an Android emulator on your PC or Mac to run Rune APK. For iOS devices, you can try other similar apps that are available on the App Store.
- Q: How can I contact the developers of Rune APK? A: You can contact the developers of Rune APK by visiting their website, sending them an email at support@rune.ai, or following them on social media platforms such as Facebook, Twitter, Instagram, and YouTube.
      -
      -
      \ No newline at end of file diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" deleted file mode 100644 index 3da831fd07e361a532777c83bb02cff265b94abd..0000000000000000000000000000000000000000 --- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ /dev/null @@ -1,194 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file, get_conf -import re, requests, unicodedata, os -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -def download_arxiv_(url_pdf): - if 'arxiv.org' not in url_pdf: - if ('.' in url_pdf) and ('/' not in url_pdf): - new_url = 'https://arxiv.org/abs/'+url_pdf - print('下载编号:', url_pdf, '自动定位:', new_url) - # download_arxiv_(new_url) - return download_arxiv_(new_url) - else: - print('不能识别的URL!') - return None - if 'abs' in url_pdf: - url_pdf = url_pdf.replace('abs', 'pdf') - url_pdf = url_pdf + '.pdf' - - url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs') - title, other_info = get_name(_url_=url_abs) - - paper_id = title.split()[0] # '[1712.00559]' - if '2' in other_info['year']: - title = other_info['year'] + ' ' + title - - known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI'] - for k in known_conf: - if k in other_info['comment']: - title = k + ' ' + title - - download_dir = './gpt_log/arxiv/' - os.makedirs(download_dir, exist_ok=True) - - title_str = title.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - - requests_pdf_url = url_pdf - file_path = download_dir+title_str - # if os.path.exists(file_path): - # print('返回缓存文件') - # return './gpt_log/arxiv/'+title_str - - print('下载中') - proxies, = get_conf('proxies') - r = requests.get(requests_pdf_url, proxies=proxies) - with open(file_path, 'wb+') as f: - f.write(r.content) - print('下载完成') - - # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf)) - # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True) - - x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors']) - x = x.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - return './gpt_log/arxiv/'+title_str, other_info - - -def get_name(_url_): - import os - from bs4 import BeautifulSoup - print('正在获取文献名!') - print(_url_) - - # arxiv_recall = {} - # if os.path.exists('./arxiv_recall.pkl'): - # with open('./arxiv_recall.pkl', 'rb') as f: - # arxiv_recall = pickle.load(f) - - # if _url_ in arxiv_recall: - # print('在缓存中') - # return arxiv_recall[_url_] - - proxies, = get_conf('proxies') - res = requests.get(_url_, proxies=proxies) - - bs = BeautifulSoup(res.text, 'html.parser') - other_details = {} - - # get year - try: - year = bs.find_all(class_='dateline')[0].text - year = re.search(r'(\d{4})', year, re.M | re.I).group(1) - other_details['year'] = year - abstract = bs.find_all(class_='abstract mathjax')[0].text - other_details['abstract'] = abstract - except: - other_details['year'] = '' - print('年份获取失败') - - # get author - try: - authors 
= bs.find_all(class_='authors')[0].text - authors = authors.split('Authors:')[1] - other_details['authors'] = authors - except: - other_details['authors'] = '' - print('authors获取失败') - - # get comment - try: - comment = bs.find_all(class_='metatable')[0].text - real_comment = None - for item in comment.replace('\n', ' ').split(' '): - if 'Comments' in item: - real_comment = item - if real_comment is not None: - other_details['comment'] = real_comment - else: - other_details['comment'] = '' - except: - other_details['comment'] = '' - print('年份获取失败') - - title_str = BeautifulSoup( - res.text, 'html.parser').find('title').contents[0] - print('获取成功:', title_str) - # arxiv_recall[_url_] = (title_str+'.pdf', other_details) - # with open('./arxiv_recall.pkl', 'wb') as f: - # pickle.dump(arxiv_recall, f) - - return title_str+'.pdf', other_details - - - -@CatchException -def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - - CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……" - import glob - import os - - # 基本信息:功能、贡献者 - chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 提取摘要,下载PDF文档 - try: - pdf_path, info = download_arxiv_(txt) - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"下载pdf文件未成功") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 翻译摘要等 - i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" - i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - msg = '正常' - # ** gpt request ** - # 单线,获取文章meta信息 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="Your job is to collect information from materials and translate to Chinese。", - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - # 写入文件 - import shutil - # 重置文件的创建时间 - shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path) - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载")) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" deleted file mode 100644 index cbda23b83d759e6a3a4da5847c37ddff662daab2..0000000000000000000000000000000000000000 --- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" +++ /dev/null @@ -1,166 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -import re -import unicodedata -fast_debug = False 
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - -def is_paragraph_break(match): - """ - 根据给定的匹配结果来判断换行符是否表示段落分隔。 - 如果换行符前为句子结束标志(句号,感叹号,问号),且下一个字符为大写字母,则换行符更有可能表示段落分隔。 - 也可以根据之前的内容长度来判断段落是否已经足够长。 - """ - prev_char, next_char = match.groups() - - # 句子结束标志 - sentence_endings = ".!?" - - # 设定一个最小段落长度阈值 - min_paragraph_length = 140 - - if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length: - return "\n\n" - else: - return " " - -def normalize_text(text): - """ - 通过把连字(ligatures)等文本特殊符号转换为其基本形式来对文本进行归一化处理。 - 例如,将连字 "fi" 转换为 "f" 和 "i"。 - """ - # 对文本进行归一化处理,分解连字 - normalized_text = unicodedata.normalize("NFKD", text) - - # 替换其他特殊字符 - cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text) - - return cleaned_text - -def clean_text(raw_text): - """ - 对从 PDF 提取出的原始文本进行清洗和格式化处理。 - 1. 对原始文本进行归一化处理。 - 2. 替换跨行的连词 - 3. 根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换 - """ - # 对文本进行归一化处理 - normalized_text = normalize_text(raw_text) - - # 替换跨行的连词 - text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text) - - # 根据前后相邻字符的特点,找到原文本中的换行符 - newlines = re.compile(r'(\S)\n(\S)') - - # 根据 heuristic 规则,用空格或段落分隔符替换原换行符 - final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text) - - return final_text.strip() - -def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os, fitz - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with fitz.open(fp) as doc: - file_content = "" - for page in doc: - file_content += page.get_text() - file_content = clean_text(file_content) - print(file_content) - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - -@CatchException -def 
批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/fclong/summary/fengshen/data/task_dataloader/medicalQADataset.py b/spaces/fclong/summary/fengshen/data/task_dataloader/medicalQADataset.py deleted file mode 100644 index 3d76ed583c7d150769c81d830293909e1c110485..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/data/task_dataloader/medicalQADataset.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding=utf8 -import os -import pytorch_lightning as pl -from torch.utils.data import DataLoader, Dataset -from tqdm import tqdm -from transformers import AutoTokenizer - - -class GPT2QADataset(Dataset): - ''' - Dataset Used for yuyuan medical qa task. - Just surpport small datasets, when deal with large datasets it may be slowly. 
- for large datasets please use mmapdatasets(doing) - ''' - - def __init__(self, data_path, name, args): - super().__init__() - self.tokenizer = AutoTokenizer.from_pretrained( - args.pretrained_model_path) - if self.tokenizer.pad_token is None: - self.tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'}) - self.data_size = os.path.getsize(data_path)/1024/1024/1024 - self.data_type_name = name - self.data = self.load_data(data_path) - self.max_seq_length = args.max_seq_length - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.encode(self.data[index]) - - def load_data(self, data_path): - # 有进度条展示 - if self.data_size <= 5: - with open(data_path, "rt", encoding='utf8') as f: - lines = f.readlines() - total_num = len(lines) - data_gen = lines - else: - data_gen = open(data_path, "rt", encoding='utf8') - total_num = None - - data = [] - with tqdm(total=total_num, desc=f'{self.data_type_name}处理进度', mininterval=0.3) as bar: - for idx, line in enumerate(data_gen): - data.append(self.data_parse(line)) - bar.update() - - if self.data_size > 5: - data_gen.close() - return data - - def data_parse(self, line): - """ - 解析不同格式的数据 - """ - dic = eval(line.strip()) - return dic - - def encode(self, item): - """ - 将数据转换成模型训练的输入 - """ - inputs_dict = self.tokenizer.encode_plus(item['Question']+item['answer'], - max_length=self.max_seq_length, padding='max_length', - truncation=True, return_tensors='pt') - target = inputs_dict['input_ids'] - labels = target.clone().detach() - labels[target == self.tokenizer.pad_token_id] = -100 - return { - "input_ids": inputs_dict['input_ids'].squeeze(), - "attention_mask": inputs_dict['attention_mask'].squeeze(), - "labels": labels.squeeze(), - "question": item['Question'], - "answer": item['answer'] - } - - -class GPT2QADataModel(pl.LightningDataModule): - @staticmethod - def add_data_specific_args(parent_args): - parser = parent_args.add_argument_group('GPT2QADataModel') - parser.add_argument('--data_dir', type=str, required=True) - parser.add_argument('--num_workers', default=2, type=int) - parser.add_argument('--train_data', default='train.txt', type=str) - parser.add_argument('--valid_data', default='valid.txt', type=str) - parser.add_argument('--test_data', default='test.txt', type=str) - parser.add_argument('--train_batchsize', type=int, required=True) - parser.add_argument('--valid_batchsize', type=int, required=True) - parser.add_argument('--max_seq_length', default=1024, type=int) - return parent_args - - def __init__(self, args): - super().__init__() - self.args = args - self.train_batchsize = args.train_batchsize - self.valid_batchsize = args.valid_batchsize - if not args.do_eval_only: - self.train_data = GPT2QADataset(os.path.join( - args.data_dir, args.train_data), '训练集', args) - self.valid_data = GPT2QADataset(os.path.join( - args.data_dir, args.valid_data), '验证集', args) - self.test_data = GPT2QADataset(os.path.join( - args.data_dir, args.test_data), '测试集', args) - - def train_dataloader(self): - return DataLoader( - self.train_data, shuffle=True, - batch_size=self.train_batchsize, - pin_memory=False, num_workers=self.args.num_workers) - - def val_dataloader(self): - return DataLoader(self.valid_data, shuffle=False, - batch_size=self.valid_batchsize, - pin_memory=False, num_workers=self.args.num_workers) - - def predict_dataloader(self): - return DataLoader(self.test_data, shuffle=False, - batch_size=self.valid_batchsize, pin_memory=False, - num_workers=self.args.num_workers) - - -if __name__ == '__main__': - 
import argparse - modelfile = '/cognitive_comp/wuziwei/pretrained_model_hf/medical_v2' - datafile = '/cognitive_comp/wuziwei/task-data/medical_qa/medical_qa_train.txt' - parser = argparse.ArgumentParser(description='hf test', allow_abbrev=False) - group = parser.add_argument_group(title='test args') - group.add_argument('--pretrained-model-path', type=str, default=modelfile, - help='Number of transformer layers.') - group.add_argument('--max-seq-length', type=int, default=1024) - args = parser.parse_args() - - testml = GPT2QADataset(datafile, 'medical_qa', args=args) - - print(testml[10]) diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/initialize.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/initialize.py deleted file mode 100644 index 855bd49aac15503896030830f9b081d15957ae03..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/initialize.py +++ /dev/null @@ -1,160 +0,0 @@ -from argparse import ArgumentParser, Namespace -from typing import ( - List, - Tuple, -) - -import numpy as np -from PIL import Image -import torch -from torch import nn -import torch.nn.functional as F -from torchvision.transforms import ( - Compose, - Grayscale, - Resize, - ToTensor, -) - -from models.encoder import Encoder -from models.encoder4editing import ( - get_latents as get_e4e_latents, - setup_model as setup_e4e_model, -) -from utils.misc import ( - optional_string, - iterable_to_str, - stem, -) - - - -class ColorEncoderArguments: - def __init__(self): - parser = ArgumentParser("Encode an image via a feed-forward encoder") - - self.add_arguments(parser) - - self.parser = parser - - @staticmethod - def add_arguments(parser: ArgumentParser): - parser.add_argument("--encoder_ckpt", default=None, - help="encoder checkpoint path. 
initialize w with encoder output if specified") - parser.add_argument("--encoder_size", type=int, default=256, - help="Resize to this size to pass as input to the encoder") - - -class InitializerArguments: - @classmethod - def add_arguments(cls, parser: ArgumentParser): - ColorEncoderArguments.add_arguments(parser) - cls.add_e4e_arguments(parser) - parser.add_argument("--mix_layer_range", default=[10, 18], type=int, nargs=2, - help="replace layers to in the e4e code by the color code") - - parser.add_argument("--init_latent", default=None, help="path to init wp") - - @staticmethod - def to_string(args: Namespace): - return (f"init{stem(args.init_latent).lstrip('0')[:10]}" if args.init_latent - else f"init({iterable_to_str(args.mix_layer_range)})") - #+ optional_string(args.init_noise > 0, f"-initN{args.init_noise}") - - @staticmethod - def add_e4e_arguments(parser: ArgumentParser): - parser.add_argument("--e4e_ckpt", default='checkpoint/e4e_ffhq_encode.pt', - help="e4e checkpoint path.") - parser.add_argument("--e4e_size", type=int, default=256, - help="Resize to this size to pass as input to the e4e") - - - -def create_color_encoder(args: Namespace): - encoder = Encoder(1, args.encoder_size, 512) - ckpt = torch.load(args.encoder_ckpt) - encoder.load_state_dict(ckpt["model"]) - return encoder - - -def transform_input(img: Image): - tsfm = Compose([ - Grayscale(), - Resize(args.encoder_size), - ToTensor(), - ]) - return tsfm(img) - - -def encode_color(imgs: torch.Tensor, args: Namespace) -> torch.Tensor: - assert args.encoder_size is not None - - imgs = Resize(args.encoder_size)(imgs) - - color_encoder = create_color_encoder(args).to(imgs.device) - color_encoder.eval() - with torch.no_grad(): - latent = color_encoder(imgs) - return latent.detach() - - -def resize(imgs: torch.Tensor, size: int) -> torch.Tensor: - return F.interpolate(imgs, size=size, mode='bilinear') - - -class Initializer(nn.Module): - def __init__(self, args: Namespace): - super().__init__() - - self.path = None - if args.init_latent is not None: - self.path = args.init_latent - return - - - assert args.encoder_size is not None - self.color_encoder = create_color_encoder(args) - self.color_encoder.eval() - self.color_encoder_size = args.encoder_size - - self.e4e, e4e_opts = setup_e4e_model(args.e4e_ckpt) - assert 'cars_' not in e4e_opts.dataset_type - self.e4e.decoder.eval() - self.e4e.eval() - self.e4e_size = args.e4e_size - - self.mix_layer_range = args.mix_layer_range - - def encode_color(self, imgs: torch.Tensor) -> torch.Tensor: - """ - Get the color W code - """ - imgs = resize(imgs, self.color_encoder_size) - - latent = self.color_encoder(imgs) - - return latent - - def encode_shape(self, imgs: torch.Tensor) -> torch.Tensor: - imgs = resize(imgs, self.e4e_size) - imgs = (imgs - 0.5) / 0.5 - if imgs.shape[1] == 1: # 1 channel - imgs = imgs.repeat(1, 3, 1, 1) - return get_e4e_latents(self.e4e, imgs) - - def load(self, device: torch.device): - latent_np = np.load(self.path) - return torch.tensor(latent_np, device=device)[None, ...] 
- - def forward(self, imgs: torch.Tensor) -> torch.Tensor: - if self.path is not None: - return self.load(imgs.device) - - shape_code = self.encode_shape(imgs) - color_code = self.encode_color(imgs) - - # style mix - latent = shape_code - start, end = self.mix_layer_range - latent[:, start:end] = color_code - return latent diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bar Bar Din Ye Aaye The Best Birthday Wishes with Mohammed Rafis Song.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bar Bar Din Ye Aaye The Best Birthday Wishes with Mohammed Rafis Song.md deleted file mode 100644 index 6e5f7e57043b218455ff48b11d9a943ed71c8217..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bar Bar Din Ye Aaye The Best Birthday Wishes with Mohammed Rafis Song.md +++ /dev/null @@ -1,125 +0,0 @@ -
      -

      Download Baar Baar Din Ye Aaye - The Ultimate Birthday Song

      -

      Are you looking for a perfect birthday song to wish your loved ones on their special day? Do you want to make them feel happy and cherished with a melodious tune and heartfelt words? If yes, then you should download Baar Baar Din Ye Aaye, the ultimate birthday song that has been loved by millions of people for decades. In this article, we will tell you everything you need to know about this song, its features, its benefits, and how to download it for free. So, read on and get ready to celebrate your birthday with Baar Baar Din Ye Aaye.

      -

      Introduction

      -

      What is Baar Baar Din Ye Aaye?

      -

      Baar Baar Din Ye Aaye is a Hindi birthday song that was originally sung by the legendary singer Mohammed Rafi for the 1967 movie Farz. The song is composed by the famous duo Laxmikant-Pyarelal and written by Anand Bakshi. The song features the actor Jeetendra and the actress Babita in a cheerful and colorful setting, where they celebrate the birthday of their friend with a cake, balloons, and confetti. The song has a catchy tune and a simple chorus that repeats "Baar baar din ye aaye, baar baar dil ye gaaye, tum jiyo hazaaron saal, ye meri hai aarzoo, happy birthday to you" which means "May this day come again and again, may this heart sing again and again, may you live for thousands of years, this is my wish, happy birthday to you".

      -

      download bar bar din ye aaye


      Download File ··· https://gohhs.com/2uPsZ0



      -

      Why is it so popular?

      -

      Baar Baar Din Ye Aaye is one of the most popular birthday songs in India and across the world. It has been played on countless occasions, from family gatherings to school functions, from radio stations to TV shows, from weddings to anniversaries. It has also been covered by many singers and musicians in different languages and styles. Some of the reasons why this song is so popular are:

      -
        -
      • It has a universal appeal that transcends generations, cultures, and regions.
      • -
      • It has a positive and uplifting message that expresses love, gratitude, and happiness.
      • -
      • It has a memorable melody and rhythm that makes it easy to sing along and dance to.
      • -
      • It has a nostalgic charm that reminds people of their childhood memories and old friends.
      • -
      -

      How to download it for free?

      -

      If you want to download Baar Baar Din Ye Aaye for free, you have many options to choose from. You can either stream it online or download it offline from various websites and apps that offer free music downloads. Some of the best sources to download this song are:

      -
        -
• Wynk Music: This is a popular music app that lets you download songs in MP3 format for offline listening. You can also set this song as your hello tune for free.
• -
• YouTube: This is the most widely used video platform that hosts the original video of this song along with many other versions and remixes. You can watch the video online or download it using a YouTube downloader tool.
• -
• Gaana: This is another popular music app that offers unlimited access to millions of songs in various languages. You can stream or download this song in high quality audio.
      • -
      -

      Features of Baar Baar Din Ye Aaye

      -

      The singer and the music composer

      The singer of this song is none other than Mohammed Rafi, one of the most influential and versatile singers of Indian cinema. He has sung over 7,000 songs in various languages and genres, and has won numerous awards and accolades for his contribution to music. He is known for his expressive voice, his range, and his ability to adapt to any mood and situation. He has sung for many actors and composers, but his collaboration with Laxmikant-Pyarelal was one of the most successful and prolific ones. They have created many hit songs together, including Baar Baar Din Ye Aaye.

      -

      The music composer of this song is the duo of Laxmikant-Pyarelal, who are regarded as one of the most successful and influential music directors of Bollywood. They have composed music for over 500 films in various languages and styles, and have won several awards and honors for their work. They are known for their innovative and diverse use of instruments, their catchy tunes, and their mastery of various musical forms. They have worked with many singers and lyricists, but their partnership with Mohammed Rafi and Anand Bakshi was one of the most remarkable ones. They have given many evergreen songs to the industry, including Baar Baar Din Ye Aaye.

      -

      The lyrics and the meaning

      -

      The lyrics of this song are written by Anand Bakshi, who is considered as one of the greatest lyricists of Hindi cinema. He has penned over 4,000 songs in various languages and genres, and has won several awards and recognition for his talent. He is known for his simple yet profound words, his poetic flair, and his ability to connect with the listeners. He has written for many composers and singers, but his association with Laxmikant-Pyarelal and Mohammed Rafi was one of the most fruitful ones. They have created many memorable songs together, including Baar Baar Din Ye Aaye.

      -

      The meaning of this song is very clear and straightforward. It is a birthday song that wishes the birthday person a long and happy life. It expresses the love and affection of the singer for the birthday person, and hopes that they will always be happy and healthy. It also conveys the joy and excitement of celebrating the birthday with friends and family. It is a song that celebrates life and its blessings.

      -

      The video and the actors

      -

      The video of this song is from the movie Farz, which was released in 1967. The movie is a spy thriller that stars Jeetendra as a secret agent who falls in love with Babita, a dancer who works for the villain. The movie was a blockbuster hit and was praised for its action, comedy, music, and romance. The movie also marked the debut of Babita, who went on to become a popular actress in the 1960s and 1970s.

      -

      download bar bar din ye aaye mp3 song
      -download bar bar din ye aaye birthday song
      -download bar bar din ye aaye by mohammed rafi
      -download bar bar din ye aaye from farz movie
      -download bar bar din ye aaye with name
      -download bar bar din ye aaye video song
      -download bar bar din ye aaye instrumental
      -download bar bar din ye aaye female version
      -download bar bar din ye aaye remix
      -download bar bar din ye aaye karaoke
      -download bar bar din ye aaye ringtone
      -download bar bar din ye aaye whatsapp status
      -download bar bar din ye aaye lyrics
      -download bar bar din ye aaye in hindi
      -download bar bar din ye aaye in english
      -download baar baar din yeh aaye happy birthday to you
      -download baar baar din yeh aaye tu jiye hazaro saal
      -download baar baar din yeh aaye jeetendra babita song
      -download baar baar din yeh aaye laxmikant pyarelal song
      -download baar baar din yeh aaye anand bakshi song
      -download baar baar din yeh aaye gaane sune ansune song
      -download baar baar din yeh aaye goldmines telefilms song
      -download baar baar din yeh aaye youtube video
      -download baar baar din yeh aaye wynk music song
      -download baar baar din yeh aaye soundcloud song
      -how to download bar bar din ye aaye song
      -where to download bar bar din ye aaye song
      -best site to download bar bar din ye aaye song
      -free download of bar bar din ye aaye song
      -high quality download of bar bar din ye aaye song
      -low quality download of bar bar din ye aaye song
      -online download of bar bar din ye aaye song
      -offline download of bar bar din ye aaye song
      -direct download of bar bar din ye aaye song
      -fast download of bar bar din ye aaye song
      -slow download of bar bar din ye aaye song
      -easy download of bar bar din ye aaye song
      -hard download of bar bar din ye aaye song
      -safe download of bar bar din ye aaye song
      -virus free download of bar bar din ye aaye song
      -legal download of bar bar din ye aaye song
      -illegal download of bar bar din ye aaye song
      -original version of the downloaded song -bar-bar-din-yeh-aayi-happy-birthday-to-you.mp3
      -new version of the downloaded song -bar-bar-din-yeh-aayi-happy-birthday-to-you.mp3
      -old version of the downloaded song -bar-bar-din-yeh-aayi-happy-birthday-to-you.mp3
      -latest version of the downloaded song -bar-bar-din-yeh-aayi-happy-birthday-to-you.mp3

      -

      The video of this song features Jeetendra and Babita in a lively and colorful setting, where they celebrate the birthday of their friend with a cake, balloons, confetti, and fireworks. The video also shows them dancing with other guests and having fun. The video captures the essence of the song perfectly, as it shows the happiness and warmth of the occasion.

      -

      Benefits of Baar Baar Din Ye Aaye

      -

      It makes the birthday person feel special

      -

      One of the main benefits of this song is that it makes the birthday person feel special and loved. It is a song that conveys the sincere wishes and emotions of the singer for the birthday person. It tells them that they are important, that they are valued, that they are cherished. It also tells them that they have a lot to look forward to in life, that they have a lot to achieve, that they have a lot to enjoy. It is a song that boosts their confidence and self-esteem.

      -

      It creates a festive mood and a sense of nostalgia

      -

      Another benefit of this song is that it creates a festive mood and a sense of nostalgia among the listeners. It is a song that brings out the joy and excitement of celebrating a birthday with friends and family. It is a song that reminds people of their childhood memories and old friends. It is a song that makes people smile and laugh. It is a song that makes people feel young again.

      -

      It suits any age group and any occasion

      -

      A third benefit of this song is that it suits any age group and any occasion. It is a song that can be played for anyone who is celebrating their birthday, whether they are young or old, male or female, rich or poor. It is a song that can be played on any occasion, whether it is a formal party or an informal gathering, whether it is at home or outside, whether it is day or night. It is a song that can fit any mood, whether it is cheerful or sentimental, whether it is playful or romantic. It is a song that can please anyone who hears it.

      -

      Conclusion

      -

      Summary of the main points

      -

      To conclude, Baar Baar Din Ye Aaye is the ultimate birthday song that you should download for your loved ones. It is a song that has many features, such as:

      -
        -
      • It is sung by Mohammed Rafi, composed by Laxmikant-Pyarelal, and written by Anand Bakshi.
      • -
      • It has a catchy tune, a simple chorus, and a positive message.
      • -
      • It has a video that features Jeetendra and Babita in a festive setting.
      • -
      -

      It is also a song that has many benefits, such as:

      -
        -
      • It makes the birthday person feel special and loved.
      • -
      • It creates a festive mood and a sense of nostalgia.
      • -
      • It suits any age group and any occasion.
      • -
      -

      Call to action

      -

      So, what are you waiting for? Download Baar Baar Din Ye Aaye today and make your birthday celebrations more memorable and fun. You can download it for free from Wynk Music, YouTube, or Gaana. You can also share it with your friends and family on social media and spread the happiness. And don't forget to sing along and wish your loved ones a happy birthday with Baar Baar Din Ye Aaye.

      -

      Frequently Asked Questions

      -

      Q: Who is the original singer of Baar Baar Din Ye Aaye?

      -

      A: The original singer of Baar Baar Din Ye Aaye is Mohammed Rafi, one of the most influential and versatile singers of Indian cinema.

      -

      Q: Which movie is Baar Baar Din Ye Aaye from?

      -

      A: Baar Baar Din Ye Aaye is from the movie Farz, which was released in 1967. The movie is a spy thriller that stars Jeetendra and Babita.

      -

      Q: How can I download Baar Baar Din Ye Aaye for free?

      -

      A: You can download Baar Baar Din Ye Aaye for free from Wynk Music, YouTube, or Gaana. You can also stream it online from these sources.

      -

      Q: What are the benefits of Baar Baar Din Ye Aaye?

      -

      A: Some of the benefits of Baar Baar Din Ye Aaye are:

      -
        -
      • It makes the birthday person feel special and loved.
      • -
      • It creates a festive mood and a sense of nostalgia.
      • -
      • It suits any age group and any occasion.
      • -
      -

      Q: What are some other popular birthday songs in Hindi?

      -

      A: Some other popular birthday songs in Hindi are:

      -
        -
      • Tum Jiyo Hazaaron Saal from Sujata
      • -
      • Birthday Song from ABCD 2
      • -
      • Baar Baar Yeh Din Aaye from Happy Birthday To You
      • -
      • Tu Kitni Achhi Hai from Raja Aur Runk
      • -
      • Birthday Bash from Dilliwaali Zaalim Girlfriend
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Hello Neighbor Hide and Seek APK MOD for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Hello Neighbor Hide and Seek APK MOD for Android.md deleted file mode 100644 index c640731207f85c45f2cf336229c235e8a7612d24..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Hello Neighbor Hide and Seek APK MOD for Android.md +++ /dev/null @@ -1,109 +0,0 @@ -
      -

      Hello Neighbor: Hide and Seek - A Prequel to the Stealth Horror Hit

-

If you are a fan of the Hello Neighbor game, you might be interested in its prequel, Hello Neighbor: Hide and Seek. This dramatic prequel to the original stealth horror hit follows the tragic story of the Neighbor's family. You will play a game of hide and seek with your brother as you both deal with the loss of a family member. The game explains the events that lead up to the main game and reveals some secrets that you might not have known before.

      -

      hello neighbor hide and seek apkmody


      Download ===== https://gohhs.com/2uPpHt



      -

In this article, we will tell you everything you need to know about Hello Neighbor: Hide and Seek, including how to download it, how to play it, and what the reviews say about it. We will also share some tips and tricks to help you complete the game and have more fun.

      -

      What is Hello Neighbor: Hide and Seek?

      -

      Hello Neighbor: Hide and Seek is a game that was developed by Dynamic Pixels and published by tinyBuild. It was released on December 10, 2019 for Windows, PlayStation 4, Xbox One, Nintendo Switch, iOS, Android, and Stadia. It is a prequel to the popular game Hello Neighbor, which was released in 2017.

      -

      The story of the Neighbor's family

      -

      The game takes place in the imagination of a small girl, who is playing hide and seek with her brother. The girl is the daughter of the Neighbor, who is the antagonist of the main game. The game shows how the family deals with the death of their mother, who died in a car accident. The game also reveals some secrets about the Neighbor's past, such as why he became so paranoid and obsessed with his basement.

      -

      The gameplay of hide and seek

      -

      The game consists of five stages, each representing a different scenario of hide and seek. The girl has to collect stuffed animals and put them in a cage, while avoiding being caught by her brother, who is wearing a tiger mask. The brother acts as an advanced AI that learns from your actions and tries to find you. The game also features puzzles that you have to solve in order to progress.

      -

      The game has a lot of surreal elements, such as giant toys, animals, and objects that change the landscape. The game also has some horror elements, such as jump scares, dark atmosphere, and creepy sounds. The game is meant to be challenging and unpredictable, as you never know what will happen next.


      How to download Hello Neighbor: Hide and Seek?


      Download links for different platforms


      If you want to play Hello Neighbor: Hide and Seek, you can download it from various sources depending on your platform. Here are some links that you can use:

      • For Windows PC, you can buy the game from Steam ([6](https://store.steampowered.com/app/960420/Hello_Neighbor_Hide_and_Seek/)) or Epic Games Store ([20](https://www.epicgames.com/store/en-US/p/hello-neighbor-hide-and-seek)).
      • For PlayStation 4, you can buy the game from PlayStation Store ([21](https://store.playstation.com/en-us/product/UP0290-CUSA14405_00-HELLOHIDEANDSEEK)).
      • For Xbox One, you can buy the game from Microsoft Store ([22](https://www.microsoft.com/en-us/p/hello-neighbor-hide-and-seek/9nq8jxkzgk5x)).

      What are the reviews of Hello Neighbor: Hide and Seek?

        Hello Neighbor: Hide and Seek has received mixed or average reviews from critics and players. The Xbox One version has a Metascore of 55 out of 100 based on 4 critic reviews, while the Switch version has only 1 critic review and no Metascore yet. The Xbox One version also has a user score of 5.0 out of 10 based on 18 ratings, and the Switch version has no user score yet. The game is also available for PC, PlayStation 4, iOS, Android, and Stadia, but there are no Metacritic scores for these platforms.


        The pros and cons of the game


        According to some of the reviews, the game has some pros and cons that may affect your enjoyment of it. Here are some of them:

      • The game has an interesting premise and story that explores the tragic backstory of the Neighbor's family and reveals some secrets about the main game.
      • The game has some tense and scary moments that involve hiding and seeking from your brother, who acts as an advanced AI that learns from your actions and tries to catch you.
      • The game has some surreal and varied levels that represent different scenarios of hide and seek in the imagination of the girl, with giant toys, animals, and objects that change the landscape.
      • The game has some puzzles that require you to collect stuffed animals and put them in a cage, while avoiding or distracting your brother.
      • The game has sloppy controls, especially on the Switch version, that make movement and interaction difficult and frustrating.
      • The game has aimless direction and atrocious level design that make finding and reaching the stuffed animals a tedious trial-and-error process with no clear clues or instructions.
      • The game has poor graphics and sound quality that do not match the standards of current-generation consoles or devices.
      • The game has many bugs and glitches that affect the gameplay and performance, such as freezing, crashing, clipping, disappearing items, etc.

        The ratings and opinions of critics and players


        Here are some excerpts from reviews of Hello Neighbor: Hide and Seek:


        "Hello Neighbor: Hide & Seek is a one-trick pony that had a game built around the premise of drawing a reaction out of the player via jump scares, which it does very well. If the levels and puzzles were more focused and honed-in, there could be a logical and interesting foundation for an experiential dread and tension. Forget moving out of the neighborhood, I’d suggest moving to the next county over." - Joel A. DeWitte from Nintendo World Report


        "Hello Neighbor: Hide & Seek is a prequel to Hello Neighbor. It’s not a bad idea to tell us what happened before we started sneaking into our neighbor’s house. However, this prequel is not very good. The gameplay is boring, repetitive, frustrating, confusing… I could go on. The graphics are not great either. The only thing I liked about this game was its story. It’s sad, dark, twisted… It’s not what I expected from this colorful game." - A user review from Metacritic


        "Hello Neighbor: Hide & Seek is a decent stealth horror game that has some genuine thrills and creepy moments. The game's story is intriguing and emotional, showing a different side of the Neighbor's character. The game's levels are creative and imaginative, offering a variety of challenges and surprises. The game's AI is smart and adaptive, making each hide-and-seek session unpredictable and intense. However, the game also suffers from some technical issues, such as glitches, bugs, crashes, etc. The game also lacks polish and refinement in terms of graphics, sound, controls, etc. The game also could use more guidance and clarity in terms of objectives, puzzles, items, etc. Overall, Hello Neighbor: Hide & Seek is a game that has potential but needs more work." - A user review from Steam

        How to play Hello Neighbor: Hide and Seek?


        Hello Neighbor: Hide and Seek is a game that requires stealth, strategy, and creativity. You have to collect stuffed animals and put them in a cage, while avoiding being caught by your brother. You also have to solve puzzles and explore the levels to find hidden secrets and clues. Here are some tips and tricks to help you play the game:


        Tips and tricks for hiding and seeking

      • Use the environment to your advantage. You can hide behind objects, under furniture, in closets, etc. You can also use objects to block doors, create distractions, or throw them at your brother.
      • Listen to the sounds and music cues. They will tell you if your brother is near, if he has seen you, or if he has lost sight of you.
      • Be careful with your flashlight. It can help you see in the dark, but it can also alert your brother to your location.
      • Be aware of your stamina. You can run faster than your brother, but you will get tired quickly. You can replenish your stamina by resting or drinking water.
      • Be smart with your inventory. You can only carry four items at a time, so choose wisely what you need and what you can drop.
      • Save often. The game does not have an autosave feature, so you have to manually save your progress at certain points in the levels.

        Animal locations guide for stage 1


        The first stage of the game is set in the living room of the house, where you have to collect six stuffed animals and put them in a cage near the fireplace. Here are the locations of the animals:

      | Animal  | Location                                 |
      |---------|------------------------------------------|
      | Giraffe | On top of a bookshelf near the entrance. |
      | Penguin | In a box under the stairs.               |
      | Bear    | In a closet near the kitchen.            |
      | Lion    | In a drawer under the TV.                |
      | Rabbit  | In a basket on the couch.                |
      | Zebra   | In a cabinet near the fireplace.         |

        To get some of the animals, you may need to use other objects or tools, such as a chair, a magnet, a key, etc. You may also need to avoid or distract your brother, who will patrol the area and chase you if he sees you.

      Q: How long is Hello Neighbor: Hide and Seek?

      A: Hello Neighbor: Hide and Seek is a relatively short game, depending on your skill and luck. The game can be completed in about 2 to 4 hours, or even less if you know what to do and where to go. However, the game also has some replay value, as you can try to find all the secrets and achievements, or play on a higher difficulty level.

      Q: Is Hello Neighbor: Hide and Seek scary?

      A: Hello Neighbor: Hide and Seek is not a typical horror game, but it does have some scary elements that may frighten or disturb some players. The game has some jump scares, dark atmosphere, and creepy sounds that create tension and suspense. The game also has some themes and scenes that deal with death, grief, violence, and madness that may be upsetting or triggering for some players. The game is rated T for Teen by the ESRB, so it is not suitable for young children.

      Q: Is Hello Neighbor: Hide and Seek connected to Hello Neighbor?

      A: Yes, Hello Neighbor: Hide and Seek is a prequel to Hello Neighbor, which means it takes place before the events of the main game. The game explains some of the backstory and motives of the Neighbor, who is the antagonist of the main game. The game also reveals some secrets and Easter eggs that relate to the main game. However, you do not need to play Hello Neighbor to understand or enjoy Hello Neighbor: Hide and Seek, as it is a standalone game with its own story and characters.

      Q: Is Hello Neighbor: Hide and Seek multiplayer?

      A: No, Hello Neighbor: Hide and Seek is a single-player game that does not have any multiplayer modes or features. You can only play as the girl who is hiding from her brother, who is controlled by the AI. You cannot play as the brother or with other players online or locally.

      Q: Is Hello Neighbor: Hide and Seek free?

      A: No, Hello Neighbor: Hide and Seek is not a free game. You have to buy it from various sources depending on your platform. The game costs $29.99 for Windows PC, PlayStation 4, Xbox One, Nintendo Switch, and Stadia, and $14.99 for iOS and Android devices.

    \ No newline at end of file diff --git a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/util/utils.py b/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/util/utils.py deleted file mode 100644 index e9f0318e306fa04bff0ada70486b41aaa69b07c8..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/util/utils.py +++ /dev/null @@ -1,608 +0,0 @@ -import argparse -import json -import warnings -from collections import OrderedDict -from copy import deepcopy -from typing import Any, Dict, List - -import numpy as np -import torch -from transformers import AutoTokenizer - -from groundingdino.util.slconfig import SLConfig - - -def slprint(x, name="x"): - if isinstance(x, (torch.Tensor, np.ndarray)): - print(f"{name}.shape:", x.shape) - elif isinstance(x, (tuple, list)): - print("type x:", type(x)) - for i in range(min(10, len(x))): - slprint(x[i], f"{name}[{i}]") - elif isinstance(x, dict): - for k, v in x.items(): - slprint(v, f"{name}[{k}]") - else: - print(f"{name}.type:", type(x)) - - -def clean_state_dict(state_dict): - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k[:7] == "module.": - k = k[7:] # remove `module.` - new_state_dict[k] = v - return new_state_dict - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". 
(%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class CocoClassMapper: - def __init__(self) -> None: - self.category_map_str = { - "1": 1, - "2": 2, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7, - "8": 8, - "9": 9, - "10": 10, - "11": 11, - "13": 12, - "14": 13, - "15": 14, - "16": 15, - "17": 16, - "18": 17, - "19": 18, - "20": 19, - "21": 20, - "22": 21, - "23": 22, - "24": 23, - "25": 24, - "27": 25, - "28": 26, - "31": 27, - "32": 28, - "33": 29, - "34": 30, - "35": 31, - "36": 32, - "37": 33, - "38": 34, - "39": 35, - "40": 36, - "41": 37, - "42": 38, - "43": 39, - "44": 40, - "46": 41, - "47": 42, - "48": 43, - "49": 44, - "50": 45, - "51": 46, - "52": 47, - "53": 48, - "54": 49, - "55": 50, - "56": 51, - "57": 52, - "58": 53, - "59": 54, - "60": 55, - "61": 56, - "62": 57, - "63": 58, - "64": 59, - "65": 60, - "67": 61, - "70": 62, - "72": 63, - "73": 64, - "74": 65, - "75": 66, - "76": 67, - "77": 68, - "78": 69, - "79": 70, - "80": 71, - "81": 72, - "82": 73, - "84": 74, - "85": 75, - "86": 76, - "87": 77, - "88": 78, - "89": 79, - "90": 80, - } - self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()} - self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()} - - def origin2compact(self, idx): - return self.origin2compact_mapper[int(idx)] - - def compact2origin(self, idx): - return self.compact2origin_mapper[int(idx)] - - -def to_device(item, device): - if isinstance(item, torch.Tensor): - return item.to(device) - elif isinstance(item, list): - return [to_device(i, device) for i in item] - elif isinstance(item, dict): - return {k: to_device(v, device) for k, v in item.items()} - else: - raise NotImplementedError( - "Call Shilong if you use other containers! type: {}".format(type(item)) - ) - - -# -def get_gaussian_mean(x, axis, other_axis, softmax=True): - """ - - Args: - x (float): Input images(BxCxHxW) - axis (int): The index for weighted mean - other_axis (int): The other index - - Returns: weighted index for axis, BxC - - """ - mat2line = torch.sum(x, axis=other_axis) - # mat2line = mat2line / mat2line.mean() * 10 - if softmax: - u = torch.softmax(mat2line, axis=2) - else: - u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6) - size = x.shape[axis] - ind = torch.linspace(0, 1, size).to(x.device) - batch = x.shape[0] - channel = x.shape[1] - index = ind.repeat([batch, channel, 1]) - mean_position = torch.sum(index * u, dim=2) - return mean_position - - -def get_expected_points_from_map(hm, softmax=True): - """get_gaussian_map_from_points - B,C,H,W -> B,N,2 float(0, 1) float(0, 1) - softargmax function - - Args: - hm (float): Input images(BxCxHxW) - - Returns: - weighted index for axis, BxCx2. float between 0 and 1. 
- - """ - # hm = 10*hm - B, C, H, W = hm.shape - y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C - x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C - # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2) - return torch.stack([x_mean, y_mean], dim=2) - - -# Positional encoding (section 5.1) -# borrow from nerf -class Embedder: - def __init__(self, **kwargs): - self.kwargs = kwargs - self.create_embedding_fn() - - def create_embedding_fn(self): - embed_fns = [] - d = self.kwargs["input_dims"] - out_dim = 0 - if self.kwargs["include_input"]: - embed_fns.append(lambda x: x) - out_dim += d - - max_freq = self.kwargs["max_freq_log2"] - N_freqs = self.kwargs["num_freqs"] - - if self.kwargs["log_sampling"]: - freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs) - else: - freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs) - - for freq in freq_bands: - for p_fn in self.kwargs["periodic_fns"]: - embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) - out_dim += d - - self.embed_fns = embed_fns - self.out_dim = out_dim - - def embed(self, inputs): - return torch.cat([fn(inputs) for fn in self.embed_fns], -1) - - -def get_embedder(multires, i=0): - import torch.nn as nn - - if i == -1: - return nn.Identity(), 3 - - embed_kwargs = { - "include_input": True, - "input_dims": 3, - "max_freq_log2": multires - 1, - "num_freqs": multires, - "log_sampling": True, - "periodic_fns": [torch.sin, torch.cos], - } - - embedder_obj = Embedder(**embed_kwargs) - embed = lambda x, eo=embedder_obj: eo.embed(x) - return embed, embedder_obj.out_dim - - -class APOPMeter: - def __init__(self) -> None: - self.tp = 0 - self.fp = 0 - self.tn = 0 - self.fn = 0 - - def update(self, pred, gt): - """ - Input: - pred, gt: Tensor() - """ - assert pred.shape == gt.shape - self.tp += torch.logical_and(pred == 1, gt == 1).sum().item() - self.fp += torch.logical_and(pred == 1, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 0, gt == 0).sum().item() - self.tn += torch.logical_and(pred == 1, gt == 0).sum().item() - - def update_cm(self, tp, fp, tn, fn): - self.tp += tp - self.fp += fp - self.tn += tn - self.tn += fn - - -def inverse_sigmoid(x, eps=1e-5): - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -def get_raw_dict(args): - """ - return the dicf contained in args. - - e.g: - >>> with open(path, 'w') as f: - json.dump(get_raw_dict(args), f, indent=2) - """ - if isinstance(args, argparse.Namespace): - return vars(args) - elif isinstance(args, dict): - return args - elif isinstance(args, SLConfig): - return args._cfg_dict - else: - raise NotImplementedError("Unknown type {}".format(type(args))) - - -def stat_tensors(tensor): - assert tensor.dim() == 1 - tensor_sm = tensor.softmax(0) - entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum() - - return { - "max": tensor.max(), - "min": tensor.min(), - "mean": tensor.mean(), - "var": tensor.var(), - "std": tensor.var() ** 0.5, - "entropy": entropy, - } - - -class NiceRepr: - """Inherit from this class and define ``__nice__`` to "nicely" print your - objects. - - Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function - Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. - If the inheriting class has a ``__len__``, method then the default - ``__nice__`` method will return its length. - - Example: - >>> class Foo(NiceRepr): - ... def __nice__(self): - ... 
return 'info' - >>> foo = Foo() - >>> assert str(foo) == '' - >>> assert repr(foo).startswith('>> class Bar(NiceRepr): - ... pass - >>> bar = Bar() - >>> import pytest - >>> with pytest.warns(None) as record: - >>> assert 'object at' in str(bar) - >>> assert 'object at' in repr(bar) - - Example: - >>> class Baz(NiceRepr): - ... def __len__(self): - ... return 5 - >>> baz = Baz() - >>> assert str(baz) == '' - """ - - def __nice__(self): - """str: a "nice" summary string describing this module""" - if hasattr(self, "__len__"): - # It is a common pattern for objects to use __len__ in __nice__ - # As a convenience we define a default __nice__ for these objects - return str(len(self)) - else: - # In all other cases force the subclass to overload __nice__ - raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}") - - def __repr__(self): - """str: the string of the module""" - try: - nice = self.__nice__() - classname = self.__class__.__name__ - return f"<{classname}({nice}) at {hex(id(self))}>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - def __str__(self): - """str: the string of the module""" - try: - classname = self.__class__.__name__ - nice = self.__nice__() - return f"<{classname}({nice})>" - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - -def ensure_rng(rng=None): - """Coerces input into a random number generator. - - If the input is None, then a global random state is returned. - - If the input is a numeric value, then that is used as a seed to construct a - random state. Otherwise the input is returned as-is. - - Adapted from [1]_. - - Args: - rng (int | numpy.random.RandomState | None): - if None, then defaults to the global rng. Otherwise this can be an - integer or a RandomState class - Returns: - (numpy.random.RandomState) : rng - - a numpy random number generator - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 - """ - - if rng is None: - rng = np.random.mtrand._rand - elif isinstance(rng, int): - rng = np.random.RandomState(rng) - else: - rng = rng - return rng - - -def random_boxes(num=1, scale=1, rng=None): - """Simple version of ``kwimage.Boxes.random`` - - Returns: - Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
- - References: - https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 - - Example: - >>> num = 3 - >>> scale = 512 - >>> rng = 0 - >>> boxes = random_boxes(num, scale, rng) - >>> print(boxes) - tensor([[280.9925, 278.9802, 308.6148, 366.1769], - [216.9113, 330.6978, 224.0446, 456.5878], - [405.3632, 196.3221, 493.3953, 270.7942]]) - """ - rng = ensure_rng(rng) - - tlbr = rng.rand(num, 4).astype(np.float32) - - tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) - tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) - br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) - br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) - - tlbr[:, 0] = tl_x * scale - tlbr[:, 1] = tl_y * scale - tlbr[:, 2] = br_x * scale - tlbr[:, 3] = br_y * scale - - boxes = torch.from_numpy(tlbr) - return boxes - - -class ModelEma(torch.nn.Module): - def __init__(self, model, decay=0.9997, device=None): - super(ModelEma, self).__init__() - # make a copy of the model for accumulating moving average of weights - self.module = deepcopy(model) - self.module.eval() - - # import ipdb; ipdb.set_trace() - - self.decay = decay - self.device = device # perform ema on different device from model if set - if self.device is not None: - self.module.to(device=device) - - def _update(self, model, update_fn): - with torch.no_grad(): - for ema_v, model_v in zip( - self.module.state_dict().values(), model.state_dict().values() - ): - if self.device is not None: - model_v = model_v.to(device=self.device) - ema_v.copy_(update_fn(ema_v, model_v)) - - def update(self, model): - self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m) - - def set(self, model): - self._update(model, update_fn=lambda e, m: m) - - -class BestMetricSingle: - def __init__(self, init_res=0.0, better="large") -> None: - self.init_res = init_res - self.best_res = init_res - self.best_ep = -1 - - self.better = better - assert better in ["large", "small"] - - def isbetter(self, new_res, old_res): - if self.better == "large": - return new_res > old_res - if self.better == "small": - return new_res < old_res - - def update(self, new_res, ep): - if self.isbetter(new_res, self.best_res): - self.best_res = new_res - self.best_ep = ep - return True - return False - - def __str__(self) -> str: - return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep) - - def __repr__(self) -> str: - return self.__str__() - - def summary(self) -> dict: - return { - "best_res": self.best_res, - "best_ep": self.best_ep, - } - - -class BestMetricHolder: - def __init__(self, init_res=0.0, better="large", use_ema=False) -> None: - self.best_all = BestMetricSingle(init_res, better) - self.use_ema = use_ema - if use_ema: - self.best_ema = BestMetricSingle(init_res, better) - self.best_regular = BestMetricSingle(init_res, better) - - def update(self, new_res, epoch, is_ema=False): - """ - return if the results is the best. 
- """ - if not self.use_ema: - return self.best_all.update(new_res, epoch) - else: - if is_ema: - self.best_ema.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - else: - self.best_regular.update(new_res, epoch) - return self.best_all.update(new_res, epoch) - - def summary(self): - if not self.use_ema: - return self.best_all.summary() - - res = {} - res.update({f"all_{k}": v for k, v in self.best_all.summary().items()}) - res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()}) - res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()}) - return res - - def __repr__(self) -> str: - return json.dumps(self.summary(), indent=2) - - def __str__(self) -> str: - return self.__repr__() - - -def targets_to(targets: List[Dict[str, Any]], device): - """Moves the target dicts to the given device.""" - excluded_keys = [ - "questionId", - "tokens_positive", - "strings_positive", - "tokens", - "dataset_name", - "sentence_id", - "original_img_id", - "nb_eval", - "task_id", - "original_id", - "token_span", - "caption", - "dataset_type", - ] - return [ - {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets - ] - - -def get_phrases_from_posmap( - posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer -): - assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor" - if posmap.dim() == 1: - non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() - token_ids = [tokenized["input_ids"][i] for i in non_zero_idx] - return tokenizer.decode(token_ids) - else: - raise NotImplementedError("posmap must be 1-dim") diff --git a/spaces/fffiloni/sd-img-variations/README.md b/spaces/fffiloni/sd-img-variations/README.md deleted file mode 100644 index 1cecbd598ca6444cae6564d43800b1b6f01a9ae1..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/sd-img-variations/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion Img Variations CPU -emoji: 🐓🐣🐣🐣🐣 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_42.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_42.py deleted file mode 100644 index 4bbc09a78cb8abacafe85a8d91034b0adce8d14d..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_42.py +++ /dev/null @@ -1,18 +0,0 @@ -def is_spam(message): - # Words/phrases commonly found in spam messages - spam_words = ["↑", "무료거부", "멤버십", "무료체험", "https://me2.kr", "비밀번호", "수익", "상승", "룰렛", "무료강의", "예약"] - - # Check if any of the spam words/phrases are in the input message - for word in spam_words: - if word in message: - return True - - # Check if the message contains "광고" at the beginning - if message.startswith("(광고)") or message.startswith("* (광고)"): - return True - - # Check if the message contains excessive line breaks - if message.count("\n") >= 3: - return True - - return False \ No newline at end of file diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_53.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_53.py deleted file mode 100644 index 22d6195940738ff7066149fb9136674180c69a70..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_53.py +++ /dev/null @@ -1,32 +0,0 @@ - -import re - -def is_spam(message): - """ - This function takes a message and returns 
True if it's a spam message and False otherwise. - """ - # check for spam keywords - spam_keywords = ["(광고)", "수익", "무료", "VIP", "안전", "건", "신입", "정보", "트레이딩", "대표님", "추천", "공개", "체험반", "보유종목", "프로", "실력", "초보", "개인정보", - "비밀번호", "복구", "님", "혜택"] - - # check for URL patterns - url_pattern = re.compile( - r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') - - # check for phone numbers - phone_pattern = re.compile(r'(\d{2,4}-\d{3,4}-\d{3,4})|(\(\d{2,4}\)\d{3,4}-\d{3,4})') - - # check if message contains any spam keywords - if any(keyword in message for keyword in spam_keywords): - return True - - # check if message contains URLs - if url_pattern.search(message): - return True - - # check if message contains phone numbers - if phone_pattern.search(message): - return True - - # if message passed all the checks, it is not spam - return False diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/socialaiparamenv.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/socialaiparamenv.py deleted file mode 100644 index 49cb290848fa9a272cb9d4c2b579aa33ddb3c9de..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/social_ai_envs/socialaiparamenv.py +++ /dev/null @@ -1,430 +0,0 @@ -import warnings -from itertools import chain -from gym_minigrid.minigrid import * -from gym_minigrid.parametric_env import * -from gym_minigrid.register import register -from gym_minigrid.social_ai_envs import InformationSeekingEnv, MarblePassEnv, LeverDoorEnv, MarblePushEnv, AppleStealingEnv, ObjectsCollaborationEnv -from gym_minigrid.social_ai_envs.socialaigrammar import SocialAIGrammar, SocialAIActions, SocialAIActionSpace -from gym_minigrid.curriculums import * - -import inspect, importlib - -# for used for automatic registration of environments -defined_classes = [name for name, _ in inspect.getmembers(importlib.import_module(__name__), inspect.isclass)] - - -class SocialAIParamEnv(gym.Env): - """ - Meta-Environment containing all other environment (multi-task learning) - """ - - def __init__( - self, - size=10, - hidden_npc=False, - see_through_walls=False, - max_steps=80, # before it was 50, 80 is maybe better because of emulation ? - switch_no_light=True, - lever_active_steps=10, - curriculum=None, - expert_curriculum_thresholds=(0.9, 0.8), - expert_curriculum_average_interval=100, - expert_curriculum_minimum_episodes=1000, - n_colors=3, - egocentric_observation=True, - ): - if n_colors != 3: - warnings.warn(f"You are ussing {n_colors} instead of the usual 3.") - - self.lever_active_steps = lever_active_steps - self.egocentric_observation = egocentric_observation - - # Number of cells (width and height) in the agent view - self.agent_view_size = 7 - - # Number of object dimensions (i.e. 
number of channels in symbolic image) - # if egocentric is not used absolute coordiantes are added to the encoding - self.encoding_size = 6 + 2*bool(not egocentric_observation) - - self.max_steps = max_steps - - self.switch_no_light = switch_no_light - - # Observations are dictionaries containing an - # encoding of the grid and a textual 'mission' string - self.observation_space = spaces.Box( - low=0, - high=255, - shape=(self.agent_view_size, self.agent_view_size, self.encoding_size), - dtype='uint8' - ) - self.observation_space = spaces.Dict({ - 'image': self.observation_space - }) - - self.hidden_npc = hidden_npc - - # construct the tree - self.parameter_tree = self.construct_tree() - - # print tree for logging purposes - # self.parameter_tree.print_tree() - - if curriculum in ["intro_seq", "intro_seq_scaf"]: - print("Scaffolding Expert") - self.expert_curriculum_thresholds = expert_curriculum_thresholds - self.expert_curriculum_average_interval = expert_curriculum_average_interval - self.expert_curriculum_minimum_episodes = expert_curriculum_minimum_episodes - self.curriculum = ScaffoldingExpertCurriculum( - phase_thresholds=self.expert_curriculum_thresholds, - average_interval=self.expert_curriculum_average_interval, - minimum_episodes=self.expert_curriculum_minimum_episodes, - type=curriculum, - ) - - else: - self.curriculum = curriculum - - self.current_env = None - - self.envs = {} - - if self.parameter_tree.root.label == "Env_type": - for env_type in self.parameter_tree.root.children: - if env_type.label == "Information_seeking": - e = InformationSeekingEnv( - max_steps=max_steps, - size=size, - switch_no_light=self.switch_no_light, - see_through_walls=see_through_walls, - n_colors=n_colors, - hidden_npc=self.hidden_npc, - egocentric_observation=self.egocentric_observation, - ) - self.envs["Info"] = e - - elif env_type.label == "Collaboration": - e = MarblePassEnv(max_steps=max_steps, size=size, hidden_npc=self.hidden_npc, egocentric_observation=egocentric_observation) - self.envs["Collaboration_Marble_Pass"] = e - - e = LeverDoorEnv(max_steps=max_steps, size=size, lever_active_steps=self.lever_active_steps, hidden_npc=self.hidden_npc, egocentric_observation=egocentric_observation) - self.envs["Collaboration_Lever_Door"] = e - - e = MarblePushEnv(max_steps=max_steps, size=size, lever_active_steps=self.lever_active_steps, hidden_npc=self.hidden_npc, egocentric_observation=egocentric_observation) - self.envs["Collaboration_Marble_Push"] = e - - e = ObjectsCollaborationEnv(max_steps=max_steps, size=size, hidden_npc=self.hidden_npc, switch_no_light=self.switch_no_light, egocentric_observation=egocentric_observation) - self.envs["Collaboration_Objects"] = e - - elif env_type.label == "AppleStealing": - e = AppleStealingEnv(max_steps=max_steps, size=size, see_through_walls=see_through_walls, - hidden_npc=self.hidden_npc, egocentric_observation=egocentric_observation) - self.envs["OthersPerceptionInference"] = e - - else: - raise ValueError(f"Undefined env type {env_type.label}.") - - else: - raise ValueError("Env_type should be the root node") - - self.all_npc_utterance_actions = sorted(list(set(chain(*[e.all_npc_utterance_actions for e in self.envs.values()])))) - - self.grammar = SocialAIGrammar() - - # set up the action space - self.action_space = SocialAIActionSpace - self.actions = SocialAIActions - self.npc_prim_actions_dict = SocialAINPCActionsDict - - # all envs must have the same grammar - for env in self.envs.values(): - assert isinstance(env.grammar, type(self.grammar)) - 
assert env.actions is self.actions - assert env.action_space is self.action_space - - # suggestion: encoding size is automatically set to max? - assert env.encoding_size is self.encoding_size - assert env.observation_space == self.observation_space - assert env.prim_actions_dict == self.npc_prim_actions_dict - - self.reset() - - def draw_tree(self, ignore_labels=[], savedir="viz"): - self.parameter_tree.draw_tree("{}/param_tree_{}".format(savedir, self.spec.id), ignore_labels=ignore_labels) - - def print_tree(self): - self.parameter_tree.print_tree() - - def construct_tree(self): - tree = ParameterTree() - - env_type_nd = tree.add_node("Env_type", type="param") - - # Information seeking - inf_seeking_nd = tree.add_node("Information_seeking", parent=env_type_nd, type="value") - - prag_fr_compl_nd = tree.add_node("Pragmatic_frame_complexity", parent=inf_seeking_nd, type="param") - tree.add_node("No", parent=prag_fr_compl_nd, type="value") - tree.add_node("Eye_contact", parent=prag_fr_compl_nd, type="value") - tree.add_node("Ask", parent=prag_fr_compl_nd, type="value") - tree.add_node("Ask_Eye_contact", parent=prag_fr_compl_nd, type="value") - - # scaffolding - scaffolding_nd = tree.add_node("Scaffolding", parent=inf_seeking_nd, type="param") - scaffolding_N_nd = tree.add_node("N", parent=scaffolding_nd, type="value") - scaffolding_Y_nd = tree.add_node("Y", parent=scaffolding_nd, type="value") - - cue_type_nd = tree.add_node("Cue_type", parent=scaffolding_N_nd, type="param") - tree.add_node("Language_Color", parent=cue_type_nd, type="value") - tree.add_node("Language_Feedback", parent=cue_type_nd, type="value") - tree.add_node("Pointing", parent=cue_type_nd, type="value") - tree.add_node("Emulation", parent=cue_type_nd, type="value") - - - N_bo_nd = tree.add_node("N", parent=inf_seeking_nd, type="param") - tree.add_node("2", parent=N_bo_nd, type="value") - tree.add_node("1", parent=N_bo_nd, type="value") - - problem_nd = tree.add_node("Problem", parent=inf_seeking_nd, type="param") - - doors_nd = tree.add_node("Doors", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=doors_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=doors_nd, type="param") - tree.add_node("Y", parent=peer_nd, type="value") - - boxes_nd = tree.add_node("Boxes", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=boxes_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=boxes_nd, type="param") - tree.add_node("Y", parent=peer_nd, type="value") - - switches_nd = tree.add_node("Switches", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=switches_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=switches_nd, type="param") - tree.add_node("Y", parent=peer_nd, type="value") - - generators_nd = tree.add_node("Generators", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=generators_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=generators_nd, type="param") - tree.add_node("Y", parent=peer_nd, type="value") - - levers_nd = tree.add_node("Levers", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=levers_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=levers_nd, type="param") - tree.add_node("Y", 
parent=peer_nd, type="value") - - doors_nd = tree.add_node("Marble", parent=problem_nd, type="value") - version_nd = tree.add_node("N", parent=doors_nd, type="param") - tree.add_node("2", parent=version_nd, type="value") - peer_nd = tree.add_node("Peer", parent=doors_nd, type="param") - tree.add_node("Y", parent=peer_nd, type="value") - - # Collaboration - collab_nd = tree.add_node("Collaboration", parent=env_type_nd, type="value") - - colab_type_nd = tree.add_node("Problem", parent=collab_nd, type="param") - - problem_nd = tree.add_node("Boxes", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - problem_nd = tree.add_node("Switches", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - problem_nd = tree.add_node("Generators", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - problem_nd = tree.add_node("Marble", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - problem_nd = tree.add_node("MarblePass", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - tree.add_node("Asocial", parent=role_nd, type="value") - - problem_nd = tree.add_node("MarblePush", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - problem_nd = tree.add_node("LeverDoor", parent=colab_type_nd, type="value") - role_nd = tree.add_node("Role", parent=problem_nd, type="param") - tree.add_node("A", parent=role_nd, type="value") - tree.add_node("B", parent=role_nd, type="value") - role_nd = tree.add_node("Version", parent=problem_nd, type="param") - tree.add_node("Social", parent=role_nd, type="value") - - # Perspective taking - collab_nd = tree.add_node("AppleStealing", parent=env_type_nd, type="value") - - role_nd = tree.add_node("Version", parent=collab_nd, type="param") - tree.add_node("Asocial", parent=role_nd, type="value") - social_nd = tree.add_node("Social", parent=role_nd, type="value") - - move_nd = tree.add_node("NPC_movement", parent=social_nd, 
type="param") - tree.add_node("Walking", parent=move_nd, type="value") - tree.add_node("Rotating", parent=move_nd, type="value") - - obstacles_nd = tree.add_node("Obstacles", parent=collab_nd, type="param") - tree.add_node("No", parent=obstacles_nd, type="value") - tree.add_node("A_bit", parent=obstacles_nd, type="value") - tree.add_node("Medium", parent=obstacles_nd, type="value") - tree.add_node("A_lot", parent=obstacles_nd, type="value") - - return tree - - def construct_env_from_params(self, params): - params_labels = {k.label: v.label for k, v in params.items()} - if params_labels['Env_type'] == "Collaboration": - - if params_labels["Problem"] == "MarblePass": - env = self.envs["Collaboration_Marble_Pass"] - - elif params_labels["Problem"] == "LeverDoor": - env = self.envs["Collaboration_Lever_Door"] - - elif params_labels["Problem"] == "MarblePush": - env = self.envs["Collaboration_Marble_Push"] - - elif params_labels["Problem"] in ["Boxes", "Switches", "Generators", "Marble"]: - env = self.envs["Collaboration_Objects"] - - else: - raise ValueError("params badly defined.") - - elif params_labels['Env_type'] == "Information_seeking": - env = self.envs["Info"] - - elif params_labels['Env_type'] == "AppleStealing": - env = self.envs["OthersPerceptionInference"] - - else: - raise ValueError("params badly defined.") - - reset_kwargs = params_labels - - return env, reset_kwargs - - def reset(self, with_info=False): - # select a new social environment at random, for each new episode - - old_window = None - if self.current_env: # a previous env exists, save old window - old_window = self.current_env.window - - self.current_params = self.parameter_tree.sample_env_params(ACL=self.curriculum) - - self.current_env, reset_kwargs = self.construct_env_from_params(self.current_params) - assert reset_kwargs is not {} - assert reset_kwargs is not None - - # print("Sampled parameters:") - # for k, v in reset_kwargs.items(): - # print(f'\t{k}:{v}') - - if with_info: - obs, info = self.current_env.reset_with_info(**reset_kwargs) - else: - obs = self.current_env.reset(**reset_kwargs) - - # carry on window if this env is not the first - if old_window: - self.current_env.window = old_window - - if with_info: - return obs, info - else: - return obs - - def reset_with_info(self): - return self.reset(with_info=True) - - - def seed(self, seed=1337): - # Seed the random number generator - for env in self.envs.values(): - env.seed(seed) - - return [seed] - - def set_curriculum_parameters(self, params): - if self.curriculum is not None: - self.curriculum.set_parameters(params) - - def step(self, action): - assert self.current_env - assert self.current_env.parameters is not None - - obs, reward, done, info = self.current_env.step(action) - - info["parameters"] = self.current_params - - if done: - if info["success"]: - # self.current_env.outcome_info = "SUCCESS: agent got {} reward \n".format(np.round(reward, 1)) - self.current_env.outcome_info = "SUCCESS\n" - else: - self.current_env.outcome_info = "FAILURE\n" - - if self.curriculum is not None: - for k, v in self.curriculum.get_info().items(): - info["curriculum_info_"+k] = v - - return obs, reward, done, info - - - @property - def window(self): - assert self.current_env - return self.current_env.window - - @window.setter - def window(self, value): - self.current_env.window = value - - def render(self, *args, **kwargs): - assert self.current_env - return self.current_env.render(*args, **kwargs) - - @property - def step_count(self): - return 
self.current_env.step_count - - def get_mission(self): - return self.current_env.get_mission() - - -defined_classes_ = [name for name, _ in inspect.getmembers(importlib.import_module(__name__), inspect.isclass)] - -envs = list(set(defined_classes_) - set(defined_classes)) -assert all([e.endswith("Env") for e in envs]) - -for env in envs: - register( - id='SocialAI-{}-v1'.format(env), - entry_point='gym_minigrid.social_ai_envs:{}'.format(env) - ) diff --git a/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/wrapper-98f94c21-9201c0de.js b/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/wrapper-98f94c21-9201c0de.js deleted file mode 100644 index 8b8be45d2d45e61ce10f530cfd7820d3e5f81b71..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/wrapper-98f94c21-9201c0de.js +++ /dev/null @@ -1,2449 +0,0 @@ -import { r as S } from "./Index-f36f7747.js"; -function z(s) { - return s && s.__esModule && Object.prototype.hasOwnProperty.call(s, "default") ? s.default : s; -} -function gt(s) { - if (s.__esModule) - return s; - var e = s.default; - if (typeof e == "function") { - var t = function r() { - return this instanceof r ? Reflect.construct(e, arguments, this.constructor) : e.apply(this, arguments); - }; - t.prototype = e.prototype; - } else - t = {}; - return Object.defineProperty(t, "__esModule", { value: !0 }), Object.keys(s).forEach(function(r) { - var i = Object.getOwnPropertyDescriptor(s, r); - Object.defineProperty(t, r, i.get ? i : { - enumerable: !0, - get: function() { - return s[r]; - } - }); - }), t; -} -const { Duplex: yt } = S; -function Oe(s) { - s.emit("close"); -} -function vt() { - !this.destroyed && this._writableState.finished && this.destroy(); -} -function Qe(s) { - this.removeListener("error", Qe), this.destroy(), this.listenerCount("error") === 0 && this.emit("error", s); -} -function St(s, e) { - let t = !0; - const r = new yt({ - ...e, - autoDestroy: !1, - emitClose: !1, - objectMode: !1, - writableObjectMode: !1 - }); - return s.on("message", function(n, o) { - const l = !o && r._readableState.objectMode ? n.toString() : n; - r.push(l) || s.pause(); - }), s.once("error", function(n) { - r.destroyed || (t = !1, r.destroy(n)); - }), s.once("close", function() { - r.destroyed || r.push(null); - }), r._destroy = function(i, n) { - if (s.readyState === s.CLOSED) { - n(i), process.nextTick(Oe, r); - return; - } - let o = !1; - s.once("error", function(f) { - o = !0, n(f); - }), s.once("close", function() { - o || n(i), process.nextTick(Oe, r); - }), t && s.terminate(); - }, r._final = function(i) { - if (s.readyState === s.CONNECTING) { - s.once("open", function() { - r._final(i); - }); - return; - } - s._socket !== null && (s._socket._writableState.finished ? 
(i(), r._readableState.endEmitted && r.destroy()) : (s._socket.once("finish", function() { - i(); - }), s.close())); - }, r._read = function() { - s.isPaused && s.resume(); - }, r._write = function(i, n, o) { - if (s.readyState === s.CONNECTING) { - s.once("open", function() { - r._write(i, n, o); - }); - return; - } - s.send(i, o); - }, r.on("end", vt), r.on("error", Qe), r; -} -var Et = St; -const Vs = /* @__PURE__ */ z(Et); -var te = { exports: {} }, U = { - BINARY_TYPES: ["nodebuffer", "arraybuffer", "fragments"], - EMPTY_BUFFER: Buffer.alloc(0), - GUID: "258EAFA5-E914-47DA-95CA-C5AB0DC85B11", - kForOnEventAttribute: Symbol("kIsForOnEventAttribute"), - kListener: Symbol("kListener"), - kStatusCode: Symbol("status-code"), - kWebSocket: Symbol("websocket"), - NOOP: () => { - } -}, bt, xt; -const { EMPTY_BUFFER: kt } = U, Se = Buffer[Symbol.species]; -function wt(s, e) { - if (s.length === 0) - return kt; - if (s.length === 1) - return s[0]; - const t = Buffer.allocUnsafe(e); - let r = 0; - for (let i = 0; i < s.length; i++) { - const n = s[i]; - t.set(n, r), r += n.length; - } - return r < e ? new Se(t.buffer, t.byteOffset, r) : t; -} -function Je(s, e, t, r, i) { - for (let n = 0; n < i; n++) - t[r + n] = s[n] ^ e[n & 3]; -} -function et(s, e) { - for (let t = 0; t < s.length; t++) - s[t] ^= e[t & 3]; -} -function Ot(s) { - return s.length === s.buffer.byteLength ? s.buffer : s.buffer.slice(s.byteOffset, s.byteOffset + s.length); -} -function Ee(s) { - if (Ee.readOnly = !0, Buffer.isBuffer(s)) - return s; - let e; - return s instanceof ArrayBuffer ? e = new Se(s) : ArrayBuffer.isView(s) ? e = new Se(s.buffer, s.byteOffset, s.byteLength) : (e = Buffer.from(s), Ee.readOnly = !1), e; -} -te.exports = { - concat: wt, - mask: Je, - toArrayBuffer: Ot, - toBuffer: Ee, - unmask: et -}; -if (!process.env.WS_NO_BUFFER_UTIL) - try { - const s = require("bufferutil"); - xt = te.exports.mask = function(e, t, r, i, n) { - n < 48 ? Je(e, t, r, i, n) : s.mask(e, t, r, i, n); - }, bt = te.exports.unmask = function(e, t) { - e.length < 32 ? et(e, t) : s.unmask(e, t); - }; - } catch { - } -var ne = te.exports; -const Ce = Symbol("kDone"), ue = Symbol("kRun"); -let Ct = class { - /** - * Creates a new `Limiter`. - * - * @param {Number} [concurrency=Infinity] The maximum number of jobs allowed - * to run concurrently - */ - constructor(e) { - this[Ce] = () => { - this.pending--, this[ue](); - }, this.concurrency = e || 1 / 0, this.jobs = [], this.pending = 0; - } - /** - * Adds a job to the queue. - * - * @param {Function} job The job to run - * @public - */ - add(e) { - this.jobs.push(e), this[ue](); - } - /** - * Removes a job from the queue and runs it if possible. - * - * @private - */ - [ue]() { - if (this.pending !== this.concurrency && this.jobs.length) { - const e = this.jobs.shift(); - this.pending++, e(this[Ce]); - } - } -}; -var Tt = Ct; -const W = S, Te = ne, Lt = Tt, { kStatusCode: tt } = U, Nt = Buffer[Symbol.species], Pt = Buffer.from([0, 0, 255, 255]), se = Symbol("permessage-deflate"), w = Symbol("total-length"), V = Symbol("callback"), C = Symbol("buffers"), J = Symbol("error"); -let K, Rt = class { - /** - * Creates a PerMessageDeflate instance. 
- * - * @param {Object} [options] Configuration options - * @param {(Boolean|Number)} [options.clientMaxWindowBits] Advertise support - * for, or request, a custom client window size - * @param {Boolean} [options.clientNoContextTakeover=false] Advertise/ - * acknowledge disabling of client context takeover - * @param {Number} [options.concurrencyLimit=10] The number of concurrent - * calls to zlib - * @param {(Boolean|Number)} [options.serverMaxWindowBits] Request/confirm the - * use of a custom server window size - * @param {Boolean} [options.serverNoContextTakeover=false] Request/accept - * disabling of server context takeover - * @param {Number} [options.threshold=1024] Size (in bytes) below which - * messages should not be compressed if context takeover is disabled - * @param {Object} [options.zlibDeflateOptions] Options to pass to zlib on - * deflate - * @param {Object} [options.zlibInflateOptions] Options to pass to zlib on - * inflate - * @param {Boolean} [isServer=false] Create the instance in either server or - * client mode - * @param {Number} [maxPayload=0] The maximum allowed message length - */ - constructor(e, t, r) { - if (this._maxPayload = r | 0, this._options = e || {}, this._threshold = this._options.threshold !== void 0 ? this._options.threshold : 1024, this._isServer = !!t, this._deflate = null, this._inflate = null, this.params = null, !K) { - const i = this._options.concurrencyLimit !== void 0 ? this._options.concurrencyLimit : 10; - K = new Lt(i); - } - } - /** - * @type {String} - */ - static get extensionName() { - return "permessage-deflate"; - } - /** - * Create an extension negotiation offer. - * - * @return {Object} Extension parameters - * @public - */ - offer() { - const e = {}; - return this._options.serverNoContextTakeover && (e.server_no_context_takeover = !0), this._options.clientNoContextTakeover && (e.client_no_context_takeover = !0), this._options.serverMaxWindowBits && (e.server_max_window_bits = this._options.serverMaxWindowBits), this._options.clientMaxWindowBits ? e.client_max_window_bits = this._options.clientMaxWindowBits : this._options.clientMaxWindowBits == null && (e.client_max_window_bits = !0), e; - } - /** - * Accept an extension negotiation offer/response. - * - * @param {Array} configurations The extension negotiation offers/reponse - * @return {Object} Accepted configuration - * @public - */ - accept(e) { - return e = this.normalizeParams(e), this.params = this._isServer ? this.acceptAsServer(e) : this.acceptAsClient(e), this.params; - } - /** - * Releases all resources used by the extension. - * - * @public - */ - cleanup() { - if (this._inflate && (this._inflate.close(), this._inflate = null), this._deflate) { - const e = this._deflate[V]; - this._deflate.close(), this._deflate = null, e && e( - new Error( - "The deflate stream was closed while data was being processed" - ) - ); - } - } - /** - * Accept an extension negotiation offer. 
- * - * @param {Array} offers The extension negotiation offers - * @return {Object} Accepted configuration - * @private - */ - acceptAsServer(e) { - const t = this._options, r = e.find((i) => !(t.serverNoContextTakeover === !1 && i.server_no_context_takeover || i.server_max_window_bits && (t.serverMaxWindowBits === !1 || typeof t.serverMaxWindowBits == "number" && t.serverMaxWindowBits > i.server_max_window_bits) || typeof t.clientMaxWindowBits == "number" && !i.client_max_window_bits)); - if (!r) - throw new Error("None of the extension offers can be accepted"); - return t.serverNoContextTakeover && (r.server_no_context_takeover = !0), t.clientNoContextTakeover && (r.client_no_context_takeover = !0), typeof t.serverMaxWindowBits == "number" && (r.server_max_window_bits = t.serverMaxWindowBits), typeof t.clientMaxWindowBits == "number" ? r.client_max_window_bits = t.clientMaxWindowBits : (r.client_max_window_bits === !0 || t.clientMaxWindowBits === !1) && delete r.client_max_window_bits, r; - } - /** - * Accept the extension negotiation response. - * - * @param {Array} response The extension negotiation response - * @return {Object} Accepted configuration - * @private - */ - acceptAsClient(e) { - const t = e[0]; - if (this._options.clientNoContextTakeover === !1 && t.client_no_context_takeover) - throw new Error('Unexpected parameter "client_no_context_takeover"'); - if (!t.client_max_window_bits) - typeof this._options.clientMaxWindowBits == "number" && (t.client_max_window_bits = this._options.clientMaxWindowBits); - else if (this._options.clientMaxWindowBits === !1 || typeof this._options.clientMaxWindowBits == "number" && t.client_max_window_bits > this._options.clientMaxWindowBits) - throw new Error( - 'Unexpected or invalid parameter "client_max_window_bits"' - ); - return t; - } - /** - * Normalize parameters. - * - * @param {Array} configurations The extension negotiation offers/reponse - * @return {Array} The offers/response with normalized parameters - * @private - */ - normalizeParams(e) { - return e.forEach((t) => { - Object.keys(t).forEach((r) => { - let i = t[r]; - if (i.length > 1) - throw new Error(`Parameter "${r}" must have only a single value`); - if (i = i[0], r === "client_max_window_bits") { - if (i !== !0) { - const n = +i; - if (!Number.isInteger(n) || n < 8 || n > 15) - throw new TypeError( - `Invalid value for parameter "${r}": ${i}` - ); - i = n; - } else if (!this._isServer) - throw new TypeError( - `Invalid value for parameter "${r}": ${i}` - ); - } else if (r === "server_max_window_bits") { - const n = +i; - if (!Number.isInteger(n) || n < 8 || n > 15) - throw new TypeError( - `Invalid value for parameter "${r}": ${i}` - ); - i = n; - } else if (r === "client_no_context_takeover" || r === "server_no_context_takeover") { - if (i !== !0) - throw new TypeError( - `Invalid value for parameter "${r}": ${i}` - ); - } else - throw new Error(`Unknown parameter "${r}"`); - t[r] = i; - }); - }), e; - } - /** - * Decompress data. Concurrency limited. - * - * @param {Buffer} data Compressed data - * @param {Boolean} fin Specifies whether or not this is the last fragment - * @param {Function} callback Callback - * @public - */ - decompress(e, t, r) { - K.add((i) => { - this._decompress(e, t, (n, o) => { - i(), r(n, o); - }); - }); - } - /** - * Compress data. Concurrency limited. 
- * - * @param {(Buffer|String)} data Data to compress - * @param {Boolean} fin Specifies whether or not this is the last fragment - * @param {Function} callback Callback - * @public - */ - compress(e, t, r) { - K.add((i) => { - this._compress(e, t, (n, o) => { - i(), r(n, o); - }); - }); - } - /** - * Decompress data. - * - * @param {Buffer} data Compressed data - * @param {Boolean} fin Specifies whether or not this is the last fragment - * @param {Function} callback Callback - * @private - */ - _decompress(e, t, r) { - const i = this._isServer ? "client" : "server"; - if (!this._inflate) { - const n = `${i}_max_window_bits`, o = typeof this.params[n] != "number" ? W.Z_DEFAULT_WINDOWBITS : this.params[n]; - this._inflate = W.createInflateRaw({ - ...this._options.zlibInflateOptions, - windowBits: o - }), this._inflate[se] = this, this._inflate[w] = 0, this._inflate[C] = [], this._inflate.on("error", Bt), this._inflate.on("data", st); - } - this._inflate[V] = r, this._inflate.write(e), t && this._inflate.write(Pt), this._inflate.flush(() => { - const n = this._inflate[J]; - if (n) { - this._inflate.close(), this._inflate = null, r(n); - return; - } - const o = Te.concat( - this._inflate[C], - this._inflate[w] - ); - this._inflate._readableState.endEmitted ? (this._inflate.close(), this._inflate = null) : (this._inflate[w] = 0, this._inflate[C] = [], t && this.params[`${i}_no_context_takeover`] && this._inflate.reset()), r(null, o); - }); - } - /** - * Compress data. - * - * @param {(Buffer|String)} data Data to compress - * @param {Boolean} fin Specifies whether or not this is the last fragment - * @param {Function} callback Callback - * @private - */ - _compress(e, t, r) { - const i = this._isServer ? "server" : "client"; - if (!this._deflate) { - const n = `${i}_max_window_bits`, o = typeof this.params[n] != "number" ? 
W.Z_DEFAULT_WINDOWBITS : this.params[n]; - this._deflate = W.createDeflateRaw({ - ...this._options.zlibDeflateOptions, - windowBits: o - }), this._deflate[w] = 0, this._deflate[C] = [], this._deflate.on("data", Ut); - } - this._deflate[V] = r, this._deflate.write(e), this._deflate.flush(W.Z_SYNC_FLUSH, () => { - if (!this._deflate) - return; - let n = Te.concat( - this._deflate[C], - this._deflate[w] - ); - t && (n = new Nt(n.buffer, n.byteOffset, n.length - 4)), this._deflate[V] = null, this._deflate[w] = 0, this._deflate[C] = [], t && this.params[`${i}_no_context_takeover`] && this._deflate.reset(), r(null, n); - }); - } -}; -var oe = Rt; -function Ut(s) { - this[C].push(s), this[w] += s.length; -} -function st(s) { - if (this[w] += s.length, this[se]._maxPayload < 1 || this[w] <= this[se]._maxPayload) { - this[C].push(s); - return; - } - this[J] = new RangeError("Max payload size exceeded"), this[J].code = "WS_ERR_UNSUPPORTED_MESSAGE_LENGTH", this[J][tt] = 1009, this.removeListener("data", st), this.reset(); -} -function Bt(s) { - this[se]._inflate = null, s[tt] = 1007, this[V](s); -} -var re = { exports: {} }; -const $t = {}, Mt = /* @__PURE__ */ Object.freeze(/* @__PURE__ */ Object.defineProperty({ - __proto__: null, - default: $t -}, Symbol.toStringTag, { value: "Module" })), It = /* @__PURE__ */ gt(Mt); -var Le; -const { isUtf8: Ne } = S, Dt = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - // 0 - 15 - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - // 16 - 31 - 0, - 1, - 0, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 1, - 1, - 0, - 1, - 1, - 0, - // 32 - 47 - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - // 48 - 63 - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - // 64 - 79 - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 1, - 1, - // 80 - 95 - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - // 96 - 111 - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 1, - 0, - 1, - 0 - // 112 - 127 -]; -function Wt(s) { - return s >= 1e3 && s <= 1014 && s !== 1004 && s !== 1005 && s !== 1006 || s >= 3e3 && s <= 4999; -} -function be(s) { - const e = s.length; - let t = 0; - for (; t < e; ) - if (!(s[t] & 128)) - t++; - else if ((s[t] & 224) === 192) { - if (t + 1 === e || (s[t + 1] & 192) !== 128 || (s[t] & 254) === 192) - return !1; - t += 2; - } else if ((s[t] & 240) === 224) { - if (t + 2 >= e || (s[t + 1] & 192) !== 128 || (s[t + 2] & 192) !== 128 || s[t] === 224 && (s[t + 1] & 224) === 128 || // Overlong - s[t] === 237 && (s[t + 1] & 224) === 160) - return !1; - t += 3; - } else if ((s[t] & 248) === 240) { - if (t + 3 >= e || (s[t + 1] & 192) !== 128 || (s[t + 2] & 192) !== 128 || (s[t + 3] & 192) !== 128 || s[t] === 240 && (s[t + 1] & 240) === 128 || // Overlong - s[t] === 244 && s[t + 1] > 143 || s[t] > 244) - return !1; - t += 4; - } else - return !1; - return !0; -} -re.exports = { - isValidStatusCode: Wt, - isValidUTF8: be, - tokenChars: Dt -}; -if (Ne) - Le = re.exports.isValidUTF8 = function(s) { - return s.length < 24 ? be(s) : Ne(s); - }; -else if (!process.env.WS_NO_UTF_8_VALIDATE) - try { - const s = It; - Le = re.exports.isValidUTF8 = function(e) { - return e.length < 32 ? 
be(e) : s(e); - }; - } catch { - } -var ae = re.exports; -const { Writable: At } = S, Pe = oe, { - BINARY_TYPES: Ft, - EMPTY_BUFFER: Re, - kStatusCode: jt, - kWebSocket: Gt -} = U, { concat: de, toArrayBuffer: Vt, unmask: Ht } = ne, { isValidStatusCode: zt, isValidUTF8: Ue } = ae, X = Buffer[Symbol.species], A = 0, Be = 1, $e = 2, Me = 3, _e = 4, Yt = 5; -let qt = class extends At { - /** - * Creates a Receiver instance. - * - * @param {Object} [options] Options object - * @param {String} [options.binaryType=nodebuffer] The type for binary data - * @param {Object} [options.extensions] An object containing the negotiated - * extensions - * @param {Boolean} [options.isServer=false] Specifies whether to operate in - * client or server mode - * @param {Number} [options.maxPayload=0] The maximum allowed message length - * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or - * not to skip UTF-8 validation for text and close messages - */ - constructor(e = {}) { - super(), this._binaryType = e.binaryType || Ft[0], this._extensions = e.extensions || {}, this._isServer = !!e.isServer, this._maxPayload = e.maxPayload | 0, this._skipUTF8Validation = !!e.skipUTF8Validation, this[Gt] = void 0, this._bufferedBytes = 0, this._buffers = [], this._compressed = !1, this._payloadLength = 0, this._mask = void 0, this._fragmented = 0, this._masked = !1, this._fin = !1, this._opcode = 0, this._totalPayloadLength = 0, this._messageLength = 0, this._fragments = [], this._state = A, this._loop = !1; - } - /** - * Implements `Writable.prototype._write()`. - * - * @param {Buffer} chunk The chunk of data to write - * @param {String} encoding The character encoding of `chunk` - * @param {Function} cb Callback - * @private - */ - _write(e, t, r) { - if (this._opcode === 8 && this._state == A) - return r(); - this._bufferedBytes += e.length, this._buffers.push(e), this.startLoop(r); - } - /** - * Consumes `n` bytes from the buffered data. - * - * @param {Number} n The number of bytes to consume - * @return {Buffer} The consumed bytes - * @private - */ - consume(e) { - if (this._bufferedBytes -= e, e === this._buffers[0].length) - return this._buffers.shift(); - if (e < this._buffers[0].length) { - const r = this._buffers[0]; - return this._buffers[0] = new X( - r.buffer, - r.byteOffset + e, - r.length - e - ), new X(r.buffer, r.byteOffset, e); - } - const t = Buffer.allocUnsafe(e); - do { - const r = this._buffers[0], i = t.length - e; - e >= r.length ? t.set(this._buffers.shift(), i) : (t.set(new Uint8Array(r.buffer, r.byteOffset, e), i), this._buffers[0] = new X( - r.buffer, - r.byteOffset + e, - r.length - e - )), e -= r.length; - } while (e > 0); - return t; - } - /** - * Starts the parsing loop. - * - * @param {Function} cb Callback - * @private - */ - startLoop(e) { - let t; - this._loop = !0; - do - switch (this._state) { - case A: - t = this.getInfo(); - break; - case Be: - t = this.getPayloadLength16(); - break; - case $e: - t = this.getPayloadLength64(); - break; - case Me: - this.getMask(); - break; - case _e: - t = this.getData(e); - break; - default: - this._loop = !1; - return; - } - while (this._loop); - e(t); - } - /** - * Reads the first two bytes of a frame. 
- * - * @return {(RangeError|undefined)} A possible error - * @private - */ - getInfo() { - if (this._bufferedBytes < 2) { - this._loop = !1; - return; - } - const e = this.consume(2); - if (e[0] & 48) - return this._loop = !1, g( - RangeError, - "RSV2 and RSV3 must be clear", - !0, - 1002, - "WS_ERR_UNEXPECTED_RSV_2_3" - ); - const t = (e[0] & 64) === 64; - if (t && !this._extensions[Pe.extensionName]) - return this._loop = !1, g( - RangeError, - "RSV1 must be clear", - !0, - 1002, - "WS_ERR_UNEXPECTED_RSV_1" - ); - if (this._fin = (e[0] & 128) === 128, this._opcode = e[0] & 15, this._payloadLength = e[1] & 127, this._opcode === 0) { - if (t) - return this._loop = !1, g( - RangeError, - "RSV1 must be clear", - !0, - 1002, - "WS_ERR_UNEXPECTED_RSV_1" - ); - if (!this._fragmented) - return this._loop = !1, g( - RangeError, - "invalid opcode 0", - !0, - 1002, - "WS_ERR_INVALID_OPCODE" - ); - this._opcode = this._fragmented; - } else if (this._opcode === 1 || this._opcode === 2) { - if (this._fragmented) - return this._loop = !1, g( - RangeError, - `invalid opcode ${this._opcode}`, - !0, - 1002, - "WS_ERR_INVALID_OPCODE" - ); - this._compressed = t; - } else if (this._opcode > 7 && this._opcode < 11) { - if (!this._fin) - return this._loop = !1, g( - RangeError, - "FIN must be set", - !0, - 1002, - "WS_ERR_EXPECTED_FIN" - ); - if (t) - return this._loop = !1, g( - RangeError, - "RSV1 must be clear", - !0, - 1002, - "WS_ERR_UNEXPECTED_RSV_1" - ); - if (this._payloadLength > 125 || this._opcode === 8 && this._payloadLength === 1) - return this._loop = !1, g( - RangeError, - `invalid payload length ${this._payloadLength}`, - !0, - 1002, - "WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH" - ); - } else - return this._loop = !1, g( - RangeError, - `invalid opcode ${this._opcode}`, - !0, - 1002, - "WS_ERR_INVALID_OPCODE" - ); - if (!this._fin && !this._fragmented && (this._fragmented = this._opcode), this._masked = (e[1] & 128) === 128, this._isServer) { - if (!this._masked) - return this._loop = !1, g( - RangeError, - "MASK must be set", - !0, - 1002, - "WS_ERR_EXPECTED_MASK" - ); - } else if (this._masked) - return this._loop = !1, g( - RangeError, - "MASK must be clear", - !0, - 1002, - "WS_ERR_UNEXPECTED_MASK" - ); - if (this._payloadLength === 126) - this._state = Be; - else if (this._payloadLength === 127) - this._state = $e; - else - return this.haveLength(); - } - /** - * Gets extended payload length (7+16). - * - * @return {(RangeError|undefined)} A possible error - * @private - */ - getPayloadLength16() { - if (this._bufferedBytes < 2) { - this._loop = !1; - return; - } - return this._payloadLength = this.consume(2).readUInt16BE(0), this.haveLength(); - } - /** - * Gets extended payload length (7+64). - * - * @return {(RangeError|undefined)} A possible error - * @private - */ - getPayloadLength64() { - if (this._bufferedBytes < 8) { - this._loop = !1; - return; - } - const e = this.consume(8), t = e.readUInt32BE(0); - return t > Math.pow(2, 53 - 32) - 1 ? (this._loop = !1, g( - RangeError, - "Unsupported WebSocket frame: payload length > 2^53 - 1", - !1, - 1009, - "WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH" - )) : (this._payloadLength = t * Math.pow(2, 32) + e.readUInt32BE(4), this.haveLength()); - } - /** - * Payload length has been read. 
- * - * @return {(RangeError|undefined)} A possible error - * @private - */ - haveLength() { - if (this._payloadLength && this._opcode < 8 && (this._totalPayloadLength += this._payloadLength, this._totalPayloadLength > this._maxPayload && this._maxPayload > 0)) - return this._loop = !1, g( - RangeError, - "Max payload size exceeded", - !1, - 1009, - "WS_ERR_UNSUPPORTED_MESSAGE_LENGTH" - ); - this._masked ? this._state = Me : this._state = _e; - } - /** - * Reads mask bytes. - * - * @private - */ - getMask() { - if (this._bufferedBytes < 4) { - this._loop = !1; - return; - } - this._mask = this.consume(4), this._state = _e; - } - /** - * Reads data bytes. - * - * @param {Function} cb Callback - * @return {(Error|RangeError|undefined)} A possible error - * @private - */ - getData(e) { - let t = Re; - if (this._payloadLength) { - if (this._bufferedBytes < this._payloadLength) { - this._loop = !1; - return; - } - t = this.consume(this._payloadLength), this._masked && this._mask[0] | this._mask[1] | this._mask[2] | this._mask[3] && Ht(t, this._mask); - } - if (this._opcode > 7) - return this.controlMessage(t); - if (this._compressed) { - this._state = Yt, this.decompress(t, e); - return; - } - return t.length && (this._messageLength = this._totalPayloadLength, this._fragments.push(t)), this.dataMessage(); - } - /** - * Decompresses data. - * - * @param {Buffer} data Compressed data - * @param {Function} cb Callback - * @private - */ - decompress(e, t) { - this._extensions[Pe.extensionName].decompress(e, this._fin, (i, n) => { - if (i) - return t(i); - if (n.length) { - if (this._messageLength += n.length, this._messageLength > this._maxPayload && this._maxPayload > 0) - return t( - g( - RangeError, - "Max payload size exceeded", - !1, - 1009, - "WS_ERR_UNSUPPORTED_MESSAGE_LENGTH" - ) - ); - this._fragments.push(n); - } - const o = this.dataMessage(); - if (o) - return t(o); - this.startLoop(t); - }); - } - /** - * Handles a data message. - * - * @return {(Error|undefined)} A possible error - * @private - */ - dataMessage() { - if (this._fin) { - const e = this._messageLength, t = this._fragments; - if (this._totalPayloadLength = 0, this._messageLength = 0, this._fragmented = 0, this._fragments = [], this._opcode === 2) { - let r; - this._binaryType === "nodebuffer" ? r = de(t, e) : this._binaryType === "arraybuffer" ? r = Vt(de(t, e)) : r = t, this.emit("message", r, !0); - } else { - const r = de(t, e); - if (!this._skipUTF8Validation && !Ue(r)) - return this._loop = !1, g( - Error, - "invalid UTF-8 sequence", - !0, - 1007, - "WS_ERR_INVALID_UTF8" - ); - this.emit("message", r, !1); - } - } - this._state = A; - } - /** - * Handles a control message. - * - * @param {Buffer} data Data to handle - * @return {(Error|RangeError|undefined)} A possible error - * @private - */ - controlMessage(e) { - if (this._opcode === 8) - if (this._loop = !1, e.length === 0) - this.emit("conclude", 1005, Re), this.end(); - else { - const t = e.readUInt16BE(0); - if (!zt(t)) - return g( - RangeError, - `invalid status code ${t}`, - !0, - 1002, - "WS_ERR_INVALID_CLOSE_CODE" - ); - const r = new X( - e.buffer, - e.byteOffset + 2, - e.length - 2 - ); - if (!this._skipUTF8Validation && !Ue(r)) - return g( - Error, - "invalid UTF-8 sequence", - !0, - 1007, - "WS_ERR_INVALID_UTF8" - ); - this.emit("conclude", t, r), this.end(); - } - else - this._opcode === 9 ? this.emit("ping", e) : this.emit("pong", e); - this._state = A; - } -}; -var rt = qt; -function g(s, e, t, r, i) { - const n = new s( - t ? 
`Invalid WebSocket frame: ${e}` : e - ); - return Error.captureStackTrace(n, g), n.code = i, n[jt] = r, n; -} -const qs = /* @__PURE__ */ z(rt), { randomFillSync: Kt } = S, Ie = oe, { EMPTY_BUFFER: Xt } = U, { isValidStatusCode: Zt } = ae, { mask: De, toBuffer: M } = ne, x = Symbol("kByteLength"), Qt = Buffer.alloc(4); -let Jt = class P { - /** - * Creates a Sender instance. - * - * @param {(net.Socket|tls.Socket)} socket The connection socket - * @param {Object} [extensions] An object containing the negotiated extensions - * @param {Function} [generateMask] The function used to generate the masking - * key - */ - constructor(e, t, r) { - this._extensions = t || {}, r && (this._generateMask = r, this._maskBuffer = Buffer.alloc(4)), this._socket = e, this._firstFragment = !0, this._compress = !1, this._bufferedBytes = 0, this._deflating = !1, this._queue = []; - } - /** - * Frames a piece of data according to the HyBi WebSocket protocol. - * - * @param {(Buffer|String)} data The data to frame - * @param {Object} options Options object - * @param {Boolean} [options.fin=false] Specifies whether or not to set the - * FIN bit - * @param {Function} [options.generateMask] The function used to generate the - * masking key - * @param {Boolean} [options.mask=false] Specifies whether or not to mask - * `data` - * @param {Buffer} [options.maskBuffer] The buffer used to store the masking - * key - * @param {Number} options.opcode The opcode - * @param {Boolean} [options.readOnly=false] Specifies whether `data` can be - * modified - * @param {Boolean} [options.rsv1=false] Specifies whether or not to set the - * RSV1 bit - * @return {(Buffer|String)[]} The framed data - * @public - */ - static frame(e, t) { - let r, i = !1, n = 2, o = !1; - t.mask && (r = t.maskBuffer || Qt, t.generateMask ? t.generateMask(r) : Kt(r, 0, 4), o = (r[0] | r[1] | r[2] | r[3]) === 0, n = 6); - let l; - typeof e == "string" ? (!t.mask || o) && t[x] !== void 0 ? l = t[x] : (e = Buffer.from(e), l = e.length) : (l = e.length, i = t.mask && t.readOnly && !o); - let f = l; - l >= 65536 ? (n += 8, f = 127) : l > 125 && (n += 2, f = 126); - const a = Buffer.allocUnsafe(i ? l + n : n); - return a[0] = t.fin ? t.opcode | 128 : t.opcode, t.rsv1 && (a[0] |= 64), a[1] = f, f === 126 ? a.writeUInt16BE(l, 2) : f === 127 && (a[2] = a[3] = 0, a.writeUIntBE(l, 4, 6)), t.mask ? (a[1] |= 128, a[n - 4] = r[0], a[n - 3] = r[1], a[n - 2] = r[2], a[n - 1] = r[3], o ? [a, e] : i ? (De(e, r, a, n, l), [a]) : (De(e, r, e, 0, l), [a, e])) : [a, e]; - } - /** - * Sends a close message to the other peer. - * - * @param {Number} [code] The status code component of the body - * @param {(String|Buffer)} [data] The message component of the body - * @param {Boolean} [mask=false] Specifies whether or not to mask the message - * @param {Function} [cb] Callback - * @public - */ - close(e, t, r, i) { - let n; - if (e === void 0) - n = Xt; - else { - if (typeof e != "number" || !Zt(e)) - throw new TypeError("First argument must be a valid error code number"); - if (t === void 0 || !t.length) - n = Buffer.allocUnsafe(2), n.writeUInt16BE(e, 0); - else { - const l = Buffer.byteLength(t); - if (l > 123) - throw new RangeError("The message must not be greater than 123 bytes"); - n = Buffer.allocUnsafe(2 + l), n.writeUInt16BE(e, 0), typeof t == "string" ? 
n.write(t, 2) : n.set(t, 2); - } - } - const o = { - [x]: n.length, - fin: !0, - generateMask: this._generateMask, - mask: r, - maskBuffer: this._maskBuffer, - opcode: 8, - readOnly: !1, - rsv1: !1 - }; - this._deflating ? this.enqueue([this.dispatch, n, !1, o, i]) : this.sendFrame(P.frame(n, o), i); - } - /** - * Sends a ping message to the other peer. - * - * @param {*} data The message to send - * @param {Boolean} [mask=false] Specifies whether or not to mask `data` - * @param {Function} [cb] Callback - * @public - */ - ping(e, t, r) { - let i, n; - if (typeof e == "string" ? (i = Buffer.byteLength(e), n = !1) : (e = M(e), i = e.length, n = M.readOnly), i > 125) - throw new RangeError("The data size must not be greater than 125 bytes"); - const o = { - [x]: i, - fin: !0, - generateMask: this._generateMask, - mask: t, - maskBuffer: this._maskBuffer, - opcode: 9, - readOnly: n, - rsv1: !1 - }; - this._deflating ? this.enqueue([this.dispatch, e, !1, o, r]) : this.sendFrame(P.frame(e, o), r); - } - /** - * Sends a pong message to the other peer. - * - * @param {*} data The message to send - * @param {Boolean} [mask=false] Specifies whether or not to mask `data` - * @param {Function} [cb] Callback - * @public - */ - pong(e, t, r) { - let i, n; - if (typeof e == "string" ? (i = Buffer.byteLength(e), n = !1) : (e = M(e), i = e.length, n = M.readOnly), i > 125) - throw new RangeError("The data size must not be greater than 125 bytes"); - const o = { - [x]: i, - fin: !0, - generateMask: this._generateMask, - mask: t, - maskBuffer: this._maskBuffer, - opcode: 10, - readOnly: n, - rsv1: !1 - }; - this._deflating ? this.enqueue([this.dispatch, e, !1, o, r]) : this.sendFrame(P.frame(e, o), r); - } - /** - * Sends a data message to the other peer. - * - * @param {*} data The message to send - * @param {Object} options Options object - * @param {Boolean} [options.binary=false] Specifies whether `data` is binary - * or text - * @param {Boolean} [options.compress=false] Specifies whether or not to - * compress `data` - * @param {Boolean} [options.fin=false] Specifies whether the fragment is the - * last one - * @param {Boolean} [options.mask=false] Specifies whether or not to mask - * `data` - * @param {Function} [cb] Callback - * @public - */ - send(e, t, r) { - const i = this._extensions[Ie.extensionName]; - let n = t.binary ? 2 : 1, o = t.compress, l, f; - if (typeof e == "string" ? (l = Buffer.byteLength(e), f = !1) : (e = M(e), l = e.length, f = M.readOnly), this._firstFragment ? (this._firstFragment = !1, o && i && i.params[i._isServer ? "server_no_context_takeover" : "client_no_context_takeover"] && (o = l >= i._threshold), this._compress = o) : (o = !1, n = 0), t.fin && (this._firstFragment = !0), i) { - const a = { - [x]: l, - fin: t.fin, - generateMask: this._generateMask, - mask: t.mask, - maskBuffer: this._maskBuffer, - opcode: n, - readOnly: f, - rsv1: o - }; - this._deflating ? this.enqueue([this.dispatch, e, this._compress, a, r]) : this.dispatch(e, this._compress, a, r); - } else - this.sendFrame( - P.frame(e, { - [x]: l, - fin: t.fin, - generateMask: this._generateMask, - mask: t.mask, - maskBuffer: this._maskBuffer, - opcode: n, - readOnly: f, - rsv1: !1 - }), - r - ); - } - /** - * Dispatches a message. 
- * - * @param {(Buffer|String)} data The message to send - * @param {Boolean} [compress=false] Specifies whether or not to compress - * `data` - * @param {Object} options Options object - * @param {Boolean} [options.fin=false] Specifies whether or not to set the - * FIN bit - * @param {Function} [options.generateMask] The function used to generate the - * masking key - * @param {Boolean} [options.mask=false] Specifies whether or not to mask - * `data` - * @param {Buffer} [options.maskBuffer] The buffer used to store the masking - * key - * @param {Number} options.opcode The opcode - * @param {Boolean} [options.readOnly=false] Specifies whether `data` can be - * modified - * @param {Boolean} [options.rsv1=false] Specifies whether or not to set the - * RSV1 bit - * @param {Function} [cb] Callback - * @private - */ - dispatch(e, t, r, i) { - if (!t) { - this.sendFrame(P.frame(e, r), i); - return; - } - const n = this._extensions[Ie.extensionName]; - this._bufferedBytes += r[x], this._deflating = !0, n.compress(e, r.fin, (o, l) => { - if (this._socket.destroyed) { - const f = new Error( - "The socket was closed while data was being compressed" - ); - typeof i == "function" && i(f); - for (let a = 0; a < this._queue.length; a++) { - const c = this._queue[a], h = c[c.length - 1]; - typeof h == "function" && h(f); - } - return; - } - this._bufferedBytes -= r[x], this._deflating = !1, r.readOnly = !1, this.sendFrame(P.frame(l, r), i), this.dequeue(); - }); - } - /** - * Executes queued send operations. - * - * @private - */ - dequeue() { - for (; !this._deflating && this._queue.length; ) { - const e = this._queue.shift(); - this._bufferedBytes -= e[3][x], Reflect.apply(e[0], this, e.slice(1)); - } - } - /** - * Enqueues a send operation. - * - * @param {Array} params Send operation parameters. - * @private - */ - enqueue(e) { - this._bufferedBytes += e[3][x], this._queue.push(e); - } - /** - * Sends a frame. - * - * @param {Buffer[]} list The frame to send - * @param {Function} [cb] Callback - * @private - */ - sendFrame(e, t) { - e.length === 2 ? (this._socket.cork(), this._socket.write(e[0]), this._socket.write(e[1], t), this._socket.uncork()) : this._socket.write(e[0], t); - } -}; -var it = Jt; -const Ks = /* @__PURE__ */ z(it), { kForOnEventAttribute: F, kListener: pe } = U, We = Symbol("kCode"), Ae = Symbol("kData"), Fe = Symbol("kError"), je = Symbol("kMessage"), Ge = Symbol("kReason"), I = Symbol("kTarget"), Ve = Symbol("kType"), He = Symbol("kWasClean"); -class B { - /** - * Create a new `Event`. - * - * @param {String} type The name of the event - * @throws {TypeError} If the `type` argument is not specified - */ - constructor(e) { - this[I] = null, this[Ve] = e; - } - /** - * @type {*} - */ - get target() { - return this[I]; - } - /** - * @type {String} - */ - get type() { - return this[Ve]; - } -} -Object.defineProperty(B.prototype, "target", { enumerable: !0 }); -Object.defineProperty(B.prototype, "type", { enumerable: !0 }); -class Y extends B { - /** - * Create a new `CloseEvent`. 
- * - * @param {String} type The name of the event - * @param {Object} [options] A dictionary object that allows for setting - * attributes via object members of the same name - * @param {Number} [options.code=0] The status code explaining why the - * connection was closed - * @param {String} [options.reason=''] A human-readable string explaining why - * the connection was closed - * @param {Boolean} [options.wasClean=false] Indicates whether or not the - * connection was cleanly closed - */ - constructor(e, t = {}) { - super(e), this[We] = t.code === void 0 ? 0 : t.code, this[Ge] = t.reason === void 0 ? "" : t.reason, this[He] = t.wasClean === void 0 ? !1 : t.wasClean; - } - /** - * @type {Number} - */ - get code() { - return this[We]; - } - /** - * @type {String} - */ - get reason() { - return this[Ge]; - } - /** - * @type {Boolean} - */ - get wasClean() { - return this[He]; - } -} -Object.defineProperty(Y.prototype, "code", { enumerable: !0 }); -Object.defineProperty(Y.prototype, "reason", { enumerable: !0 }); -Object.defineProperty(Y.prototype, "wasClean", { enumerable: !0 }); -class le extends B { - /** - * Create a new `ErrorEvent`. - * - * @param {String} type The name of the event - * @param {Object} [options] A dictionary object that allows for setting - * attributes via object members of the same name - * @param {*} [options.error=null] The error that generated this event - * @param {String} [options.message=''] The error message - */ - constructor(e, t = {}) { - super(e), this[Fe] = t.error === void 0 ? null : t.error, this[je] = t.message === void 0 ? "" : t.message; - } - /** - * @type {*} - */ - get error() { - return this[Fe]; - } - /** - * @type {String} - */ - get message() { - return this[je]; - } -} -Object.defineProperty(le.prototype, "error", { enumerable: !0 }); -Object.defineProperty(le.prototype, "message", { enumerable: !0 }); -class xe extends B { - /** - * Create a new `MessageEvent`. - * - * @param {String} type The name of the event - * @param {Object} [options] A dictionary object that allows for setting - * attributes via object members of the same name - * @param {*} [options.data=null] The message content - */ - constructor(e, t = {}) { - super(e), this[Ae] = t.data === void 0 ? null : t.data; - } - /** - * @type {*} - */ - get data() { - return this[Ae]; - } -} -Object.defineProperty(xe.prototype, "data", { enumerable: !0 }); -const es = { - /** - * Register an event listener. - * - * @param {String} type A string representing the event type to listen for - * @param {(Function|Object)} handler The listener to add - * @param {Object} [options] An options object specifies characteristics about - * the event listener - * @param {Boolean} [options.once=false] A `Boolean` indicating that the - * listener should be invoked at most once after being added. If `true`, - * the listener would be automatically removed when invoked. - * @public - */ - addEventListener(s, e, t = {}) { - for (const i of this.listeners(s)) - if (!t[F] && i[pe] === e && !i[F]) - return; - let r; - if (s === "message") - r = function(n, o) { - const l = new xe("message", { - data: o ? 
n : n.toString() - }); - l[I] = this, Z(e, this, l); - }; - else if (s === "close") - r = function(n, o) { - const l = new Y("close", { - code: n, - reason: o.toString(), - wasClean: this._closeFrameReceived && this._closeFrameSent - }); - l[I] = this, Z(e, this, l); - }; - else if (s === "error") - r = function(n) { - const o = new le("error", { - error: n, - message: n.message - }); - o[I] = this, Z(e, this, o); - }; - else if (s === "open") - r = function() { - const n = new B("open"); - n[I] = this, Z(e, this, n); - }; - else - return; - r[F] = !!t[F], r[pe] = e, t.once ? this.once(s, r) : this.on(s, r); - }, - /** - * Remove an event listener. - * - * @param {String} type A string representing the event type to remove - * @param {(Function|Object)} handler The listener to remove - * @public - */ - removeEventListener(s, e) { - for (const t of this.listeners(s)) - if (t[pe] === e && !t[F]) { - this.removeListener(s, t); - break; - } - } -}; -var ts = { - CloseEvent: Y, - ErrorEvent: le, - Event: B, - EventTarget: es, - MessageEvent: xe -}; -function Z(s, e, t) { - typeof s == "object" && s.handleEvent ? s.handleEvent.call(s, t) : s.call(e, t); -} -const { tokenChars: j } = ae; -function k(s, e, t) { - s[e] === void 0 ? s[e] = [t] : s[e].push(t); -} -function ss(s) { - const e = /* @__PURE__ */ Object.create(null); - let t = /* @__PURE__ */ Object.create(null), r = !1, i = !1, n = !1, o, l, f = -1, a = -1, c = -1, h = 0; - for (; h < s.length; h++) - if (a = s.charCodeAt(h), o === void 0) - if (c === -1 && j[a] === 1) - f === -1 && (f = h); - else if (h !== 0 && (a === 32 || a === 9)) - c === -1 && f !== -1 && (c = h); - else if (a === 59 || a === 44) { - if (f === -1) - throw new SyntaxError(`Unexpected character at index ${h}`); - c === -1 && (c = h); - const v = s.slice(f, c); - a === 44 ? (k(e, v, t), t = /* @__PURE__ */ Object.create(null)) : o = v, f = c = -1; - } else - throw new SyntaxError(`Unexpected character at index ${h}`); - else if (l === void 0) - if (c === -1 && j[a] === 1) - f === -1 && (f = h); - else if (a === 32 || a === 9) - c === -1 && f !== -1 && (c = h); - else if (a === 59 || a === 44) { - if (f === -1) - throw new SyntaxError(`Unexpected character at index ${h}`); - c === -1 && (c = h), k(t, s.slice(f, c), !0), a === 44 && (k(e, o, t), t = /* @__PURE__ */ Object.create(null), o = void 0), f = c = -1; - } else if (a === 61 && f !== -1 && c === -1) - l = s.slice(f, h), f = c = -1; - else - throw new SyntaxError(`Unexpected character at index ${h}`); - else if (i) { - if (j[a] !== 1) - throw new SyntaxError(`Unexpected character at index ${h}`); - f === -1 ? 
f = h : r || (r = !0), i = !1; - } else if (n) - if (j[a] === 1) - f === -1 && (f = h); - else if (a === 34 && f !== -1) - n = !1, c = h; - else if (a === 92) - i = !0; - else - throw new SyntaxError(`Unexpected character at index ${h}`); - else if (a === 34 && s.charCodeAt(h - 1) === 61) - n = !0; - else if (c === -1 && j[a] === 1) - f === -1 && (f = h); - else if (f !== -1 && (a === 32 || a === 9)) - c === -1 && (c = h); - else if (a === 59 || a === 44) { - if (f === -1) - throw new SyntaxError(`Unexpected character at index ${h}`); - c === -1 && (c = h); - let v = s.slice(f, c); - r && (v = v.replace(/\\/g, ""), r = !1), k(t, l, v), a === 44 && (k(e, o, t), t = /* @__PURE__ */ Object.create(null), o = void 0), l = void 0, f = c = -1; - } else - throw new SyntaxError(`Unexpected character at index ${h}`); - if (f === -1 || n || a === 32 || a === 9) - throw new SyntaxError("Unexpected end of input"); - c === -1 && (c = h); - const p = s.slice(f, c); - return o === void 0 ? k(e, p, t) : (l === void 0 ? k(t, p, !0) : r ? k(t, l, p.replace(/\\/g, "")) : k(t, l, p), k(e, o, t)), e; -} -function rs(s) { - return Object.keys(s).map((e) => { - let t = s[e]; - return Array.isArray(t) || (t = [t]), t.map((r) => [e].concat( - Object.keys(r).map((i) => { - let n = r[i]; - return Array.isArray(n) || (n = [n]), n.map((o) => o === !0 ? i : `${i}=${o}`).join("; "); - }) - ).join("; ")).join(", "); - }).join(", "); -} -var nt = { format: rs, parse: ss }; -const is = S, ns = S, os = S, ot = S, as = S, { randomBytes: ls, createHash: fs } = S, { URL: me } = S, T = oe, hs = rt, cs = it, { - BINARY_TYPES: ze, - EMPTY_BUFFER: Q, - GUID: us, - kForOnEventAttribute: ge, - kListener: ds, - kStatusCode: _s, - kWebSocket: y, - NOOP: at -} = U, { - EventTarget: { addEventListener: ps, removeEventListener: ms } -} = ts, { format: gs, parse: ys } = nt, { toBuffer: vs } = ne, Ss = 30 * 1e3, lt = Symbol("kAborted"), ye = [8, 13], O = ["CONNECTING", "OPEN", "CLOSING", "CLOSED"], Es = /^[!#$%&'*+\-.0-9A-Z^_`|a-z~]+$/; -let m = class d extends is { - /** - * Create a new `WebSocket`. - * - * @param {(String|URL)} address The URL to which to connect - * @param {(String|String[])} [protocols] The subprotocols - * @param {Object} [options] Connection options - */ - constructor(e, t, r) { - super(), this._binaryType = ze[0], this._closeCode = 1006, this._closeFrameReceived = !1, this._closeFrameSent = !1, this._closeMessage = Q, this._closeTimer = null, this._extensions = {}, this._paused = !1, this._protocol = "", this._readyState = d.CONNECTING, this._receiver = null, this._sender = null, this._socket = null, e !== null ? (this._bufferedAmount = 0, this._isServer = !1, this._redirects = 0, t === void 0 ? t = [] : Array.isArray(t) || (typeof t == "object" && t !== null ? (r = t, t = []) : t = [t]), ht(this, e, t, r)) : this._isServer = !0; - } - /** - * This deviates from the WHATWG interface since ws doesn't support the - * required default "blob" type (instead we define a custom "nodebuffer" - * type). - * - * @type {String} - */ - get binaryType() { - return this._binaryType; - } - set binaryType(e) { - ze.includes(e) && (this._binaryType = e, this._receiver && (this._receiver._binaryType = e)); - } - /** - * @type {Number} - */ - get bufferedAmount() { - return this._socket ? 
this._socket._writableState.length + this._sender._bufferedBytes : this._bufferedAmount; - } - /** - * @type {String} - */ - get extensions() { - return Object.keys(this._extensions).join(); - } - /** - * @type {Boolean} - */ - get isPaused() { - return this._paused; - } - /** - * @type {Function} - */ - /* istanbul ignore next */ - get onclose() { - return null; - } - /** - * @type {Function} - */ - /* istanbul ignore next */ - get onerror() { - return null; - } - /** - * @type {Function} - */ - /* istanbul ignore next */ - get onopen() { - return null; - } - /** - * @type {Function} - */ - /* istanbul ignore next */ - get onmessage() { - return null; - } - /** - * @type {String} - */ - get protocol() { - return this._protocol; - } - /** - * @type {Number} - */ - get readyState() { - return this._readyState; - } - /** - * @type {String} - */ - get url() { - return this._url; - } - /** - * Set up the socket and the internal resources. - * - * @param {(net.Socket|tls.Socket)} socket The network socket between the - * server and client - * @param {Buffer} head The first packet of the upgraded stream - * @param {Object} options Options object - * @param {Function} [options.generateMask] The function used to generate the - * masking key - * @param {Number} [options.maxPayload=0] The maximum allowed message size - * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or - * not to skip UTF-8 validation for text and close messages - * @private - */ - setSocket(e, t, r) { - const i = new hs({ - binaryType: this.binaryType, - extensions: this._extensions, - isServer: this._isServer, - maxPayload: r.maxPayload, - skipUTF8Validation: r.skipUTF8Validation - }); - this._sender = new cs(e, this._extensions, r.generateMask), this._receiver = i, this._socket = e, i[y] = this, e[y] = this, i.on("conclude", ks), i.on("drain", ws), i.on("error", Os), i.on("message", Cs), i.on("ping", Ts), i.on("pong", Ls), e.setTimeout(0), e.setNoDelay(), t.length > 0 && e.unshift(t), e.on("close", ut), e.on("data", fe), e.on("end", dt), e.on("error", _t), this._readyState = d.OPEN, this.emit("open"); - } - /** - * Emit the `'close'` event. - * - * @private - */ - emitClose() { - if (!this._socket) { - this._readyState = d.CLOSED, this.emit("close", this._closeCode, this._closeMessage); - return; - } - this._extensions[T.extensionName] && this._extensions[T.extensionName].cleanup(), this._receiver.removeAllListeners(), this._readyState = d.CLOSED, this.emit("close", this._closeCode, this._closeMessage); - } - /** - * Start a closing handshake. 
- * - * +----------+ +-----------+ +----------+ - * - - -|ws.close()|-->|close frame|-->|ws.close()|- - - - * | +----------+ +-----------+ +----------+ | - * +----------+ +-----------+ | - * CLOSING |ws.close()|<--|close frame|<--+-----+ CLOSING - * +----------+ +-----------+ | - * | | | +---+ | - * +------------------------+-->|fin| - - - - - * | +---+ | +---+ - * - - - - -|fin|<---------------------+ - * +---+ - * - * @param {Number} [code] Status code explaining why the connection is closing - * @param {(String|Buffer)} [data] The reason why the connection is - * closing - * @public - */ - close(e, t) { - if (this.readyState !== d.CLOSED) { - if (this.readyState === d.CONNECTING) { - const r = "WebSocket was closed before the connection was established"; - b(this, this._req, r); - return; - } - if (this.readyState === d.CLOSING) { - this._closeFrameSent && (this._closeFrameReceived || this._receiver._writableState.errorEmitted) && this._socket.end(); - return; - } - this._readyState = d.CLOSING, this._sender.close(e, t, !this._isServer, (r) => { - r || (this._closeFrameSent = !0, (this._closeFrameReceived || this._receiver._writableState.errorEmitted) && this._socket.end()); - }), this._closeTimer = setTimeout( - this._socket.destroy.bind(this._socket), - Ss - ); - } - } - /** - * Pause the socket. - * - * @public - */ - pause() { - this.readyState === d.CONNECTING || this.readyState === d.CLOSED || (this._paused = !0, this._socket.pause()); - } - /** - * Send a ping. - * - * @param {*} [data] The data to send - * @param {Boolean} [mask] Indicates whether or not to mask `data` - * @param {Function} [cb] Callback which is executed when the ping is sent - * @public - */ - ping(e, t, r) { - if (this.readyState === d.CONNECTING) - throw new Error("WebSocket is not open: readyState 0 (CONNECTING)"); - if (typeof e == "function" ? (r = e, e = t = void 0) : typeof t == "function" && (r = t, t = void 0), typeof e == "number" && (e = e.toString()), this.readyState !== d.OPEN) { - ve(this, e, r); - return; - } - t === void 0 && (t = !this._isServer), this._sender.ping(e || Q, t, r); - } - /** - * Send a pong. - * - * @param {*} [data] The data to send - * @param {Boolean} [mask] Indicates whether or not to mask `data` - * @param {Function} [cb] Callback which is executed when the pong is sent - * @public - */ - pong(e, t, r) { - if (this.readyState === d.CONNECTING) - throw new Error("WebSocket is not open: readyState 0 (CONNECTING)"); - if (typeof e == "function" ? (r = e, e = t = void 0) : typeof t == "function" && (r = t, t = void 0), typeof e == "number" && (e = e.toString()), this.readyState !== d.OPEN) { - ve(this, e, r); - return; - } - t === void 0 && (t = !this._isServer), this._sender.pong(e || Q, t, r); - } - /** - * Resume the socket. - * - * @public - */ - resume() { - this.readyState === d.CONNECTING || this.readyState === d.CLOSED || (this._paused = !1, this._receiver._writableState.needDrain || this._socket.resume()); - } - /** - * Send a data message. 
- * - * @param {*} data The message to send - * @param {Object} [options] Options object - * @param {Boolean} [options.binary] Specifies whether `data` is binary or - * text - * @param {Boolean} [options.compress] Specifies whether or not to compress - * `data` - * @param {Boolean} [options.fin=true] Specifies whether the fragment is the - * last one - * @param {Boolean} [options.mask] Specifies whether or not to mask `data` - * @param {Function} [cb] Callback which is executed when data is written out - * @public - */ - send(e, t, r) { - if (this.readyState === d.CONNECTING) - throw new Error("WebSocket is not open: readyState 0 (CONNECTING)"); - if (typeof t == "function" && (r = t, t = {}), typeof e == "number" && (e = e.toString()), this.readyState !== d.OPEN) { - ve(this, e, r); - return; - } - const i = { - binary: typeof e != "string", - mask: !this._isServer, - compress: !0, - fin: !0, - ...t - }; - this._extensions[T.extensionName] || (i.compress = !1), this._sender.send(e || Q, i, r); - } - /** - * Forcibly close the connection. - * - * @public - */ - terminate() { - if (this.readyState !== d.CLOSED) { - if (this.readyState === d.CONNECTING) { - const e = "WebSocket was closed before the connection was established"; - b(this, this._req, e); - return; - } - this._socket && (this._readyState = d.CLOSING, this._socket.destroy()); - } - } -}; -Object.defineProperty(m, "CONNECTING", { - enumerable: !0, - value: O.indexOf("CONNECTING") -}); -Object.defineProperty(m.prototype, "CONNECTING", { - enumerable: !0, - value: O.indexOf("CONNECTING") -}); -Object.defineProperty(m, "OPEN", { - enumerable: !0, - value: O.indexOf("OPEN") -}); -Object.defineProperty(m.prototype, "OPEN", { - enumerable: !0, - value: O.indexOf("OPEN") -}); -Object.defineProperty(m, "CLOSING", { - enumerable: !0, - value: O.indexOf("CLOSING") -}); -Object.defineProperty(m.prototype, "CLOSING", { - enumerable: !0, - value: O.indexOf("CLOSING") -}); -Object.defineProperty(m, "CLOSED", { - enumerable: !0, - value: O.indexOf("CLOSED") -}); -Object.defineProperty(m.prototype, "CLOSED", { - enumerable: !0, - value: O.indexOf("CLOSED") -}); -[ - "binaryType", - "bufferedAmount", - "extensions", - "isPaused", - "protocol", - "readyState", - "url" -].forEach((s) => { - Object.defineProperty(m.prototype, s, { enumerable: !0 }); -}); -["open", "error", "close", "message"].forEach((s) => { - Object.defineProperty(m.prototype, `on${s}`, { - enumerable: !0, - get() { - for (const e of this.listeners(s)) - if (e[ge]) - return e[ds]; - return null; - }, - set(e) { - for (const t of this.listeners(s)) - if (t[ge]) { - this.removeListener(s, t); - break; - } - typeof e == "function" && this.addEventListener(s, e, { - [ge]: !0 - }); - } - }); -}); -m.prototype.addEventListener = ps; -m.prototype.removeEventListener = ms; -var ft = m; -function ht(s, e, t, r) { - const i = { - protocolVersion: ye[1], - maxPayload: 104857600, - skipUTF8Validation: !1, - perMessageDeflate: !0, - followRedirects: !1, - maxRedirects: 10, - ...r, - createConnection: void 0, - socketPath: void 0, - hostname: void 0, - protocol: void 0, - timeout: void 0, - method: "GET", - host: void 0, - path: void 0, - port: void 0 - }; - if (!ye.includes(i.protocolVersion)) - throw new RangeError( - `Unsupported protocol version: ${i.protocolVersion} (supported versions: ${ye.join(", ")})` - ); - let n; - if (e instanceof me) - n = e, s._url = e.href; - else { - try { - n = new me(e); - } catch { - throw new SyntaxError(`Invalid URL: ${e}`); - } - s._url = e; - } - const o 
= n.protocol === "wss:", l = n.protocol === "ws+unix:"; - let f; - if (n.protocol !== "ws:" && !o && !l ? f = `The URL's protocol must be one of "ws:", "wss:", or "ws+unix:"` : l && !n.pathname ? f = "The URL's pathname is empty" : n.hash && (f = "The URL contains a fragment identifier"), f) { - const u = new SyntaxError(f); - if (s._redirects === 0) - throw u; - ee(s, u); - return; - } - const a = o ? 443 : 80, c = ls(16).toString("base64"), h = o ? ns.request : os.request, p = /* @__PURE__ */ new Set(); - let v; - if (i.createConnection = o ? xs : bs, i.defaultPort = i.defaultPort || a, i.port = n.port || a, i.host = n.hostname.startsWith("[") ? n.hostname.slice(1, -1) : n.hostname, i.headers = { - ...i.headers, - "Sec-WebSocket-Version": i.protocolVersion, - "Sec-WebSocket-Key": c, - Connection: "Upgrade", - Upgrade: "websocket" - }, i.path = n.pathname + n.search, i.timeout = i.handshakeTimeout, i.perMessageDeflate && (v = new T( - i.perMessageDeflate !== !0 ? i.perMessageDeflate : {}, - !1, - i.maxPayload - ), i.headers["Sec-WebSocket-Extensions"] = gs({ - [T.extensionName]: v.offer() - })), t.length) { - for (const u of t) { - if (typeof u != "string" || !Es.test(u) || p.has(u)) - throw new SyntaxError( - "An invalid or duplicated subprotocol was specified" - ); - p.add(u); - } - i.headers["Sec-WebSocket-Protocol"] = t.join(","); - } - if (i.origin && (i.protocolVersion < 13 ? i.headers["Sec-WebSocket-Origin"] = i.origin : i.headers.Origin = i.origin), (n.username || n.password) && (i.auth = `${n.username}:${n.password}`), l) { - const u = i.path.split(":"); - i.socketPath = u[0], i.path = u[1]; - } - let _; - if (i.followRedirects) { - if (s._redirects === 0) { - s._originalIpc = l, s._originalSecure = o, s._originalHostOrSocketPath = l ? i.socketPath : n.host; - const u = r && r.headers; - if (r = { ...r, headers: {} }, u) - for (const [E, $] of Object.entries(u)) - r.headers[E.toLowerCase()] = $; - } else if (s.listenerCount("redirect") === 0) { - const u = l ? s._originalIpc ? i.socketPath === s._originalHostOrSocketPath : !1 : s._originalIpc ? 
!1 : n.host === s._originalHostOrSocketPath; - (!u || s._originalSecure && !o) && (delete i.headers.authorization, delete i.headers.cookie, u || delete i.headers.host, i.auth = void 0); - } - i.auth && !r.headers.authorization && (r.headers.authorization = "Basic " + Buffer.from(i.auth).toString("base64")), _ = s._req = h(i), s._redirects && s.emit("redirect", s.url, _); - } else - _ = s._req = h(i); - i.timeout && _.on("timeout", () => { - b(s, _, "Opening handshake has timed out"); - }), _.on("error", (u) => { - _ === null || _[lt] || (_ = s._req = null, ee(s, u)); - }), _.on("response", (u) => { - const E = u.headers.location, $ = u.statusCode; - if (E && i.followRedirects && $ >= 300 && $ < 400) { - if (++s._redirects > i.maxRedirects) { - b(s, _, "Maximum redirects exceeded"); - return; - } - _.abort(); - let q; - try { - q = new me(E, e); - } catch { - const L = new SyntaxError(`Invalid URL: ${E}`); - ee(s, L); - return; - } - ht(s, q, t, r); - } else - s.emit("unexpected-response", _, u) || b( - s, - _, - `Unexpected server response: ${u.statusCode}` - ); - }), _.on("upgrade", (u, E, $) => { - if (s.emit("upgrade", u), s.readyState !== m.CONNECTING) - return; - if (_ = s._req = null, u.headers.upgrade.toLowerCase() !== "websocket") { - b(s, E, "Invalid Upgrade header"); - return; - } - const q = fs("sha1").update(c + us).digest("base64"); - if (u.headers["sec-websocket-accept"] !== q) { - b(s, E, "Invalid Sec-WebSocket-Accept header"); - return; - } - const D = u.headers["sec-websocket-protocol"]; - let L; - if (D !== void 0 ? p.size ? p.has(D) || (L = "Server sent an invalid subprotocol") : L = "Server sent a subprotocol but none was requested" : p.size && (L = "Server sent no subprotocol"), L) { - b(s, E, L); - return; - } - D && (s._protocol = D); - const ke = u.headers["sec-websocket-extensions"]; - if (ke !== void 0) { - if (!v) { - b(s, E, "Server sent a Sec-WebSocket-Extensions header but no extension was requested"); - return; - } - let he; - try { - he = ys(ke); - } catch { - b(s, E, "Invalid Sec-WebSocket-Extensions header"); - return; - } - const we = Object.keys(he); - if (we.length !== 1 || we[0] !== T.extensionName) { - b(s, E, "Server indicated an extension that was not requested"); - return; - } - try { - v.accept(he[T.extensionName]); - } catch { - b(s, E, "Invalid Sec-WebSocket-Extensions header"); - return; - } - s._extensions[T.extensionName] = v; - } - s.setSocket(E, $, { - generateMask: i.generateMask, - maxPayload: i.maxPayload, - skipUTF8Validation: i.skipUTF8Validation - }); - }), i.finishRequest ? i.finishRequest(_, s) : _.end(); -} -function ee(s, e) { - s._readyState = m.CLOSING, s.emit("error", e), s.emitClose(); -} -function bs(s) { - return s.path = s.socketPath, ot.connect(s); -} -function xs(s) { - return s.path = void 0, !s.servername && s.servername !== "" && (s.servername = ot.isIP(s.host) ? "" : s.host), as.connect(s); -} -function b(s, e, t) { - s._readyState = m.CLOSING; - const r = new Error(t); - Error.captureStackTrace(r, b), e.setHeader ? (e[lt] = !0, e.abort(), e.socket && !e.socket.destroyed && e.socket.destroy(), process.nextTick(ee, s, r)) : (e.destroy(r), e.once("error", s.emit.bind(s, "error")), e.once("close", s.emitClose.bind(s))); -} -function ve(s, e, t) { - if (e) { - const r = vs(e).length; - s._socket ? 
s._sender._bufferedBytes += r : s._bufferedAmount += r; - } - if (t) { - const r = new Error( - `WebSocket is not open: readyState ${s.readyState} (${O[s.readyState]})` - ); - process.nextTick(t, r); - } -} -function ks(s, e) { - const t = this[y]; - t._closeFrameReceived = !0, t._closeMessage = e, t._closeCode = s, t._socket[y] !== void 0 && (t._socket.removeListener("data", fe), process.nextTick(ct, t._socket), s === 1005 ? t.close() : t.close(s, e)); -} -function ws() { - const s = this[y]; - s.isPaused || s._socket.resume(); -} -function Os(s) { - const e = this[y]; - e._socket[y] !== void 0 && (e._socket.removeListener("data", fe), process.nextTick(ct, e._socket), e.close(s[_s])), e.emit("error", s); -} -function Ye() { - this[y].emitClose(); -} -function Cs(s, e) { - this[y].emit("message", s, e); -} -function Ts(s) { - const e = this[y]; - e.pong(s, !e._isServer, at), e.emit("ping", s); -} -function Ls(s) { - this[y].emit("pong", s); -} -function ct(s) { - s.resume(); -} -function ut() { - const s = this[y]; - this.removeListener("close", ut), this.removeListener("data", fe), this.removeListener("end", dt), s._readyState = m.CLOSING; - let e; - !this._readableState.endEmitted && !s._closeFrameReceived && !s._receiver._writableState.errorEmitted && (e = s._socket.read()) !== null && s._receiver.write(e), s._receiver.end(), this[y] = void 0, clearTimeout(s._closeTimer), s._receiver._writableState.finished || s._receiver._writableState.errorEmitted ? s.emitClose() : (s._receiver.on("error", Ye), s._receiver.on("finish", Ye)); -} -function fe(s) { - this[y]._receiver.write(s) || this.pause(); -} -function dt() { - const s = this[y]; - s._readyState = m.CLOSING, s._receiver.end(), this.end(); -} -function _t() { - const s = this[y]; - this.removeListener("error", _t), this.on("error", at), s && (s._readyState = m.CLOSING, this.destroy()); -} -const Xs = /* @__PURE__ */ z(ft), { tokenChars: Ns } = ae; -function Ps(s) { - const e = /* @__PURE__ */ new Set(); - let t = -1, r = -1, i = 0; - for (i; i < s.length; i++) { - const o = s.charCodeAt(i); - if (r === -1 && Ns[o] === 1) - t === -1 && (t = i); - else if (i !== 0 && (o === 32 || o === 9)) - r === -1 && t !== -1 && (r = i); - else if (o === 44) { - if (t === -1) - throw new SyntaxError(`Unexpected character at index ${i}`); - r === -1 && (r = i); - const l = s.slice(t, r); - if (e.has(l)) - throw new SyntaxError(`The "${l}" subprotocol is duplicated`); - e.add(l), t = r = -1; - } else - throw new SyntaxError(`Unexpected character at index ${i}`); - } - if (t === -1 || r !== -1) - throw new SyntaxError("Unexpected end of input"); - const n = s.slice(t, i); - if (e.has(n)) - throw new SyntaxError(`The "${n}" subprotocol is duplicated`); - return e.add(n), e; -} -var Rs = { parse: Ps }; -const Us = S, ie = S, { createHash: Bs } = S, qe = nt, N = oe, $s = Rs, Ms = ft, { GUID: Is, kWebSocket: Ds } = U, Ws = /^[+/0-9A-Za-z]{22}==$/, Ke = 0, Xe = 1, pt = 2; -class As extends Us { - /** - * Create a `WebSocketServer` instance. 
- * - * @param {Object} options Configuration options - * @param {Number} [options.backlog=511] The maximum length of the queue of - * pending connections - * @param {Boolean} [options.clientTracking=true] Specifies whether or not to - * track clients - * @param {Function} [options.handleProtocols] A hook to handle protocols - * @param {String} [options.host] The hostname where to bind the server - * @param {Number} [options.maxPayload=104857600] The maximum allowed message - * size - * @param {Boolean} [options.noServer=false] Enable no server mode - * @param {String} [options.path] Accept only connections matching this path - * @param {(Boolean|Object)} [options.perMessageDeflate=false] Enable/disable - * permessage-deflate - * @param {Number} [options.port] The port where to bind the server - * @param {(http.Server|https.Server)} [options.server] A pre-created HTTP/S - * server to use - * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or - * not to skip UTF-8 validation for text and close messages - * @param {Function} [options.verifyClient] A hook to reject connections - * @param {Function} [options.WebSocket=WebSocket] Specifies the `WebSocket` - * class to use. It must be the `WebSocket` class or class that extends it - * @param {Function} [callback] A listener for the `listening` event - */ - constructor(e, t) { - if (super(), e = { - maxPayload: 100 * 1024 * 1024, - skipUTF8Validation: !1, - perMessageDeflate: !1, - handleProtocols: null, - clientTracking: !0, - verifyClient: null, - noServer: !1, - backlog: null, - // use default (511 as implemented in net.js) - server: null, - host: null, - path: null, - port: null, - WebSocket: Ms, - ...e - }, e.port == null && !e.server && !e.noServer || e.port != null && (e.server || e.noServer) || e.server && e.noServer) - throw new TypeError( - 'One and only one of the "port", "server", or "noServer" options must be specified' - ); - if (e.port != null ? (this._server = ie.createServer((r, i) => { - const n = ie.STATUS_CODES[426]; - i.writeHead(426, { - "Content-Length": n.length, - "Content-Type": "text/plain" - }), i.end(n); - }), this._server.listen( - e.port, - e.host, - e.backlog, - t - )) : e.server && (this._server = e.server), this._server) { - const r = this.emit.bind(this, "connection"); - this._removeListeners = js(this._server, { - listening: this.emit.bind(this, "listening"), - error: this.emit.bind(this, "error"), - upgrade: (i, n, o) => { - this.handleUpgrade(i, n, o, r); - } - }); - } - e.perMessageDeflate === !0 && (e.perMessageDeflate = {}), e.clientTracking && (this.clients = /* @__PURE__ */ new Set(), this._shouldEmitClose = !1), this.options = e, this._state = Ke; - } - /** - * Returns the bound address, the address family name, and port of the server - * as reported by the operating system if listening on an IP socket. - * If the server is listening on a pipe or UNIX domain socket, the name is - * returned as a string. - * - * @return {(Object|String|null)} The address of the server - * @public - */ - address() { - if (this.options.noServer) - throw new Error('The server is operating in "noServer" mode'); - return this._server ? this._server.address() : null; - } - /** - * Stop the server from accepting new connections and emit the `'close'` event - * when all existing connections are closed. 
- * - * @param {Function} [cb] A one-time listener for the `'close'` event - * @public - */ - close(e) { - if (this._state === pt) { - e && this.once("close", () => { - e(new Error("The server is not running")); - }), process.nextTick(G, this); - return; - } - if (e && this.once("close", e), this._state !== Xe) - if (this._state = Xe, this.options.noServer || this.options.server) - this._server && (this._removeListeners(), this._removeListeners = this._server = null), this.clients ? this.clients.size ? this._shouldEmitClose = !0 : process.nextTick(G, this) : process.nextTick(G, this); - else { - const t = this._server; - this._removeListeners(), this._removeListeners = this._server = null, t.close(() => { - G(this); - }); - } - } - /** - * See if a given request should be handled by this server instance. - * - * @param {http.IncomingMessage} req Request object to inspect - * @return {Boolean} `true` if the request is valid, else `false` - * @public - */ - shouldHandle(e) { - if (this.options.path) { - const t = e.url.indexOf("?"); - if ((t !== -1 ? e.url.slice(0, t) : e.url) !== this.options.path) - return !1; - } - return !0; - } - /** - * Handle a HTTP Upgrade request. - * - * @param {http.IncomingMessage} req The request object - * @param {(net.Socket|tls.Socket)} socket The network socket between the - * server and client - * @param {Buffer} head The first packet of the upgraded stream - * @param {Function} cb Callback - * @public - */ - handleUpgrade(e, t, r, i) { - t.on("error", Ze); - const n = e.headers["sec-websocket-key"], o = +e.headers["sec-websocket-version"]; - if (e.method !== "GET") { - R(this, e, t, 405, "Invalid HTTP method"); - return; - } - if (e.headers.upgrade.toLowerCase() !== "websocket") { - R(this, e, t, 400, "Invalid Upgrade header"); - return; - } - if (!n || !Ws.test(n)) { - R(this, e, t, 400, "Missing or invalid Sec-WebSocket-Key header"); - return; - } - if (o !== 8 && o !== 13) { - R(this, e, t, 400, "Missing or invalid Sec-WebSocket-Version header"); - return; - } - if (!this.shouldHandle(e)) { - H(t, 400); - return; - } - const l = e.headers["sec-websocket-protocol"]; - let f = /* @__PURE__ */ new Set(); - if (l !== void 0) - try { - f = $s.parse(l); - } catch { - R(this, e, t, 400, "Invalid Sec-WebSocket-Protocol header"); - return; - } - const a = e.headers["sec-websocket-extensions"], c = {}; - if (this.options.perMessageDeflate && a !== void 0) { - const h = new N( - this.options.perMessageDeflate, - !0, - this.options.maxPayload - ); - try { - const p = qe.parse(a); - p[N.extensionName] && (h.accept(p[N.extensionName]), c[N.extensionName] = h); - } catch { - R(this, e, t, 400, "Invalid or unacceptable Sec-WebSocket-Extensions header"); - return; - } - } - if (this.options.verifyClient) { - const h = { - origin: e.headers[`${o === 8 ? "sec-websocket-origin" : "origin"}`], - secure: !!(e.socket.authorized || e.socket.encrypted), - req: e - }; - if (this.options.verifyClient.length === 2) { - this.options.verifyClient(h, (p, v, _, u) => { - if (!p) - return H(t, v || 401, _, u); - this.completeUpgrade( - c, - n, - f, - e, - t, - r, - i - ); - }); - return; - } - if (!this.options.verifyClient(h)) - return H(t, 401); - } - this.completeUpgrade(c, n, f, e, t, r, i); - } - /** - * Upgrade the connection to WebSocket. 
- * - * @param {Object} extensions The accepted extensions - * @param {String} key The value of the `Sec-WebSocket-Key` header - * @param {Set} protocols The subprotocols - * @param {http.IncomingMessage} req The request object - * @param {(net.Socket|tls.Socket)} socket The network socket between the - * server and client - * @param {Buffer} head The first packet of the upgraded stream - * @param {Function} cb Callback - * @throws {Error} If called more than once with the same socket - * @private - */ - completeUpgrade(e, t, r, i, n, o, l) { - if (!n.readable || !n.writable) - return n.destroy(); - if (n[Ds]) - throw new Error( - "server.handleUpgrade() was called more than once with the same socket, possibly due to a misconfiguration" - ); - if (this._state > Ke) - return H(n, 503); - const a = [ - "HTTP/1.1 101 Switching Protocols", - "Upgrade: websocket", - "Connection: Upgrade", - `Sec-WebSocket-Accept: ${Bs("sha1").update(t + Is).digest("base64")}` - ], c = new this.options.WebSocket(null); - if (r.size) { - const h = this.options.handleProtocols ? this.options.handleProtocols(r, i) : r.values().next().value; - h && (a.push(`Sec-WebSocket-Protocol: ${h}`), c._protocol = h); - } - if (e[N.extensionName]) { - const h = e[N.extensionName].params, p = qe.format({ - [N.extensionName]: [h] - }); - a.push(`Sec-WebSocket-Extensions: ${p}`), c._extensions = e; - } - this.emit("headers", a, i), n.write(a.concat(`\r -`).join(`\r -`)), n.removeListener("error", Ze), c.setSocket(n, o, { - maxPayload: this.options.maxPayload, - skipUTF8Validation: this.options.skipUTF8Validation - }), this.clients && (this.clients.add(c), c.on("close", () => { - this.clients.delete(c), this._shouldEmitClose && !this.clients.size && process.nextTick(G, this); - })), l(c, i); - } -} -var Fs = As; -function js(s, e) { - for (const t of Object.keys(e)) - s.on(t, e[t]); - return function() { - for (const r of Object.keys(e)) - s.removeListener(r, e[r]); - }; -} -function G(s) { - s._state = pt, s.emit("close"); -} -function Ze() { - this.destroy(); -} -function H(s, e, t, r) { - t = t || ie.STATUS_CODES[e], r = { - Connection: "close", - "Content-Type": "text/html", - "Content-Length": Buffer.byteLength(t), - ...r - }, s.once("finish", s.destroy), s.end( - `HTTP/1.1 ${e} ${ie.STATUS_CODES[e]}\r -` + Object.keys(r).map((i) => `${i}: ${r[i]}`).join(`\r -`) + `\r -\r -` + t - ); -} -function R(s, e, t, r, i) { - if (s.listenerCount("wsClientError")) { - const n = new Error(i); - Error.captureStackTrace(n, R), s.emit("wsClientError", n, t, e); - } else - H(t, r, i); -} -const Zs = /* @__PURE__ */ z(Fs); -export { - qs as Receiver, - Ks as Sender, - Xs as WebSocket, - Zs as WebSocketServer, - Vs as createWebSocketStream, - Xs as default -}; diff --git a/spaces/furqankassa/AI-Dashboard-0134/style.css b/spaces/furqankassa/AI-Dashboard-0134/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/furqankassa/AI-Dashboard-0134/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git 
a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/default_runtime.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/default_runtime.py deleted file mode 100644 index b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/group_points.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. - - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. - """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. 
- - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. - """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' - new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. - """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. 
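            Example (a minimal usage sketch; shapes are illustrative and a CUDA device is assumed,
            since the underlying op is CUDA-only):
                feats = torch.randn(2, 16, 1024).cuda()                  # (B, C, N) float features
                idx = torch.randint(0, 1024, (2, 64, 32)).int().cuda()   # (B, npoint, nsample) int32 indices
                out = grouping_operation(feats, idx)                     # -> (B, C, 64, 32) grouped features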
- """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. - """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Jest Yayn 6 Canl Ma zle Sporun Keyfini karn.md b/spaces/gotiQspiryo/whisper-ui/examples/Jest Yayn 6 Canl Ma zle Sporun Keyfini karn.md deleted file mode 100644 index 6fe883cc7f546ac291a22b0cd7dc70ecd1d9d10b..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Jest Yayn 6 Canl Ma zle Sporun Keyfini karn.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Jest Yayın 6 Canlı Maç İzle


    Download ✒ ✒ ✒ https://urlgoal.com/2uyMYp



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/gradio/HuBERT/examples/adaptive_span/adaptive_span_attention.py b/spaces/gradio/HuBERT/examples/adaptive_span/adaptive_span_attention.py deleted file mode 100644 index 07f757bb8e1a8a67b1124175ee338c8735aa8d65..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/adaptive_span/adaptive_span_attention.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class AdaptiveMask(nn.Module): - """Soft masking function for adaptive size. - It masks out the last K values of an input. The masking value - goes from 1 to 0 gradually, so K can be learned with - back-propagation. - Args: - max_size: maximum size (i.e. input dimension) - ramp_size: size of the ramp going from 0 to 1 - init_val: initial size proportion not to be masked out - shape: learn multiple sizes independent of each other - """ - - def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)): - nn.Module.__init__(self) - self._max_size = max_size - self._ramp_size = ramp_size - self.current_val = nn.Parameter(torch.zeros(*shape) + init_val) - mask_template = torch.linspace(1 - max_size, 0, steps=max_size) - self.register_buffer("mask_template", mask_template) - - def forward(self, x): - mask = self.mask_template.float() + self.current_val.float() * self._max_size - mask = mask / self._ramp_size + 1 - mask = mask.clamp(0, 1) - if x.size(-1) < self._max_size: - # the input could have been trimmed beforehand to save computation - mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1)) - x = (x * mask).type_as(x) - return x - - def get_current_max_size(self, include_ramp=True): - current_size = math.ceil(self.current_val.max().item() * self._max_size) - if include_ramp: - current_size += self._ramp_size - current_size = max(0, min(self._max_size, current_size)) - return current_size - - def get_current_avg_size(self, include_ramp=True): - current_size = math.ceil( - self.current_val.float().mean().item() * self._max_size - ) - if include_ramp: - current_size += self._ramp_size - current_size = max(0, min(self._max_size, current_size)) - return current_size - - def clamp_param(self): - """this need to be called after each update""" - self.current_val.data.clamp_(0, 1) - - -class AdaptiveSpan(nn.Module): - """Adaptive attention span for Transformerself. - This module learns an attention span length from data for each - self-attention head. 
- Args: - attn_span: maximum attention span - adapt_span_loss: loss coefficient for the span length - adapt_span_ramp: length of the masking ramp - adapt_span_init: initial size ratio - adapt_span_cache: adapt cache size to reduce memory usage - """ - - def __init__( - self, - attn_span, - adapt_span_ramp, - adapt_span_init, - n_head, - adapt_span_layer, - **kargs - ): - nn.Module.__init__(self) - self._max_span = attn_span - self._n_head = n_head - self._adapt_span_layer = adapt_span_layer - if self._adapt_span_layer: - self._mask = AdaptiveMask( - max_size=self._max_span, - ramp_size=adapt_span_ramp, - init_val=adapt_span_init, - ) - else: - self._mask = AdaptiveMask( - max_size=self._max_span, - ramp_size=adapt_span_ramp, - init_val=adapt_span_init, - shape=(n_head, 1, 1), - ) - - def forward(self, attn, normalize=True): - """mask attention with the right span""" - # batch and head dimensions are merged together, so separate them first - self.clamp_param() - if self._adapt_span_layer: - attn = self._mask(attn) - else: - B = attn.size(0) # batch size - M = attn.size(1) # block size - attn = attn.reshape(B // self._n_head, self._n_head, M, -1) - attn = self._mask(attn) - attn = attn.view(B, M, -1) - return attn - - def get_trim_len(self): - """how much of memory can be trimmed to reduce computation""" - L = self._max_span - trim_len = min(L - 1, L - self._mask.get_current_max_size()) - # too fine granularity might be bad for the memory management - trim_len = math.floor(trim_len / 64) * 64 - return trim_len - - def trim_memory(self, query, key, value, key_pe): - """trim out unnecessary memory beforehand to reduce computation""" - trim_len = self.get_trim_len() - cache_size = key.size(1) - query.size(1) - trim_len_cache = trim_len - (self._max_span - cache_size) - if trim_len_cache > 0: - key = key[:, trim_len_cache:, :] - value = value[:, trim_len_cache:, :] - elif trim_len_cache < 0: - # cache is too short! this happens when validation resumes - # after a lot of updates. 
- key = F.pad(key, [0, 0, -trim_len_cache, 0]) - value = F.pad(value, [0, 0, -trim_len_cache, 0]) - if trim_len > 0: - if key_pe is not None: - key_pe = key_pe[:, :, trim_len:] - return key, value, key_pe - - def get_cache_size(self): - """determine how long the cache should be""" - trim_len = self.get_trim_len() - # give a buffer of 64 steps since a span might increase - # in future updates - return min(self._max_span, self._max_span - trim_len + 64) - - def get_loss(self): - """a loss term for regularizing the span length""" - return self._max_span * self._mask.current_val.float().mean() - - def get_current_max_span(self): - return self._mask.get_current_max_size() - - def get_current_avg_span(self): - return self._mask.get_current_avg_size() - - def clamp_param(self): - self._mask.clamp_param() diff --git a/spaces/gradio/image_segmentation/app.py b/spaces/gradio/image_segmentation/app.py deleted file mode 100644 index b7a7aec43c39cdd5b8fea7bd43295ad37e051436..0000000000000000000000000000000000000000 --- a/spaces/gradio/image_segmentation/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import gradio as gr -import torch -import random -import numpy as np -from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation - -device = torch.device("cpu") -model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade").to(device) -model.eval() -preprocessor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-tiny-ade") - -def visualize_instance_seg_mask(mask): - image = np.zeros((mask.shape[0], mask.shape[1], 3)) - labels = np.unique(mask) - label2color = {label: (random.randint(0, 1), random.randint(0, 255), random.randint(0, 255)) for label in labels} - for i in range(image.shape[0]): - for j in range(image.shape[1]): - image[i, j, :] = label2color[mask[i, j]] - image = image / 255 - return image - -def query_image(img): - target_size = (img.shape[0], img.shape[1]) - inputs = preprocessor(images=img, return_tensors="pt") - with torch.no_grad(): - outputs = model(**inputs) - outputs.class_queries_logits = outputs.class_queries_logits.cpu() - outputs.masks_queries_logits = outputs.masks_queries_logits.cpu() - results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach() - results = torch.argmax(results, dim=0).numpy() - results = visualize_instance_seg_mask(results) - return results - -demo = gr.Interface( - query_image, - inputs=[gr.Image()], - outputs="image", - title="MaskFormer Demo", - examples=[["example_2.png"]] -) - -demo.launch() \ No newline at end of file diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/SettingDialog.tsx b/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/SettingDialog.tsx deleted file mode 100644 index 004a9cf507695ec2f44bcc2dcf8ffe5e738d85b0..0000000000000000000000000000000000000000 --- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/SettingDialog.tsx +++ /dev/null @@ -1,105 +0,0 @@ -import { FC, useContext, useEffect, useReducer, useRef } from 'react'; - -import { useTranslation } from 'next-i18next'; - -import { useCreateReducer } from '@/hooks/useCreateReducer'; - -import { getSettings, saveSettings } from '@/utils/app/settings'; - -import { Settings } from '@/types/settings'; - -import HomeContext from '@/pages/api/home/home.context'; - -interface Props { - open: boolean; - onClose: () => void; -} - -export const SettingDialog: FC = ({ open, onClose }) => { - const { t } = useTranslation('settings'); - const 
settings: Settings = getSettings(); - const { state, dispatch } = useCreateReducer({ - initialState: settings, - }); - const { dispatch: homeDispatch } = useContext(HomeContext); - const modalRef = useRef(null); - - useEffect(() => { - const handleMouseDown = (e: MouseEvent) => { - if (modalRef.current && !modalRef.current.contains(e.target as Node)) { - window.addEventListener('mouseup', handleMouseUp); - } - }; - - const handleMouseUp = (e: MouseEvent) => { - window.removeEventListener('mouseup', handleMouseUp); - onClose(); - }; - - window.addEventListener('mousedown', handleMouseDown); - - return () => { - window.removeEventListener('mousedown', handleMouseDown); - }; - }, [onClose]); - - const handleSave = () => { - homeDispatch({ field: 'lightMode', value: state.theme }); - saveSettings(state); - }; - - // Render nothing if the dialog is not open. - if (!open) { - return <>; - } - - // Render the dialog. - return ( -
    -
    -
    - -
    -
    - ); -}; diff --git a/spaces/gstdl/screener-saham-demo/app/dataset/pull_data.py b/spaces/gstdl/screener-saham-demo/app/dataset/pull_data.py deleted file mode 100644 index 0b80cb3f1bf0b11cf500e4be7794eebf57fde414..0000000000000000000000000000000000000000 --- a/spaces/gstdl/screener-saham-demo/app/dataset/pull_data.py +++ /dev/null @@ -1,149 +0,0 @@ -import tabula -import yfinance as yfi -import sqlite3 -import pandas as pd -import json -import talib -import time -import datetime -import warnings - -warnings.filterwarnings("ignore") - -with open("patterns.json", "r") as f: - patterns = json.load(f) - -update_time = datetime.datetime.now() -# dummy update time - -def find_patterns(df): - result = pd.DataFrame( - columns=[ - "Date", - "Kode", - "Pattern", - "Pattern_Score", - "Open_Close_Change", - "High_Low_Change", - ] - ) - for attr, pattern in patterns.items(): - scores = getattr(talib, attr)(df["Open"], df["High"], df["Low"], df["Close"]) - mask = scores != 0 - temp_result = df[mask] - if len(temp_result) > 0: - temp_result = temp_result.assign( - Open_Close_Change=(temp_result["Close"] - temp_result["Open"]) / temp_result["Open"], - High_Low_Change=(temp_result["High"] - temp_result["Low"]) / temp_result["Low"], - Pattern=[pattern] * len(temp_result), - Pattern_Score=scores[mask].values, - )[result.columns] - result = result.append(temp_result) - result = result.assign(time_updated = update_time) - return result - - -def pull_data_yfi(): - start = time.time() - with sqlite3.connect("ihsg.db") as con: - tickers = pd.read_sql( - """ - SELECT Kode FROM list_perusahaan - WHERE Kode != "IHSG" - """, - con=con, - ).values.flatten() - ihsg = ( - yfi.download("^JKSE", start="2017-01-01", end="2023-01-10", progress=False) - .reset_index() - .dropna() - .assign(Kode="IHSG") - ) - ihsg = ihsg[["Date", "Kode", "Open", "High", "Low", "Close", "Volume"]] - ihsg = ihsg.assign(time_updated = update_time) - ihsg.to_sql("historical", if_exists="replace", con=con, index=False) - pattern_search = find_patterns(ihsg) - pattern_search.to_sql("patterns", if_exists="replace", con=con, index=False) - print("INSERTION RESULT: \n") - print(pd.read_sql("SELECT * FROM historical", con=con).tail(10)) - print(pd.read_sql("SELECT * FROM historical", con=con).shape) - print("\n\n*--\n") - for i in range(0, len(tickers), 50): - ticker = [f"{kode}.JK" for kode in tickers[i : i + 50]] - df = ( - yfi.download(ticker, start="2017-01-01", end="2023-01-10", progress=False) - .T.unstack(level=1) - .T.reset_index() - .dropna() - .rename(columns={"level_1": "Kode"}) - ) - df = df[["Date", "Kode", "Open", "High", "Low", "Close", "Volume"]] - df["Kode"] = df["Kode"].str.replace(".JK", "") - for j, kode in enumerate(df["Kode"].unique()): - print(f"Finding Patterns for {kode} #{i+j+1}\t\t time elapsed = {time.time() - start:.2f} s") - pattern_search = find_patterns(df[df["Kode"] == kode]) - pattern_search.to_sql("patterns", if_exists="append", con=con, index=False) - df = df.assign(time_updated = update_time) - df.to_sql("historical", if_exists="append", con=con, index=False) - print("INSERTION RESULT: \n") - print(pd.read_sql("SELECT * FROM historical", con=con).tail(10)) - print(pd.read_sql("SELECT * FROM historical", con=con).shape) - print("\n\n*--\n") - time.sleep(60) - con.commit() - -def pull_data_klasifikasi_industri(): - with sqlite3.connect("ihsg.db") as con: - cur = con.cursor() - cur.execute("DROP TABLE IF EXISTS list_perusahaan") - cur.execute(""" - CREATE TABLE list_perusahaan ( - Kode VARCHAR(4), - Nama TEXT, - 
Sektor TEXT, - Instrumen TEXT) - """) - cur.execute(""" - INSERT INTO list_perusahaan VALUES - ('IHSG', 'Indeks Harga Saham Gabungan', NULL, 'Indeks') - """) - # TODO: Change Schema from Star Schema to Snowflake Schema - # list_perusahaan table will be the dimension table for sector and sub-sector fact tables - # note: list_perusahaan table is a dimension table for historical fact table - - dfs = tabula.read_pdf("Klasifikasi Industri Perusahaan Tercatat.pdf", pages="all", stream=True) - # print(len(dfs)) - for df in dfs: - kode, nama, sektor = None, None, None - for row in df.iloc[2:,:].itertuples(): - if kode is not None and pd.notna(row[2]): - cur.execute(f""" - INSERT INTO list_perusahaan VALUES - ('{kode}', '{nama}', '{sektor}', 'Saham') - """) - kode, nama, sektor = None, None, None - elif kode is not None and pd.isna(row[2]): - if pd.notna(row[3]): - nama += " " + row[3] - if pd.notna(row[5]): - sektor += " " + row[5] - if kode is None and nama is None and sektor is None and pd.notna(row[2]): - if "saham" in row[8].lower(): - kode = row[2] - nama = row[3] - sektor = row[5] - else: - if kode is not None: - cur.execute(f""" - INSERT INTO list_perusahaan VALUES - ('{kode}', '{nama}', '{sektor}', 'Saham') - """) - print("INSERTION RESULT: \n") - print(pd.read_sql("SELECT * FROM list_perusahaan", con=con).tail(10)) - print(pd.read_sql("SELECT * FROM list_perusahaan", con=con).shape) - print("\n\n*--\n") - con.commit() - -if __name__ == "__main__": - pull_data_klasifikasi_industri() - pull_data_yfi() diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r50.py b/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r50.py deleted file mode 100644 index 08ba55dbbea6df0afffddbb3d1ed173efad99604..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/configs/ms1mv3_r50.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/h2oai/h2ogpt-chatbot/src/gen.py b/spaces/h2oai/h2ogpt-chatbot/src/gen.py deleted file mode 100644 index d8602e7b0a56920a4afb7b4ac9c73e7449216729..0000000000000000000000000000000000000000 --- a/spaces/h2oai/h2ogpt-chatbot/src/gen.py +++ /dev/null @@ -1,3831 +0,0 @@ -import ast -import copy -import functools -import inspect -import queue -import sys -import os -import time -import traceback -import typing -import warnings -from datetime import datetime -import requests -from requests import ConnectTimeout, JSONDecodeError -from urllib3.exceptions import ConnectTimeoutError, MaxRetryError, ConnectionError -from requests.exceptions import ConnectionError as ConnectionError2 -from requests.exceptions import ReadTimeout as ReadTimeout2 - -if os.path.dirname(os.path.abspath(__file__)) not in sys.path: - sys.path.append(os.path.dirname(os.path.abspath(__file__))) - -os.environ['HF_HUB_DISABLE_TELEMETRY'] = 
'1' -os.environ['BITSANDBYTES_NOWELCOME'] = '1' -warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') - -# more is not useful typically, don't let these go beyond limits and eat up resources -max_cores = max(1, os.cpu_count() // 2) -if os.getenv('NUMEXPR_MAX_THREADS') is None: - os.environ['NUMEXPR_MAX_THREADS'] = str(min(8, max_cores)) -if os.getenv('NUMEXPR_NUM_THREADS') is None: - os.environ['NUMEXPR_NUM_THREADS'] = str(min(8, max_cores)) -if os.getenv('OMP_NUM_THREADS') is None: - os.environ['OMP_NUM_THREADS'] = str(min(8, max_cores)) -if os.getenv('OPENBLAS_NUM_THREADS') is None: - os.environ['OPENBLAS_NUM_THREADS'] = str(min(8, max_cores)) -if os.getenv('DUCKDB_NUM_THREADS') is None: - os.environ['DUCKDB_NUM_THREADS'] = str(min(4, max_cores)) -if os.getenv('RAYON_RS_NUM_CPUS') is None: - os.environ['RAYON_RS_NUM_CPUS'] = str(min(8, max_cores)) -if os.getenv('RAYON_NUM_THREADS') is None: - os.environ['RAYON_NUM_THREADS'] = str(min(8, max_cores)) - -import numpy as np -from evaluate_params import eval_func_param_names, no_default_param_names, input_args_list -from enums import DocumentSubset, LangChainMode, no_lora_str, model_token_mapping, no_model_str, \ - LangChainAction, LangChainAgent, DocumentChoice, LangChainTypes, super_source_prefix, \ - super_source_postfix, t5_type, get_langchain_prompts, gr_to_lg, invalid_key_msg -from loaders import get_loaders -from utils import set_seed, clear_torch_cache, NullContext, wrapped_partial, EThread, get_githash, \ - import_matplotlib, get_device, makedirs, get_kwargs, start_faulthandler, get_hf_server, FakeTokenizer, \ - have_langchain, set_openai, cuda_vis_check, H2O_Fire, lg_to_gr, str_to_list, str_to_dict, get_token_count - -start_faulthandler() -import_matplotlib() - -SEED = 1236 -set_seed(SEED) - -from typing import Union - -import torch -from transformers import GenerationConfig, AutoModel, TextIteratorStreamer - -from prompter import Prompter, inv_prompt_type_to_model_lower, non_hf_types, PromptType, get_prompt, generate_prompt -from stopping import get_stopping - -langchain_actions = [x.value for x in list(LangChainAction)] - -langchain_agents_list = [x.value for x in list(LangChainAgent)] - - -def main( - load_8bit: bool = False, - load_4bit: bool = False, - low_bit_mode: int = 1, - load_half: bool = None, - load_gptq: str = '', - load_exllama: bool = False, - use_safetensors: bool = False, - revision: str = None, - use_gpu_id: bool = True, - base_model: str = '', - tokenizer_base_model: str = '', - lora_weights: str = "", - gpu_id: int = 0, - compile_model: bool = None, - use_cache: bool = None, - inference_server: str = "", - prompt_type: Union[int, str] = None, - prompt_dict: typing.Dict = None, - system_prompt: str = '', - - # llama and gpt4all settings - llamacpp_dict: typing.Dict = dict(n_gpu_layers=100, use_mlock=True, n_batch=1024, n_gqa=0), - model_path_llama: str = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin', - # 'llama-2-7b-chat.ggmlv3.q8_0.bin', - model_name_gptj: str = 'ggml-gpt4all-j-v1.3-groovy.bin', - model_name_gpt4all_llama: str = 'ggml-wizardLM-7B.q4_2.bin', - model_name_exllama_if_no_config: str = 'TheBloke/Nous-Hermes-Llama2-GPTQ', - - model_lock: typing.List[typing.Dict[str, str]] = None, - model_lock_columns: int = None, - fail_if_cannot_connect: bool = False, - - # input to generation - temperature: float = None, - top_p: float = None, - top_k: int = None, - num_beams: int = None, - repetition_penalty: float = 
None, - num_return_sequences: int = None, - do_sample: bool = None, - max_new_tokens: int = None, - min_new_tokens: int = None, - early_stopping: Union[bool, str] = None, - max_time: float = None, - - memory_restriction_level: int = None, - debug: bool = False, - save_dir: str = None, - share: bool = False, - local_files_only: bool = False, - resume_download: bool = True, - use_auth_token: Union[str, bool] = False, - trust_remote_code: Union[str, bool] = True, - rope_scaling: dict = None, - max_seq_len: int = None, - offload_folder: str = "offline_folder", - - src_lang: str = "English", - tgt_lang: str = "Russian", - - prepare_offline_level: int = 0, - cli: bool = False, - cli_loop: bool = True, - gradio: bool = True, - gradio_offline_level: int = 0, - server_name: str = "0.0.0.0", - root_path: str = "", - chat: bool = True, - chat_conversation: typing.List[typing.Tuple[str, str]] = None, - text_context_list: typing.List[str] = None, - stream_output: bool = True, - async_output: bool = True, - num_async: int = 3, - show_examples: bool = None, - verbose: bool = False, - h2ocolors: bool = True, - dark: bool = False, # light tends to be best - height: int = 600, - show_lora: bool = True, - show_llama: bool = True, - show_gpt4all: bool = False, - login_mode_if_model0: bool = False, - block_gradio_exit: bool = True, - concurrency_count: int = 1, - api_open: bool = False, - allow_api: bool = True, - input_lines: int = 1, - gradio_size: str = None, - show_copy_button: bool = True, - large_file_count_mode: bool = False, - pre_load_embedding_model: bool = True, - - auth: Union[typing.List[typing.Tuple[str, str]], str] = None, - auth_filename: str = None, - auth_access: str = 'open', - auth_freeze: bool = False, - auth_message: str = None, - guest_name: str = "guest", - enforce_h2ogpt_api_key: bool = None, - h2ogpt_api_keys: Union[list, str] = [], - h2ogpt_key: str = None, - - max_max_time=None, - max_max_new_tokens=None, - - visible_models: list = None, - visible_visible_models: bool = True, - visible_submit_buttons: bool = True, - visible_side_bar: bool = True, - visible_doc_track: bool = True, - visible_chat_tab: bool = True, - visible_doc_selection_tab: bool = True, - visible_doc_view_tab: bool = True, - visible_chat_history_tab: bool = True, - visible_expert_tab: bool = True, - visible_models_tab: bool = True, - visible_system_tab: bool = True, - visible_tos_tab: bool = False, - visible_login_tab: bool = True, - visible_hosts_tab: bool = False, - chat_tables: bool = False, - visible_h2ogpt_header: bool = True, - max_raw_chunks: int = None, - - sanitize_user_prompt: bool = False, - sanitize_bot_response: bool = False, - - extra_model_options: typing.List[str] = [], - extra_lora_options: typing.List[str] = [], - extra_server_options: typing.List[str] = [], - - score_model: str = 'auto', - - eval_filename: str = None, - eval_prompts_only_num: int = 0, - eval_prompts_only_seed: int = 1234, - eval_as_output: bool = False, - - langchain_mode: str = None, - user_path: str = None, - langchain_modes: list = [LangChainMode.USER_DATA.value, LangChainMode.MY_DATA.value, LangChainMode.LLM.value, - LangChainMode.DISABLED.value], - langchain_mode_paths: dict = {LangChainMode.USER_DATA.value: None}, - langchain_mode_types: dict = {LangChainMode.USER_DATA.value: LangChainTypes.SHARED.value}, - detect_user_path_changes_every_query: bool = False, - - langchain_action: str = LangChainAction.QUERY.value, - langchain_agents: list = [], - force_langchain_evaluate: bool = False, - - visible_langchain_actions: list = 
[LangChainAction.QUERY.value, LangChainAction.SUMMARIZE_MAP.value], - visible_langchain_agents: list = langchain_agents_list.copy(), - - document_subset: str = DocumentSubset.Relevant.name, - document_choice: list = [DocumentChoice.ALL.value], - - use_llm_if_no_docs: bool = True, - load_db_if_exists: bool = True, - keep_sources_in_context: bool = False, - db_type: str = 'chroma', - use_openai_embedding: bool = False, - use_openai_model: bool = False, - hf_embedding_model: str = None, - migrate_embedding_model: str = False, - auto_migrate_db: bool = False, - cut_distance: float = 1.64, - answer_with_sources: bool = True, - append_sources_to_answer: bool = True, - show_accordions: bool = True, - top_k_docs_max_show: int = 10, - show_link_in_sources: bool = True, - pre_prompt_query: str = None, - prompt_query: str = None, - pre_prompt_summary: str = None, - prompt_summary: str = None, - add_chat_history_to_context: bool = True, - add_search_to_context: bool = False, - context: str = '', - iinput: str = '', - allow_upload_to_user_data: bool = True, - reload_langchain_state: bool = True, - allow_upload_to_my_data: bool = True, - enable_url_upload: bool = True, - enable_text_upload: bool = True, - enable_sources_list: bool = True, - chunk: bool = True, - chunk_size: int = 512, - top_k_docs: int = None, - docs_ordering_type: str = 'reverse_ucurve_sort', - min_max_new_tokens=256, - auto_reduce_chunks: bool = True, - max_chunks: int = 100, - headsize: int = 50, - n_jobs: int = -1, - - # urls - use_unstructured=True, - use_playwright=False, - use_selenium=False, - - # pdfs - use_pymupdf='auto', - use_unstructured_pdf='auto', - use_pypdf='auto', - enable_pdf_ocr='auto', - enable_pdf_doctr='auto', - try_pdf_as_html='auto', - - # images - enable_ocr=False, - enable_doctr=False, - enable_pix2struct=False, - enable_captions=True, - - pre_load_caption_model: bool = False, - caption_gpu: bool = True, - captions_model: str = "Salesforce/blip-image-captioning-base", - doctr_gpu: bool = True, - - # json - jq_schema='.[]', - - max_quality: bool = False, - - enable_heap_analytics: bool = True, - heap_app_id: str = "1680123994", -): - """ - - :param load_8bit: load model in 8-bit using bitsandbytes - :param load_4bit: load model in 4-bit using bitsandbytes - :param low_bit_mode: 0: no quantization config 1: change compute 2: nf4 3: double quant 4: 2 and 3 - See: https://huggingface.co/docs/transformers/main_classes/quantization - If using older bitsandbytes or transformers, 0 is required - :param load_half: load model in float16 (None means auto, which means True unless t5 based model) - otherwise specify bool - :param load_gptq: to load model with GPTQ, put model_basename here, e.g. gptq_model-4bit--1g - :param load_exllama: whether to use exllama (only applicable to LLaMa1/2 models with 16-bit or GPTQ - :param use_safetensors: to use safetensors version (assumes file/HF points to safe tensors version) - :param revision: Which HF revision to use - :param use_gpu_id: whether to control devices with gpu_id. If False, then spread across GPUs - :param base_model: model HF-type name. If use --base_model to preload model, cannot unload in gradio in models tab - :param tokenizer_base_model: tokenizer HF-type name. Usually not required, inferred from base_model. 
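        A minimal loading sketch combining the model and quantization options above (the model name is one of the
        examples used elsewhere in this docstring; the flag values are illustrative, not defaults):
        e.g. python generate.py --base_model=h2oai/h2ogpt-oasst1-512-12b --load_8bit=True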
- :param lora_weights: LORA weights path/HF link - :param gpu_id: if use_gpu_id, then use gpu_id for cuda device ID, or auto mode if gpu_id != -1 - :param compile_model Whether to compile the model - :param use_cache: Whether to use caching in model (some models fail when multiple threads use) - :param inference_server: Consume base_model as type of model at this address - Address can be text-generation-server hosting that base_model - e.g. python generate.py --inference_server="http://192.168.1.46:6112" --base_model=h2oai/h2ogpt-oasst1-512-12b - - Or Address can be "openai_chat" or "openai" for OpenAI API - Or Address can be "openai_azure_chat" or "openai_azure" for Azure OpenAI API - e.g. python generate.py --inference_server="openai_chat" --base_model=gpt-3.5-turbo - e.g. python generate.py --inference_server="openai" --base_model=text-davinci-003 - e.g. python generate.py --inference_server="openai_azure_chat::::" --base_model=gpt-3.5-turbo - e.g. python generate.py --inference_server="openai_azure::::" --base_model=text-davinci-003 - Optionals (Replace with None or just leave empty but keep :) - of some deployment name - : e.g. ".openai.azure.com" for some without https:// - of some api, e.g. 2023-05-15 - e.g. 0613 - - Or Address can be for vLLM: - Use: "vllm:IP:port" for OpenAI-compliant vLLM endpoint - Note: vllm_chat not supported by vLLM project. - - Or Address can be replicate: - Use: - --inference_server=replicate: will use a Replicate server, requiring a Replicate key. - e.g. looks like "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5" - - Or Address can be for AWS SageMaker: - Use: "sagemaker_chat:" for chat models that AWS sets up as dialog - Use: "sagemaker:" for foundation models that AWS only text as inputs - - :param prompt_type: type of prompt, usually matched to fine-tuned model or plain for foundational model - :param prompt_dict: If prompt_type=custom, then expects (some) items returned by get_prompt(..., return_dict=True) - :param system_prompt: Universal system prompt to use if model supports, like LLaMa2, regardless of prompt_type definition. - Useful for langchain case to control behavior, or OpenAI and Replicate. - If None, 'None', or 'auto', then for LLaMa or other models that internally have system_prompt, will use default for each model - If '', then no system prompt (no empty template given to model either, just no system part added at all) - If some string not in ['None', 'auto'], then use that as system prompt - Default is '', no system_prompt, because often it hurts performance/accuracy - - :param llamacpp_dict: - n_gpu_layers: for llama.cpp based models, number of GPU layers to offload (default is all by using large value) - use_mlock: when using `llama.cpp` based CPU models, for computers with low system RAM or slow CPUs, recommended False - n_batch: Can make smaller to 128 for slower low-memory CPU systems - n_gqa: Required to be 8 for LLaMa 70B - ... etc. anything that could be passed to llama.cpp or GPT4All models - e.g. 
python generate.py --base_model='llama' --prompt_type=llama2 --score_model=None --langchain_mode='UserData' --user_path=user_path --llamacpp_dict="{'n_gpu_layers':25,'n_batch':128}" - :param model_path_llama: model path or URL (for auto-download) - :param model_name_gptj: model path or URL (for auto-download) - :param model_name_gpt4all_llama: model path or URL (for auto-download) - :param model_name_exllama_if_no_config: exllama model's full path for model, tokenizer, generator for use when no HuggingFace config - - :param model_lock: Lock models to specific combinations, for ease of use and extending to many models - Only used if gradio = True - List of dicts, each dict has base_model, tokenizer_base_model, lora_weights, inference_server, prompt_type, and prompt_dict - If all models have same prompt_type, and prompt_dict, can still specify that once in CLI outside model_lock as default for dict - Can specify model_lock instead of those items on CLI - As with CLI itself, base_model can infer prompt_type and prompt_dict if in prompter.py. - Also, tokenizer_base_model and lora_weights are optional. - Also, inference_server is optional if loading model from local system. - All models provided will automatically appear in compare model mode - Model loading-unloading and related choices will be disabled. Model/lora/server adding will be disabled - :param model_lock_columns: How many columns to show if locking models (and so showing all at once) - If None, then defaults to up to 3 - if -1, then all goes into 1 row - Maximum value is 4 due to non-dynamic gradio rendering elements - :param fail_if_cannot_connect: if doing model locking (e.g. with many models), fail if True. Otherwise ignore. - Useful when many endpoints and want to just see what works, but still have to wait for timeout. - - :param temperature: generation temperature - :param top_p: generation top_p - :param top_k: generation top_k - :param num_beams: generation number of beams - :param repetition_penalty: generation repetition penalty - :param num_return_sequences: generation number of sequences (1 forced for chat) - :param do_sample: generation sample - :param max_new_tokens: generation max new tokens - :param min_new_tokens: generation min tokens - :param early_stopping: generation early stopping - :param max_time: maximum time to allow for generation - :param memory_restriction_level: 0 = no restriction to tokens or model, 1 = some restrictions on token 2 = HF like restriction 3 = very low memory case - :param debug: enable debug mode - :param save_dir: directory chat data is saved to - :param share: whether to share the gradio app with sharable URL - :param local_files_only: whether to only use local files instead of doing to HF for models - :param resume_download: whether to resume downloads from HF for models - :param use_auth_token: whether to use HF auth token (requires CLI did huggingface-cli login before) - :param trust_remote_code: whether to use trust any code needed for HF model - :param rope_scaling: - For HF transformers model: scaling for rope-based models, e.g. --rope_scaling="{'type':'dynamic', 'factor':4}" - For exllama model: --rope_scaling="{'alpha_value':4}" . 
This automatically scales max_seq_len for exllama - :param max_seq_len: Manually set maximum sequence length for the LLM - :param offload_folder: path for spilling model onto disk - :param src_lang: source languages to include if doing translation (None = all) - :param tgt_lang: target languages to include if doing translation (None = all) - - :param prepare_offline_level: - Whether to just prepare for offline use, do not go into cli, eval, or gradio run modes - 0 : no prep - 1: prepare just h2oGPT with exact same setup as passed to CLI and ensure all artifacts for h2oGPT alone added to ~/.cache/ - 2: prepare h2oGPT + all inference servers so h2oGPT+inference servers can use the ~/.cache/ - :param cli: whether to use CLI (non-gradio) interface. - :param cli_loop: whether to loop for CLI (False usually only for testing) - :param gradio: whether to enable gradio, or to enable benchmark mode - :param gradio_offline_level: > 0, then change fonts so full offline - == 1 means backend won't need internet for fonts, but front-end UI might if font not cached - == 2 means backend and frontend don't need internet to download any fonts. - Note: Some things always disabled include HF telemetry, gradio telemetry, chromadb posthog that involve uploading. - This option further disables google fonts for downloading, which is less intrusive than uploading, - but still required in air-gapped case. The fonts don't look as nice as google fonts, but ensure full offline behavior. - Also set --share=False to avoid sharing a gradio live link. - :param server_name: IP to use. In linux 0.0.0.0 is good choice so exposed to outside host, else for only local use 127.0.0.1. - For windows/MAC 0.0.0.0 or 127.0.0.1 will work, but may need to specify actual LAN IP address for other LAN clients to see. - :param root_path: The root path (or "mount point") of the application, - if it's not served from the root ("/") of the domain. Often used when the application is behind a reverse proxy - that forwards requests to the application. For example, if the application is served at "https://example.com/myapp", - the `root_path` should be set to "/myapp". - :param chat: whether to enable chat mode with chat history - :param chat_conversation: list of tuples of (human, bot) conversation pre-appended to existing chat when using instruct/chat models - Requires also add_chat_history_to_context = True - It does *not* require chat=True, so works with nochat_api etc. - :param text_context_list: List of strings to add to context for non-database version of document Q/A for faster handling via API etc. - Forces LangChain code path and uses as many entries in list as possible given max_seq_len, with first assumed to be most relevant and to go near prompt. 
- :param stream_output: whether to stream output - :param async_output: Whether to do asyncio handling - For summarization - Applicable to HF TGI server - Only if stream_output=False in CLI, UI, or API - :param num_async: Number of simultaneously allowed asyncio calls to make for async_output - Too many will overload inference server, too few will be too slow - :param show_examples: whether to show clickable examples in gradio - :param verbose: whether to show verbose prints - :param h2ocolors: whether to use H2O.ai theme - :param dark: whether to use dark mode for UI by default (still controlled in UI) - :param height: height of chat window - :param show_lora: whether to show LORA options in UI (expert so can be hard to understand) - :param show_llama: whether to show LLaMa.cpp/GPT4All options in UI (only likely useful if have weak GPUs) - :param show_gpt4all: whether to show GPT4All models in UI (not often useful, llama.cpp models best) - :param login_mode_if_model0: set to True to load --base_model after client logs in, to be able to free GPU memory when model is swapped - :param block_gradio_exit: whether to block gradio exit (used for testing) - :param concurrency_count: gradio concurrency count (1 is optimal for LLMs) - :param api_open: If False, don't let API calls skip gradio queue - :param allow_api: whether to allow API calls at all to gradio server - :param input_lines: how many input lines to show for chat box (>1 forces shift-enter for submit, else enter is submit) - :param gradio_size: Overall size of text and spaces: "xsmall", "small", "medium", "large". - Small useful for many chatbots in model_lock mode - :param show_copy_button: Whether to show copy button for chatbots - :param large_file_count_mode: Whether to force manual update to UI of drop-downs, good idea if millions of chunks or documents - :param pre_load_embedding_model: Whether to preload embedding model for shared use across DBs and users (multi-thread safe only) - - :param auth: gradio auth for launcher in form [(user1, pass1), (user2, pass2), ...] - e.g. --auth=[('jon','password')] with no spaces - e.g. --auth="[('jon', 'password)())(')]" so any special characters can be used - e.g. --auth=auth.json to specify persisted state file with name auth.json (auth_filename then not required) - e.g. --auth='' will use default auth.json as file name for persisted state file (auth_filename then not required) - e.g. --auth=None will use no auth, but still keep track of auth state, just not from logins - :param auth_filename: - Set auth filename, used only if --auth= was passed list of user/passwords - :param auth_access: - 'open': Allow new users to be added - 'closed': Stick to existing users - :param auth_freeze: whether freeze authentication based upon current file, no longer update file - :param auth_message: Message to show if having users login, fixed if passed, else dynamic internally - :param guest_name: guess name if using auth and have open access. - If '', then no guest allowed even if open access, then all databases for each user always persisted - :param enforce_h2ogpt_api_key: Whether to enforce h2oGPT token usage for API - :param h2ogpt_api_keys: list of tokens allowed for API access or file accessed on demand for json of list of keys - :param h2ogpt_key: E.g. 
can be set when accessing gradio h2oGPT server from local gradio h2oGPT server that acts as client to that inference server - - :param max_max_time: Maximum max_time for gradio slider - :param max_max_new_tokens: Maximum max_new_tokens for gradio slider - :param min_max_new_tokens: Minimum of max_new_tokens, when auto-scaling down to handle more docs/prompt, but still let generation have some tokens - - :param visible_models: Which models in model_lock list to show by default - Takes integers of position in model_lock (model_states) list or strings of base_model names - Ignored if model_lock not used - For nochat API, this is single item within a list for model by name or by index in model_lock - If None, then just use first model in model_lock list - If model_lock not set, use model selected by CLI --base_model etc. - - :param visible_visible_models: Whether visible models drop-down is visible in UI - :param visible_submit_buttons: whether submit buttons are visible when UI first comes up - :param visible_side_bar: whether left side bar is visible when UI first comes up - :param visible_doc_track: whether left side bar's document tracking is visible when UI first comes up - :param visible_chat_tab: "" for chat tab - :param visible_doc_selection_tab: "" for doc selection tab - :param visible_doc_view_tab: "" for doc view tab - :param visible_chat_history_tab: "" for chat history tab - :param visible_expert_tab: "" for expert tab - :param visible_models_tab: "" for models tab - :param visible_system_tab: "" for system tab - :param visible_tos_tab: "" for ToS tab - :param visible_login_tab: "" for Login tab - :param visible_hosts_tab: "" for hosts tab - :param chat_tables: Just show Chat as block without tab (useful if want only chat view) - :param visible_h2ogpt_header: Whether github stars, URL, logo, and QR code are visible - :param max_raw_chunks: Maximum number of chunks to show in UI when asking for raw DB text from documents/collection - - :param sanitize_user_prompt: whether to remove profanity from user input (slows down input processing) - Requires optional packages: - pip install alt-profanity-check==1.2.2 better-profanity==0.7.0 - :param sanitize_bot_response: whether to remove profanity and repeat lines from bot output (about 2x slower generation for long streaming cases due to better_profanity being slow) - :param extra_model_options: extra models to show in list in gradio - :param extra_lora_options: extra LORA to show in list in gradio - :param extra_server_options: extra servers to show in list in gradio - :param score_model: which model to score responses - None: no response scoring - 'auto': auto mode, '' (no model) for CPU or 1 GPU, 'OpenAssistant/reward-model-deberta-v3-large-v2' for >=2 GPUs, - because on CPU takes too much compute just for scoring response - :param eval_filename: json file to use for evaluation, if None is sharegpt - :param eval_prompts_only_num: for no gradio benchmark, if using eval_filename prompts for eval instead of examples - :param eval_prompts_only_seed: for no gradio benchmark, seed for eval_filename sampling - :param eval_as_output: for no gradio benchmark, whether to test eval_filename output itself - - :param langchain_mode: Data source to include. Choose "UserData" to only consume files from make_db.py. 
- None: auto mode, check if langchain package exists, at least do LLM if so, else Disabled - If not passed, then chosen to be first langchain_modes, else langchain_mode->Disabled is set if no langchain_modes either - WARNING: wiki_full requires extra data processing via read_wiki_full.py and requires really good workstation to generate db, unless already present. - :param user_path: user path to glob from to generate db for vector search, for 'UserData' langchain mode. - If already have db, any new/changed files are added automatically if path set, does not have to be same path used for prior db sources - :param langchain_modes: dbs to generate at launch to be ready for LLM - Apart from additional user-defined collections, can include ['wiki', 'wiki_full', 'UserData', 'MyData', 'github h2oGPT', 'DriverlessAI docs'] - But wiki_full is expensive and requires preparation - To allow personal space only live in session, add 'MyData' to list - Default: If only want to consume local files, e.g. prepared by make_db.py, only include ['UserData'] - If have own user modes, need to add these here or add in UI. - :param langchain_mode_paths: dict of langchain_mode keys and disk path values to use for source of documents - E.g. "{'UserData2': 'userpath2'}" - A disk path be None, e.g. --langchain_mode_paths="{'UserData2': None}" even if existing DB, to avoid new documents being added from that path, source links that are on disk still work. - If `--user_path` was passed, that path is used for 'UserData' instead of the value in this dict - :param langchain_mode_types: dict of langchain_mode keys and database types - E.g. python generate.py --base_model=llama --langchain_modes=['TestData'] --langchain_mode_types="{'TestData':'shared'}" - The type is attempted to be inferred if directory already exists, then don't have to pass this - :param detect_user_path_changes_every_query: whether to detect if any files changed or added every similarity search (by file hashes). - Expensive for large number of files, so not done by default. By default only detect changes during db loading. - - :param langchain_action: Mode langchain operations in on documents. - Query: Make query of document(s) - Summarize or Summarize_map_reduce: Summarize document(s) via map_reduce - Summarize_all: Summarize document(s) using entire document at once - Summarize_refine: Summarize document(s) using entire document, and try to refine before returning summary - :param langchain_agents: Which agents to use - 'search': Use Web Search as context for LLM response, e.g. SERP if have SERPAPI_API_KEY in env - :param force_langchain_evaluate: Whether to force langchain LLM use even if not doing langchain, mostly for testing. 
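        A minimal document Q/A sketch combining the langchain options above (the path and action value are
        illustrative choices, not required settings):
        e.g. python generate.py --base_model=llama --langchain_mode=UserData --user_path=user_path --langchain_action=Query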
- - :param visible_langchain_actions: Which actions to allow - :param visible_langchain_agents: Which agents to allow - - :param document_subset: Default document choice when taking subset of collection - :param document_choice: Chosen document(s) by internal name, 'All' means use all docs - - :param use_llm_if_no_docs: Whether to use LLM even if no documents, when langchain_mode=UserData or MyData or custom - :param load_db_if_exists: Whether to load chroma db if exists or re-generate db - :param keep_sources_in_context: Whether to keep url sources in context, not helpful usually - :param db_type: 'faiss' for in-memory - 'chroma' (for chroma >= 0.4) - 'chroma_old' (for chroma < 0.4) -- recommended for large collections - 'weaviate' for persisted on disk - :param use_openai_embedding: Whether to use OpenAI embeddings for vector db - :param use_openai_model: Whether to use OpenAI model for use with vector db - :param hf_embedding_model: Which HF embedding model to use for vector db - Default is instructor-large with 768 parameters per embedding if have GPUs, else all-MiniLM-L6-v2 if no GPUs - Can also choose simpler model with 384 parameters per embedding: "sentence-transformers/all-MiniLM-L6-v2" - Can also choose even better embedding with 1024 parameters: 'hkunlp/instructor-xl' - We support automatically changing of embeddings for chroma, with a backup of db made if this is done - :param migrate_embedding_model: whether to use hf_embedding_model embedding even if database already had an embedding set. - used to migrate all embeddings to a new one, but will take time to re-embed. - Default (False) is to use the prior embedding for existing databases, and only use hf_embedding_model for new databases - If had old database without embedding saved, then hf_embedding_model is also used. - :param auto_migrate_db: whether to automatically migrate any chroma<0.4 database from duckdb -> sqlite version - :param cut_distance: Distance to cut off references with larger distances when showing references. - 1.64 is good to avoid dropping references for all-MiniLM-L6-v2, but instructor-large will always show excessive references. - For all-MiniLM-L6-v2, a value of 1.5 can push out even more references, or a large value of 100 can avoid any loss of references. - :param answer_with_sources: Whether to determine (and return) sources - :param append_sources_to_answer: Whether to place source information in chat response (ignored by LLM). Always disabled for API. 
- :param show_accordions: whether to show accordion for document references in chatbot UI - :param top_k_docs_max_show: Max number of docs to show in UI for sources - If web search is enabled, then this is modified to be max(top_k_docs_max_show, number of links used in search) - :param show_link_in_sources: Whether to show URL link to source document in references - :param pre_prompt_query: prompt before documents to query, if None then use internal defaults - :param prompt_query: prompt after documents to query, if None then use internal defaults - :param pre_prompt_summary: prompt before documents to summarize, if None then use internal defaults - :param prompt_summary: prompt after documents to summarize, if None then use internal defaults - For summarize, normal to have empty query (nothing added in ask anything in UI or empty string in API) - If pass query, template is "Focusing on %s, %s" % (query, prompt_summary) - If pass query and iinput, template is "Focusing on %s, %s, %s" % (query, iinput, prompt_summary) - :param add_chat_history_to_context: Include chat context when performing action - Not supported yet for openai_chat when using document collection instead of LLM - Also not supported when using CLI mode - :param add_search_to_context: Include web search in context as augmented prompt - :param context: Default context to use (for system pre-context in gradio UI) - context comes before chat_conversation and any document Q/A from text_context_list - :param iinput: Default input for instruction-based prompts - :param allow_upload_to_user_data: Whether to allow file uploads to update shared vector db (UserData or custom user dbs) - Ensure pass user_path for the files uploaded to be moved to this location for linking. - :param reload_langchain_state: Whether to reload langchain_modes.pkl file that contains any new user collections. - :param allow_upload_to_my_data: Whether to allow file uploads to update personal vector db - :param enable_url_upload: Whether to allow upload from URL - :param enable_text_upload: Whether to allow upload of text - :param enable_sources_list: Whether to allow list (or download for non-shared db) of list of sources for chosen db - :param chunk: Whether to chunk data (True unless know data is already optimally chunked) - :param chunk_size: Size of chunks, with typically top-4 passed to LLM, so needs to be in context length - :param top_k_docs: For langchain_action query: number of chunks to give LLM - -1 : auto-fills context up to max_seq_len - For langchain_action summarize: number of document parts, like pages for PDF. - There's no such thing as chunks for summarization. - -1 : auto-fills context up to max_seq_len - :param docs_ordering_type: - Type of ordering of docs. - 'best_first': Order by score so score is worst match near prompt - 'best_near_prompt' or 'reverse_sort' : reverse docs order so most relevant is closest to question. - Best choice for sufficiently smart model, and truncation occurs for oldest context, so best then too. - But smaller 6_9 models fail to use newest context and can get stuck on old information. - '' or None (i.e. default) or 'reverse_ucurve_sort' : Sort so most relevant is either near start or near end - Best to avoid "lost in middle" as well as avoid hallucinating off starting content that LLM focuses on alot. 
- :param auto_reduce_chunks: Whether to automatically reduce top_k_docs to fit context given prompt - :param max_chunks: If top_k_docs=-1, maximum number of chunks to allow - :param headsize: Maximum number of characters for head of document document for UI to show - :param n_jobs: Number of processors to use when consuming documents (-1 = all, is default) - - :param use_unstructured: Enable unstructured URL loader - :param use_playwright: Enable PlayWright URL loader - :param use_selenium: Enable Selenium URL loader - - :param use_pymupdf: enable PyMUPDF 'auto' means use first, use others if they are 'auto' if no result - :param use_unstructured_pdf: enable Unstructured PDF loader, 'auto' means use if pymupdf fails to get doc result - :param use_pypdf: enable PyPDF loader 'auto' means use if unstructured fails to get doc result - :param enable_pdf_ocr: 'auto' means only use OCR if normal text extraction fails. Useful for pure image-based PDFs with text. - if enable_pdf_doctr == 'on' then don't do. - 'on' means always do OCR as additional parsing of same documents - 'off' means don't do OCR (e.g. because it's slow even if 'auto' only would trigger if nothing else worked) - :param enable_pdf_doctr: Whether to support doctr on pdfs, 'auto' means use do if failed to get doc result so far - :param try_pdf_as_html: Try "PDF" as if HTML file, in case web link has .pdf extension but really is just HTML - - :param enable_ocr: Whether to support OCR on images - :param enable_doctr: Whether to support doctr on images (using OCR better than enable_ocr=True) - :param enable_pix2struct: Whether to support pix2struct on images for captions - :param enable_captions: Whether to support captions using BLIP for image files as documents, - then preloads that model if pre_load_caption_model=True - - :param pre_load_caption_model: Whether to preload caption model, or load after forking parallel doc loader - parallel loading disabled if preload and have images, to prevent deadlocking on cuda context - Recommended if using larger caption model - :param captions_model: Which model to use for captions. - captions_model: str = "Salesforce/blip-image-captioning-base", # continue capable - captions_model: str = "Salesforce/blip2-flan-t5-xl", # question/answer capable, 16GB state - captions_model: str = "Salesforce/blip2-flan-t5-xxl", # question/answer capable, 60GB state - Note: opt-based blip2 are not permissive license due to opt and Meta license restrictions - Disabled for CPU since BLIP requires CUDA - :param caption_gpu: If support caption, then use GPU if exists - - :param doctr_gpu: If support doctr, then use GPU if exists - - :param jq_schema: control json loader - By default '.[]' ingests everything in brute-force way, but better to match your schema - See: https://python.langchain.com/docs/modules/data_connection/document_loaders/json#using-jsonloader - - :param max_quality: Choose maximum quality ingestion with all available parsers - Pro: Catches document when some default parsers would fail - Pro: Enables DocTR that has much better OCR than Tesseract - Con: Fills DB with results from all parsers, so similarity search gives redundant results - - :param enable_heap_analytics: Toggle telemetry. - :param heap_app_id: App ID for Heap, change to your ID. 
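    A hedged sketch of a higher-quality ingestion setup using the loader flags above (values are illustrative; the 'auto' fallbacks described above are usually sufficient, and max_quality=True switches on the richer parsers at the cost of redundant results):

        main(base_model='h2oai/h2ogpt-oig-oasst1-512-6_9b',
             langchain_mode='UserData',
             user_path='./docs',                   # hypothetical local folder to ingest
             use_pymupdf='auto',
             enable_pdf_ocr='auto',
             enable_pdf_doctr='on',
             enable_captions=True,
             captions_model='Salesforce/blip-image-captioning-base',
             jq_schema='.[]')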
- :return: - """ - if base_model is None: - base_model = '' - if tokenizer_base_model is None: - tokenizer_base_model = '' - if lora_weights is None: - lora_weights = '' - if inference_server is None: - inference_server = '' - - # listen to env if set - model_lock = os.getenv('model_lock', str(model_lock)) - model_lock = ast.literal_eval(model_lock) - - chat_conversation = str_to_list(chat_conversation) - text_context_list = str_to_list(text_context_list) - - llamacpp_dict = str_to_dict(llamacpp_dict) - # add others to single dict - llamacpp_dict['model_path_llama'] = model_path_llama - llamacpp_dict['model_name_gptj'] = model_name_gptj - llamacpp_dict['model_name_gpt4all_llama'] = model_name_gpt4all_llama - llamacpp_dict['model_name_exllama_if_no_config'] = model_name_exllama_if_no_config - # if user overrides but doesn't set these: - if 'n_batch' not in llamacpp_dict: - llamacpp_dict['n_batch'] = 128 - if 'n_gpu_layers' not in llamacpp_dict: - llamacpp_dict['n_gpu_layers'] = 100 - if 'n_gqa' not in llamacpp_dict: - llamacpp_dict['n_gqa'] = 0 - - if os.environ.get('SERPAPI_API_KEY') is None and LangChainAgent.SEARCH.value in visible_langchain_agents: - visible_langchain_agents.remove(LangChainAgent.SEARCH.value) - - if model_lock: - assert gradio, "model_lock only supported for gradio=True" - assert not cli, "model_lock only supported for cli=False" - assert not (not cli and not gradio), "model_lock only supported for eval (cli=gradio=False)" - assert not base_model, "Don't specify model_lock and base_model" - assert not tokenizer_base_model, "Don't specify model_lock and tokenizer_base_model" - assert not lora_weights, "Don't specify model_lock and lora_weights" - assert not inference_server, "Don't specify model_lock and inference_server" - # assert not prompt_type, "Don't specify model_lock and prompt_type" - # assert not prompt_dict, "Don't specify model_lock and prompt_dict" - - n_jobs = int(os.getenv('n_jobs', str(n_jobs))) - is_hf = bool(int(os.getenv("HUGGINGFACE_SPACES", '0'))) - is_gpth2oai = bool(int(os.getenv("GPT_H2O_AI", '0'))) - is_public = is_hf or is_gpth2oai # multi-user case with fixed model and disclaimer - if is_public: - visible_tos_tab = visible_hosts_tab = True - if enforce_h2ogpt_api_key is None: - enforce_h2ogpt_api_key = True - else: - if enforce_h2ogpt_api_key is None: - enforce_h2ogpt_api_key = False - if isinstance(h2ogpt_api_keys, str) and not os.path.isfile(h2ogpt_api_keys): - h2ogpt_api_keys = str_to_list(h2ogpt_api_keys) - if memory_restriction_level is None: - memory_restriction_level = 2 if is_hf else 0 # 2 assumes run on 24GB consumer GPU - else: - assert 0 <= memory_restriction_level <= 3, "Bad memory_restriction_level=%s" % memory_restriction_level - if n_jobs == -1: - # if -1, assume hypercores, don't use, force user to pass n_jobs to be specific if not standard cores - n_jobs = max(1, os.cpu_count() // 2) - if is_public and os.getenv('n_jobs') is None: - n_jobs = min(n_jobs, max(1, min(os.cpu_count() // 2, 8))) - admin_pass = os.getenv("ADMIN_PASS") - # will sometimes appear in UI or sometimes actual generation, but maybe better than empty result - # but becomes unrecoverable sometimes if raise, so just be silent for now - raise_generate_gpu_exceptions = True - - rope_scaling = str_to_dict(rope_scaling) - - if isinstance(auth, str): - if auth.strip().startswith('['): - auth = str_to_list(auth) - if isinstance(auth, str) and auth: - auth_filename = auth - if not auth_filename: - auth_filename = "auth.json" - assert isinstance(auth, (str, list, 
tuple, type(None))), "Unknown type %s for auth=%s" % (type(auth), auth) - - # allow set token directly - use_auth_token = os.environ.get("HUGGING_FACE_HUB_TOKEN", use_auth_token) - allow_upload_to_user_data = bool( - int(os.environ.get("allow_upload_to_user_data", str(int(allow_upload_to_user_data))))) - allow_upload_to_my_data = bool(int(os.environ.get("allow_upload_to_my_data", str(int(allow_upload_to_my_data))))) - height = int(os.environ.get("HEIGHT", height)) - h2ocolors = bool(int(os.getenv('h2ocolors', h2ocolors))) - - # allow enabling langchain via ENV - # FIRST PLACE where LangChain referenced, but no imports related to it - langchain_modes = ast.literal_eval(os.environ.get("langchain_modes", str(langchain_modes))) - if not isinstance(langchain_modes, list): - langchain_modes = [] - # always allow DISABLED - if LangChainMode.DISABLED.value not in langchain_modes: - langchain_modes.append(LangChainMode.DISABLED.value) - if not have_langchain: - # only allow disabled, not even LLM that is langchain related - langchain_mode = LangChainMode.DISABLED.value - langchain_modes = [langchain_mode] - - # update - langchain_mode_paths = str_to_dict(langchain_mode_paths) - langchain_mode_types = str_to_dict(langchain_mode_types) - for lmode in [LangChainMode.GITHUB_H2OGPT.value, - LangChainMode.H2O_DAI_DOCS.value, - LangChainMode.WIKI.value, - LangChainMode.WIKI_FULL.value, - ]: - if lmode not in langchain_mode_types: - langchain_mode_types[lmode] = 'shared' - if lmode not in langchain_mode_paths: - langchain_mode_types[lmode] = '' - if user_path: - user_path = makedirs(user_path, use_base=True) - langchain_mode_paths['UserData'] = user_path - langchain_mode_paths['UserData'] = LangChainTypes.SHARED.value - - if is_public: - allow_upload_to_user_data = False - if LangChainMode.USER_DATA.value in langchain_modes: - langchain_modes.remove(LangChainMode.USER_DATA.value) - if max_raw_chunks is None: - max_raw_chunks = 30 if is_public else 1000000 - - # in-place, for non-scratch dbs - if allow_upload_to_user_data: - # always listen to CLI-passed user_path if passed - if user_path: - langchain_mode_paths['UserData'] = user_path - - assert langchain_action in langchain_actions, "Invalid langchain_action %s not in %s" % ( - langchain_action, langchain_actions) - assert len( - set(langchain_agents).difference(langchain_agents_list)) == 0, "Invalid langchain_agents %s" % langchain_agents - - # auto-set langchain_mode - langchain_mode = os.environ.get("LANGCHAIN_MODE", langchain_mode) - if have_langchain and langchain_mode is None: - # start in chat mode, in case just want to chat and don't want to get "No documents to query" by default. - if LangChainMode.LLM.value in langchain_modes: - langchain_mode = LangChainMode.LLM.value - elif len(langchain_modes) >= 1: - # infer even if don't pass which langchain_mode, just langchain_modes. - langchain_mode = langchain_modes[0] - if allow_upload_to_user_data and not is_public and langchain_mode_paths['UserData']: - if verbose: - print("Auto set langchain_mode=%s. Could use UserData instead." % langchain_mode, flush=True) - elif allow_upload_to_my_data: - if verbose: - print("Auto set langchain_mode=%s. Could use MyData instead." 
- " To allow UserData to pull files from disk," - " set user_path or langchain_mode_paths, and ensure allow_upload_to_user_data=True" % langchain_mode, - flush=True) - else: - raise RuntimeError("Please pass --langchain_mode= out of %s" % langchain_modes) - if not have_langchain and langchain_mode not in [None, LangChainMode.DISABLED.value, LangChainMode.LLM.value]: - raise RuntimeError("Asked for LangChain mode but langchain python package cannot be found.") - if langchain_mode is None: - # if not set yet, disable - langchain_mode = LangChainMode.DISABLED.value - print("Auto set langchain_mode=%s Have langchain package: %s" % (langchain_mode, have_langchain), flush=True) - # go ahead and add - if langchain_mode not in langchain_modes: - langchain_modes.append(langchain_mode) - - if is_public: - allow_upload_to_user_data = False - input_lines = 1 # ensure set, for ease of use - temperature = 0.2 if temperature is None else temperature - top_p = 0.85 if top_p is None else top_p - top_k = 70 if top_k is None else top_k - if is_hf: - do_sample = True if do_sample is None else do_sample - top_k_docs = 3 if top_k_docs is None else top_k_docs - else: - # by default don't sample, too chatty - do_sample = False if do_sample is None else do_sample - top_k_docs = 4 if top_k_docs is None else top_k_docs - - if memory_restriction_level == 2: - if not base_model and not inference_server and not model_lock: - base_model = 'h2oai/h2ogpt-oasst1-512-12b' - # don't set load_8bit if passed base_model, doesn't always work so can't just override - load_8bit = True - load_4bit = False # FIXME - consider using 4-bit instead of 8-bit - elif not inference_server: - top_k_docs = 10 if top_k_docs is None else top_k_docs - if memory_restriction_level >= 2: - load_8bit = True - load_4bit = False # FIXME - consider using 4-bit instead of 8-bit - if hf_embedding_model is None: - hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2" - top_k_docs = 3 if top_k_docs is None else top_k_docs - if top_k_docs is None: - top_k_docs = 3 - if is_public: - if not max_time: - max_time = 60 * 2 - if not max_max_time: - max_max_time = max_time - if not max_new_tokens: - max_new_tokens = 256 - if not max_max_new_tokens: - max_max_new_tokens = 512 - else: - if not max_max_time: - max_max_time = 60 * 20 - if not max_max_new_tokens: - max_max_new_tokens = 1024 - if is_hf: - # must override share if in spaces - share = False - if not max_time: - max_time = 60 * 1 - if not max_max_time: - max_max_time = max_time - # HF accounted for later in get_max_max_new_tokens() - save_dir = os.getenv('SAVE_DIR', save_dir) - save_dir = makedirs(save_dir, exist_ok=True, tmp_ok=True, use_base=True) - score_model = os.getenv('SCORE_MODEL', score_model) - if str(score_model) == 'None': - score_model = '' - concurrency_count = int(os.getenv('CONCURRENCY_COUNT', concurrency_count)) - api_open = bool(int(os.getenv('API_OPEN', str(int(api_open))))) - allow_api = bool(int(os.getenv('ALLOW_API', str(int(allow_api))))) - - n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - n_gpus, gpu_ids = cuda_vis_check(n_gpus) - - if load_half is None and t5_type(base_model): - load_half = False - print("load_half=%s auto-set for %s to avoid bad generation" % (load_half, base_model), flush=True) - - if n_gpus == 0 or get_device() == "mps": - # No CUDA GPUs usable - - if get_device() != "mps": - print("No GPUs detected", flush=True) - - enable_captions = False - gpu_id = None - load_8bit = False - load_4bit = False - low_bit_mode = 1 - if load_half 
is None: - # wouldn't work if specified True, but respect - load_half = False - load_gptq = '' - load_exllama = False - use_gpu_id = False - if get_device() == "cuda": - torch.backends.cudnn.benchmark = True - torch.backends.cudnn.enabled = False - torch.set_default_dtype(torch.float32) - if is_public and not inference_server and not model_lock: - # 12B uses ~94GB - # 6.9B uses ~47GB - base_model = 'h2oai/h2ogpt-oig-oasst1-512-6_9b' if not base_model else base_model - if hf_embedding_model is None: - # if no GPUs, use simpler embedding model to avoid cost in time - hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2" - if score_model == 'auto': - score_model = '' - else: - if load_half is None: - load_half = True - # CUDA GPUs visible - if score_model == 'auto': - if n_gpus >= 2: - # will by default place scoring model on last GPU - score_model = 'OpenAssistant/reward-model-deberta-v3-large-v2' - else: - score_model = '' - if hf_embedding_model is None: - # if still None, then set default - hf_embedding_model = 'hkunlp/instructor-large' - - # get defaults - if base_model: - model_lower = base_model.lower() - elif model_lock: - # have 0th model be thought of as normal model - assert len(model_lock) > 0 and model_lock[0]['base_model'] - model_lower = model_lock[0]['base_model'].lower() - else: - model_lower = '' - if not gradio: - # force, else not single response like want to look at - stream_output = False - # else prompt removal can mess up output - chat = False - # hard-coded defaults - first_para = False - text_limit = None - - if compile_model is None: - # too avoid noisy CLI - compile_model = not cli - - if offload_folder: - offload_folder = makedirs(offload_folder, exist_ok=True, tmp_ok=True, use_base=True) - - # defaults - caption_loader = None - doctr_loader = None - pix2struct_loader = None - - image_loaders_options0, image_loaders_options, \ - pdf_loaders_options0, pdf_loaders_options, \ - url_loaders_options0, url_loaders_options = lg_to_gr(**locals()) - jq_schema0 = jq_schema - # transcribe - image_loaders = image_loaders_options0 - pdf_loaders = pdf_loaders_options0 - url_loaders = url_loaders_options0 - - placeholder_instruction, placeholder_input, \ - stream_output, show_examples, \ - prompt_type, prompt_dict, \ - temperature, top_p, top_k, num_beams, \ - max_new_tokens, min_new_tokens, early_stopping, max_time, \ - repetition_penalty, num_return_sequences, \ - do_sample, \ - src_lang, tgt_lang, \ - examples, \ - task_info = \ - get_generate_params(model_lower, - chat, - stream_output, show_examples, - prompt_type, prompt_dict, - system_prompt, - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - temperature, top_p, top_k, num_beams, - max_new_tokens, min_new_tokens, early_stopping, max_time, - repetition_penalty, num_return_sequences, - do_sample, - top_k_docs, - chunk, - chunk_size, - image_loaders, - pdf_loaders, - url_loaders, - jq_schema, - docs_ordering_type, - min_max_new_tokens, - verbose, - ) - - git_hash = get_githash() if is_public or os.getenv('GET_GITHASH') else "GET_GITHASH" - locals_dict = locals() - locals_print = '\n'.join(['%s: %s' % (k, v) for k, v in locals_dict.items()]) - if verbose: - print(f"Generating model with params:\n{locals_print}", flush=True) - print("Command: %s\nHash: %s" % (str(' '.join(sys.argv)), git_hash), flush=True) - - if langchain_mode != LangChainMode.DISABLED.value: - # SECOND PLACE where LangChain referenced, but all imports are kept local so not required - from gpt_langchain import prep_langchain, 
get_some_dbs_from_hf, get_persist_directory - if is_hf: - get_some_dbs_from_hf() - dbs = {} - for langchain_mode1 in langchain_modes: - langchain_type = langchain_mode_types.get(langchain_mode1, LangChainTypes.EITHER.value) - if langchain_type == LangChainTypes.PERSONAL.value: - # shouldn't prepare per-user databases here - continue - persist_directory1, langchain_type = get_persist_directory(langchain_mode1, langchain_type=langchain_type) - langchain_mode_types[langchain_mode1] = langchain_type - if langchain_type == LangChainTypes.PERSONAL.value: - # shouldn't prepare per-user databases here - continue - try: - db = prep_langchain(persist_directory1, - load_db_if_exists, - db_type, use_openai_embedding, - langchain_mode1, langchain_mode_paths, langchain_mode_types, - hf_embedding_model, - migrate_embedding_model, - auto_migrate_db, - kwargs_make_db=locals(), - verbose=verbose) - finally: - # in case updated embeddings or created new embeddings - clear_torch_cache() - dbs[langchain_mode1] = db - # remove None db's so can just rely upon k in dbs for if hav db - dbs = {k: v for k, v in dbs.items() if v is not None} - else: - dbs = {} - # import control - if os.environ.get("TEST_LANGCHAIN_IMPORT"): - assert 'gpt_langchain' not in sys.modules, "Dev bug, import of langchain when should not have" - assert 'langchain' not in sys.modules, "Dev bug, import of langchain when should not have" - - other_model_state_defaults = dict(load_8bit=load_8bit, load_4bit=load_4bit, low_bit_mode=low_bit_mode, - load_half=load_half, - load_gptq=load_gptq, load_exllama=load_exllama, use_safetensors=use_safetensors, - revision=revision, use_gpu_id=use_gpu_id, gpu_id=gpu_id, - compile_model=compile_model, - use_cache=use_cache, - llamacpp_dict=llamacpp_dict, model_path_llama=model_path_llama, - model_name_gptj=model_name_gptj, - model_name_gpt4all_llama=model_name_gpt4all_llama, - model_name_exllama_if_no_config=model_name_exllama_if_no_config, - ) - model_state_none = dict(model=None, tokenizer=None, device=None, - base_model=None, tokenizer_base_model=None, lora_weights=None, - inference_server=None, prompt_type=None, prompt_dict=None, - visible_models=None, h2ogpt_key=None, - ) - model_state_none.update(other_model_state_defaults) - my_db_state0 = {LangChainMode.MY_DATA.value: [None, None, None]} - selection_docs_state0 = dict(langchain_modes=langchain_modes, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types) - selection_docs_state = copy.deepcopy(selection_docs_state0) - - if cli or not gradio: - # initial state for query prompt - model_name = base_model - pre_prompt_query, prompt_query, pre_prompt_summary, prompt_summary = \ - get_langchain_prompts(pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - model_name, inference_server, - model_path_llama) - - if cli: - from cli import run_cli - return run_cli(**get_kwargs(run_cli, exclude_names=['model_state0'], **locals())) - elif not gradio: - from eval import run_eval - return run_eval(**get_kwargs(run_eval, exclude_names=['model_state0'], **locals())) - elif gradio or prepare_offline_level > 0: - # imported here so don't require gradio to run generate - from gradio_runner import go_gradio - - # get default model - model_states = [] - model_list = [dict(base_model=base_model, tokenizer_base_model=tokenizer_base_model, lora_weights=lora_weights, - inference_server=inference_server, prompt_type=prompt_type, prompt_dict=prompt_dict, - visible_models=None, h2ogpt_key=None)] - 
model_list[0].update(other_model_state_defaults) - # FIXME: hyper per model, not about model loading - # for k in gen_hyper: - # model_list[k] = locals()[k] - - model_list0 = copy.deepcopy(model_list) # just strings, safe to deepcopy - model_state0 = model_state_none.copy() - assert len(model_state_none) == len(model_state0) - if model_lock: - model_list = model_lock - # do reverse, so first is default base_model etc., so some logic works in go_gradio() more easily - for model_dict in reversed(model_list): - # handle defaults user didn't have to pass - # special defaults, ignore defaults for these if not specifically set, replace with '' - model_dict['base_model'] = model_dict.get('base_model', '') - model_dict['tokenizer_base_model'] = model_dict.get('tokenizer_base_model', '') - model_dict['lora_weights'] = model_dict.get('lora_weights', '') - model_dict['inference_server'] = model_dict.get('inference_server', '') - if prepare_offline_level >= 2: - if 'openai' not in model_dict['inference_server'] and 'replicate' not in model_dict['inference_server']: - # assume want locally, but OpenAI and replicate are never local for model part - model_dict['inference_server'] = '' - prompt_type_infer = not model_dict.get('prompt_type') - model_dict['prompt_type'] = model_dict.get('prompt_type', - model_list0[0]['prompt_type']) # don't use mutated value - # rest of generic defaults - for k in model_list0[0]: - if k not in model_dict: - model_dict[k] = model_list0[0][k] - - # begin prompt adjustments - # get query prompt for (say) last base model if using model lock - pre_prompt_query1, prompt_query1, pre_prompt_summary1, prompt_summary1 = ( - get_langchain_prompts(pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - model_dict['base_model'], - model_dict['inference_server'], - model_dict['model_path_llama'])) - # if mixed setup, choose non-empty so best models best - # FIXME: Make per model dict passed through to evaluate - pre_prompt_query = pre_prompt_query or pre_prompt_query1 - prompt_query = prompt_query or prompt_query1 - pre_prompt_summary = pre_prompt_summary or pre_prompt_summary1 - prompt_summary = prompt_summary or prompt_summary1 - - # try to infer, ignore empty initial state leading to get_generate_params -> 'plain' - if prompt_type_infer: - model_lower1 = model_dict['base_model'].lower() - if model_lower1 in inv_prompt_type_to_model_lower: - model_dict['prompt_type'] = inv_prompt_type_to_model_lower[model_lower1] - model_dict['prompt_dict'], error0 = get_prompt(model_dict['prompt_type'], '', - chat=False, context='', reduced=False, - making_context=False, - return_dict=True, - system_prompt=system_prompt) - else: - model_dict['prompt_dict'] = prompt_dict - else: - model_dict['prompt_dict'] = prompt_dict - model_dict['prompt_dict'] = model_dict.get('prompt_dict', model_dict['prompt_dict']) - # end prompt adjustments - all_kwargs = locals().copy() - all_kwargs.update(model_dict) - if model_dict['base_model'] and not login_mode_if_model0: - model0, tokenizer0, device = get_model(reward_type=False, - **get_kwargs(get_model, exclude_names=['reward_type'], - **all_kwargs)) - else: - # if empty model, then don't load anything, just get gradio up - model0, tokenizer0, device = None, None, None - if model0 is None: - if fail_if_cannot_connect: - raise RuntimeError("Could not connect, see logs") - # skip - if isinstance(model_lock, list): - model_lock.remove(model_dict) - continue - model_state_trial = dict(model=model0, tokenizer=tokenizer0, device=device) - 
model_state_trial.update(model_dict) - diff_keys = set(list(model_state_none.keys())).symmetric_difference(model_state_trial.keys()) - assert len(model_state_none) == len(model_state_trial), diff_keys - print("Model %s" % model_dict, flush=True) - if model_lock: - # last in iteration will be first - model_states.insert(0, model_state_trial) - # fill model_state0 so go_gradio() easier, manage model_states separately - model_state0 = model_state_trial.copy() - else: - model_state0 = model_state_trial.copy() - assert len(model_state_none) == len(model_state0) - - visible_models = str_to_list(visible_models, allow_none=True) # None means first model - all_models = [x.get('base_model', xi) for xi, x in enumerate(model_states)] - visible_models_state0 = [x.get('base_model', xi) for xi, x in enumerate(model_states) if - visible_models is None or - x.get('base_model', xi) in visible_models or - xi in visible_models] - - # update to be consistent with what is passed from CLI and model chose - # do after go over all models if multi-model, so don't contaminate - # This is just so UI shows reasonable correct value, not 2048 dummy value - if len(model_states) >= 1: - max_seq_len = model_states[0]['tokenizer'].model_max_length - - # get score model - all_kwargs = locals().copy() - smodel, stokenizer, sdevice = get_score_model(reward_type=True, - **get_kwargs(get_score_model, exclude_names=['reward_type'], - **all_kwargs)) - score_model_state0 = dict(model=smodel, tokenizer=stokenizer, device=sdevice, - base_model=score_model, tokenizer_base_model='', lora_weights='', - inference_server='', prompt_type='', prompt_dict='', - visible_models=None, h2ogpt_key=None) - - if enable_captions: - if pre_load_caption_model: - from image_captions import H2OImageCaptionLoader - caption_loader = H2OImageCaptionLoader(caption_gpu=caption_gpu).load_model() - else: - caption_loader = 'gpu' if n_gpus > 0 and caption_gpu else 'cpu' - else: - caption_loader = False - - if pre_load_embedding_model and \ - langchain_mode != LangChainMode.DISABLED.value and \ - not use_openai_embedding: - from src.gpt_langchain import get_embedding - hf_embedding_model = dict(name=hf_embedding_model, - model=get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model, - preload=True)) - if enable_doctr or enable_pdf_ocr in [True, 'auto', 'on']: - doctr_loader = 'gpu' if n_gpus > 0 and doctr_gpu else 'cpu' - else: - doctr_loader = False - - # assume gradio needs everything - go_gradio(**locals()) - - -def get_config(base_model, - use_auth_token=False, - trust_remote_code=True, - offload_folder=None, - revision=None, - rope_scaling=None, - triton_attn=False, - long_sequence=True, - return_model=False, - raise_exception=False, - max_seq_len=None, - verbose=False, - ): - from accelerate import init_empty_weights - with init_empty_weights(): - from transformers import AutoConfig - try: - config = AutoConfig.from_pretrained(base_model, use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - revision=revision, - rope_scaling=rope_scaling if rope_scaling else None) - except OSError as e: - if raise_exception: - raise - if 'not a local folder and is not a valid model identifier listed on' in str( - e) or '404 Client Error' in str(e) or "couldn't connect" in str(e): - # e.g. llama, gpjt, etc. - # e.g. HF TGI but not model on HF or private etc. - if max_seq_len is None and base_model.lower() in non_hf_types: - print("Could not determine --max_seq_len, setting to 2048. 
Pass if not correct", flush=True) - max_seq_len = 2048 - # HF TGI server only should really require prompt_type, not HF model state - return None, None, max_seq_len - else: - raise - if triton_attn and 'mpt-' in base_model.lower(): - config.attn_config['attn_impl'] = 'triton' - if long_sequence: - if 'mpt-7b-storywriter' in base_model.lower(): - config.update({"max_seq_len": 83968}) - if 'mosaicml/mpt-7b-chat' in base_model.lower(): - config.update({"max_seq_len": 4096}) - if 'mpt-30b' in base_model.lower(): - config.update({"max_seq_len": 2 * 8192}) - if return_model and \ - issubclass(config.__class__, tuple(AutoModel._model_mapping.keys())): - model = AutoModel.from_config( - config, - trust_remote_code=trust_remote_code, - ) - else: - # can't infer - model = None - if 'falcon' in base_model.lower(): - config.use_cache = False - - # allow override - if max_seq_len is not None: - print("Overriding max_seq_len -> %d" % max_seq_len, flush=True) - else: - if hasattr(config, 'max_seq_len'): - max_seq_len = int(config.max_seq_len) - elif hasattr(config, 'max_position_embeddings') and isinstance(config.max_position_embeddings, int): - # help automatically limit inputs to generate - max_seq_len = config.max_position_embeddings - if verbose: - print("Used max_position_embeddings=%s as base model (pre-rope) max_seq_len." - " If not desired, pass --max_seq_len and set to some integer value." % config.max_position_embeddings, - flush=True) - elif hasattr(config, 'n_ctx'): - # e.g. gpt2 - max_seq_len = int(config.n_ctx) - else: - print("Could not determine --max_seq_len, setting to 2048. Pass if not correct", flush=True) - max_seq_len = 2048 - # FIXME: - # raise RuntimeError("Could not determine max_seq_len," - # " please pass --max_seq_len and set to some value, e.g. 2048.") - - if rope_scaling: - if rope_scaling.get('factor'): - # HF transformers - max_seq_len *= rope_scaling.get('factor') - elif rope_scaling.get('alpha_value'): - # exllama - # Note: exllama's own tokenizer has this set correctly in loaders.py, this config will be unused - max_seq_len *= rope_scaling.get('alpha_value') - print("Automatically setting max_seq_len=%d for RoPE scaling" % max_seq_len, flush=True) - - return config, model, max_seq_len - - -def get_non_lora_model(base_model, model_loader, load_half, - load_gptq, - load_exllama, - use_safetensors, - revision, - model_kwargs, reward_type, - config, model, - gpu_id=0, - ): - """ - Ensure model gets on correct device - """ - - if model is not None: - # NOTE: Can specify max_memory={0: max_mem, 1: max_mem}, to shard model - # NOTE: Some models require avoiding sharding some layers, - # then would pass no_split_module_classes and give list of those layers. - from accelerate import infer_auto_device_map - device_map = infer_auto_device_map( - model, - dtype=torch.float16 if load_half else torch.float32, - ) - if hasattr(model, 'model'): - device_map_model = infer_auto_device_map( - model.model, - dtype=torch.float16 if load_half else torch.float32, - ) - device_map.update(device_map_model) - else: - device_map = "auto" - - n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - n_gpus, gpu_ids = cuda_vis_check(n_gpus) - - if n_gpus > 0: - if gpu_id >= 0: - # FIXME: If really distributes model, tend to get things like: ValueError: gpt_neox.embed_in.weight doesn't have any device set. 
- # So avoid for now, just put on first GPU, unless score_model, put on last - if reward_type: - device_map = {'': n_gpus - 1} - else: - device_map = {'': min(n_gpus - 1, gpu_id)} - if gpu_id == -1: - device_map = {'': 'cuda'} - else: - device_map = {'': 'cpu'} - model_kwargs['load_in_8bit'] = False - model_kwargs['load_in_4bit'] = False - print('device_map: %s' % device_map, flush=True) - - load_in_8bit = model_kwargs.get('load_in_8bit', False) - load_in_4bit = model_kwargs.get('load_in_4bit', False) - model_kwargs['device_map'] = device_map - model_kwargs['use_safetensors'] = use_safetensors - model_kwargs['revision'] = revision - pop_unused_model_kwargs(model_kwargs) - - if load_exllama: - model = model_loader - elif load_gptq: - if 'Llama-2-70B-chat-GPTQ' in base_model: - model_kwargs.update(dict(inject_fused_attention=False)) - model_kwargs.pop('torch_dtype', None) - model_kwargs.pop('device_map') - model = model_loader( - model_name_or_path=base_model, - model_basename=load_gptq, - **model_kwargs, - ) - elif load_in_8bit or load_in_4bit or not load_half: - model = model_loader( - base_model, - config=config, - **model_kwargs, - ) - else: - - model = model_loader( - base_model, - config=config, - **model_kwargs, - ) - if not getattr(model, "is_quantized", False): - model = model.half() - return model - - -def get_client_from_inference_server(inference_server, base_model=None, raise_connection_exception=False): - inference_server, headers = get_hf_server(inference_server) - # preload client since slow for gradio case especially - from gradio_utils.grclient import GradioClient - gr_client = None - hf_client = None - if headers is None: - try: - print("GR Client Begin: %s %s" % (inference_server, base_model), flush=True) - # first do sanity check if alive, else gradio client takes too long by default - requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT', '30'))) - gr_client = GradioClient(inference_server) - print("GR Client End: %s" % inference_server, flush=True) - except (OSError, ValueError) as e: - # Occurs when wrong endpoint and should have been HF client, so don't hard raise, just move to HF - gr_client = None - print("GR Client Failed %s %s: %s" % (inference_server, base_model, str(e)), flush=True) - except (ConnectTimeoutError, ConnectTimeout, MaxRetryError, ConnectionError, ConnectionError2, - JSONDecodeError, ReadTimeout2, KeyError) as e: - t, v, tb = sys.exc_info() - ex = ''.join(traceback.format_exception(t, v, tb)) - print("GR Client Failed %s %s: %s" % (inference_server, base_model, str(ex)), flush=True) - if raise_connection_exception: - raise - - if gr_client is None: - res = None - from text_generation import Client as HFClient - print("HF Client Begin: %s %s" % (inference_server, base_model)) - try: - hf_client = HFClient(inference_server, headers=headers, timeout=int(os.getenv('REQUEST_TIMEOUT', '30'))) - # quick check valid TGI endpoint - res = hf_client.generate('What?', max_new_tokens=1) - hf_client = HFClient(inference_server, headers=headers, timeout=300) - except (ConnectTimeoutError, ConnectTimeout, MaxRetryError, ConnectionError, ConnectionError2, - JSONDecodeError, ReadTimeout2, KeyError) as e: - hf_client = None - t, v, tb = sys.exc_info() - ex = ''.join(traceback.format_exception(t, v, tb)) - print("HF Client Failed %s %s: %s" % (inference_server, base_model, str(ex))) - if raise_connection_exception: - raise - print("HF Client End: %s %s : %s" % (inference_server, base_model, res)) - return inference_server, gr_client, hf_client - - 
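    A hedged usage sketch for the helper above: probe an endpoint once and keep whichever client connected (the URL and model name are placeholders, not defaults):

        inference_server, gr_client, hf_client = get_client_from_inference_server(
            'http://localhost:8080', base_model='h2oai/h2ogpt-oig-oasst1-512-6_9b')
        client = gr_client or hf_client
        if client is None:
            raise RuntimeError("No working Gradio or TGI client for %s" % inference_server)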
-def get_model( - load_8bit: bool = False, - load_4bit: bool = False, - low_bit_mode: int = 1, - load_half: bool = True, - load_gptq: str = '', - load_exllama: bool = False, - use_safetensors: bool = False, - revision: str = None, - use_gpu_id: bool = True, - base_model: str = '', - inference_server: str = "", - tokenizer_base_model: str = '', - lora_weights: str = "", - gpu_id: int = 0, - n_jobs=None, - - reward_type: bool = None, - local_files_only: bool = False, - resume_download: bool = True, - use_auth_token: Union[str, bool] = False, - trust_remote_code: bool = True, - offload_folder: str = None, - rope_scaling: dict = None, - max_seq_len: int = None, - compile_model: bool = True, - llamacpp_dict=None, - - verbose: bool = False, -): - """ - - :param load_8bit: load model in 8-bit, not supported by all models - :param load_4bit: load model in 4-bit, not supported by all models - :param low_bit_mode: See gen.py - :param load_half: load model in 16-bit - :param load_gptq: GPTQ model_basename - :param load_exllama: whether to use exllama - :param use_safetensors: use safetensors file - :param revision: - :param use_gpu_id: Use torch infer of optimal placement of layers on devices (for non-lora case) - For non-LORA case, False will spread shards across multiple GPUs, but this can lead to cuda:x cuda:y mismatches - So it is not the default - :param base_model: name/path of base model - :param inference_server: whether base_model is hosted locally ('') or via http (url) - :param tokenizer_base_model: name/path of tokenizer - :param lora_weights: name/path - :param gpu_id: which GPU (0..n_gpus-1) or allow all GPUs if relevant (-1) - :param n_jobs: number of cores to use (e.g. for llama CPU model) - :param reward_type: reward type model for sequence classification - :param local_files_only: use local files instead of from HF - :param resume_download: resume downloads from HF - :param use_auth_token: assumes user did on CLI `huggingface-cli login` to access private repo - :param trust_remote_code: trust code needed by model - :param offload_folder: offload folder - :param rope_scaling: scaling for rope-based models, e.g. 
"{'type':'dynamic', 'factor':4}" - :param max_seq_len: override for maximum sequence length for model - :param max_seq_len: if set, use as max_seq_len for model - :param compile_model: whether to compile torch model - :param llamacpp_dict: dict of llama.cpp and GPT4All model options - :param verbose: - :return: - """ - print("Starting get_model: %s %s" % (base_model, inference_server), flush=True) - - triton_attn = False - long_sequence = True - config_kwargs = dict(use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - rope_scaling=rope_scaling, - triton_attn=triton_attn, - long_sequence=long_sequence, - revision=revision, - max_seq_len=max_seq_len, - verbose=verbose) - config, _, max_seq_len = get_config(base_model, **config_kwargs, raise_exception=False) - - if base_model in non_hf_types: - assert config is None, "Expected config None for %s" % base_model - - llama_type_from_config = 'llama' in str(config).lower() - llama_type_from_name = "llama" in base_model.lower() - llama_type = llama_type_from_config or llama_type_from_name - if "xgen" in base_model.lower() or 'llama2' in base_model.lower() or 'llama-2' in base_model.lower(): - llama_type = False - if llama_type: - if verbose: - print("Detected as llama type from" - " config (%s) or name (%s)" % (llama_type_from_config, llama_type_from_name), flush=True) - - model_name_exllama_if_no_config = '' if not llamacpp_dict else llamacpp_dict.get('model_name_exllama_if_no_config', - '') - model_loader, tokenizer_loader, conditional_type = ( - get_loaders(model_name=base_model, reward_type=reward_type, llama_type=llama_type, - load_gptq=load_gptq, load_exllama=load_exllama, config=config, - rope_scaling=rope_scaling, max_seq_len=max_seq_len, - model_name_exllama_if_no_config=model_name_exllama_if_no_config)) - - tokenizer_kwargs = dict(local_files_only=local_files_only, - resume_download=resume_download, - use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - revision=revision, - padding_side='left', - config=config, - ) - if not tokenizer_base_model: - tokenizer_base_model = base_model - - if load_exllama: - tokenizer = tokenizer_loader - elif config is not None and tokenizer_loader is not None and not isinstance(tokenizer_loader, str): - if load_exllama: - tokenizer = tokenizer_loader - else: - tokenizer = tokenizer_loader.from_pretrained(tokenizer_base_model, **tokenizer_kwargs) - # sets raw (no cushion) limit - # If using RoPE with scaling, then for non-exllama models (e.g. HF models), - # then config -> tokenizer will set model_max_length correctly - set_model_max_len(max_seq_len, tokenizer, verbose=False) - # if using fake tokenizer, not really accurate when lots of numbers, give a bit of buffer, else get: - # Generation Failed: Input validation error: `inputs` must have less than 2048 tokens. Given: 2233 - tokenizer.model_max_length = tokenizer.model_max_length - 50 - else: - tokenizer = None - - if isinstance(inference_server, str) and inference_server.startswith("http"): - inference_server, gr_client, hf_client = get_client_from_inference_server(inference_server, - base_model=base_model) - client = gr_client or hf_client - # Don't return None, None for model, tokenizer so triggers - if tokenizer is None: - # FIXME: Could use only tokenizer from llama etc. 
but hard to detatch from model, just use fake for now - if os.getenv("HARD_ASSERTS") and base_model not in non_hf_types: - raise RuntimeError("Unexpected tokenizer=None") - tokenizer = FakeTokenizer() - return client, tokenizer, 'http' - if isinstance(inference_server, str) and ( - inference_server.startswith('openai') or - inference_server.startswith('vllm') or - inference_server.startswith('replicate') or - inference_server.startswith('sagemaker') - ): - if inference_server.startswith('openai'): - assert os.getenv('OPENAI_API_KEY'), "Set environment for OPENAI_API_KEY" - # Don't return None, None for model, tokenizer so triggers - # include small token cushion - max_seq_len = model_token_mapping[base_model] - if inference_server.startswith('replicate'): - assert len(inference_server.split(':')) >= 3, "Expected replicate:model string, got %s" % inference_server - assert os.getenv('REPLICATE_API_TOKEN'), "Set environment for REPLICATE_API_TOKEN" - assert max_seq_len is not None, "Please pass --max_seq_len= for replicate models." - try: - import replicate as replicate_python - except ImportError: - raise ImportError( - "Could not import replicate python package. " - "Please install it with `pip install replicate`." - ) - if inference_server.startswith('sagemaker'): - assert len( - inference_server.split( - ':')) >= 3, "Expected sagemaker_chat::, got %s" % inference_server - assert os.getenv('AWS_ACCESS_KEY_ID'), "Set environment for AWS_ACCESS_KEY_ID" - assert os.getenv('AWS_SECRET_ACCESS_KEY'), "Set environment for AWS_SECRET_ACCESS_KEY" - # Don't return None, None for model, tokenizer so triggers - # include small token cushion - if inference_server.startswith('openai') or tokenizer is None: - # don't use fake (tiktoken) tokenizer for vLLM//replicate if know actual model with actual tokenizer - tokenizer = FakeTokenizer(model_max_length=max_seq_len - 50) - return inference_server, tokenizer, inference_server - assert not inference_server, "Malformed inference_server=%s" % inference_server - if base_model in non_hf_types: - from gpt4all_llm import get_model_tokenizer_gpt4all - model, tokenizer, device = get_model_tokenizer_gpt4all(base_model, n_jobs=n_jobs, - max_seq_len=max_seq_len, - llamacpp_dict=llamacpp_dict) - return model, tokenizer, device - if load_exllama: - return model_loader, tokenizer, 'cuda' - - # get local torch-HF model - return get_hf_model(load_8bit=load_8bit, - load_4bit=load_4bit, - low_bit_mode=low_bit_mode, - load_half=load_half, - load_gptq=load_gptq, - use_safetensors=use_safetensors, - revision=revision, - use_gpu_id=use_gpu_id, - base_model=base_model, - tokenizer_base_model=tokenizer_base_model, - lora_weights=lora_weights, - gpu_id=gpu_id, - - reward_type=reward_type, - local_files_only=local_files_only, - resume_download=resume_download, - use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - rope_scaling=rope_scaling, - compile_model=compile_model, - - llama_type=llama_type, - config_kwargs=config_kwargs, - tokenizer_kwargs=tokenizer_kwargs, - - verbose=verbose) - - -def get_hf_model(load_8bit: bool = False, - load_4bit: bool = False, - low_bit_mode: int = 1, - load_half: bool = True, - load_gptq: str = '', - use_safetensors: bool = False, - revision: str = None, - use_gpu_id: bool = True, - base_model: str = '', - tokenizer_base_model: str = '', - lora_weights: str = "", - gpu_id: int = 0, - - reward_type: bool = None, - local_files_only: bool = False, - resume_download: bool = True, - use_auth_token: 
Union[str, bool] = False, - trust_remote_code: bool = True, - offload_folder: str = None, - rope_scaling: dict = None, - compile_model: bool = True, - - llama_type: bool = False, - config_kwargs=None, - tokenizer_kwargs=None, - - verbose: bool = False, - ): - assert config_kwargs is not None - assert tokenizer_kwargs is not None - - load_exllama = False # Never should be in HF code for exllama - - if lora_weights is not None and lora_weights.strip(): - if verbose: - print("Get %s lora weights" % lora_weights, flush=True) - device = get_device() - - if 'gpt2' in base_model.lower(): - # RuntimeError: where expected condition to be a boolean tensor, but got a tensor with dtype Half - load_8bit = False - load_4bit = False - - assert base_model.strip(), ( - "Please choose a base model with --base_model (CLI) or load one from Models Tab (gradio)" - ) - - model_loader, tokenizer_loader, conditional_type = ( - get_loaders(model_name=base_model, reward_type=reward_type, llama_type=llama_type, - load_gptq=load_gptq, load_exllama=load_exllama)) - - config, _, max_seq_len = get_config(base_model, return_model=False, raise_exception=True, **config_kwargs) - - if tokenizer_loader is not None and not isinstance(tokenizer_loader, str): - if load_exllama: - tokenizer = tokenizer_loader - else: - tokenizer = tokenizer_loader.from_pretrained(tokenizer_base_model, - **tokenizer_kwargs) - else: - tokenizer = tokenizer_loader - - if isinstance(tokenizer, str): - # already a pipeline, tokenizer_loader is string for task - model = model_loader(tokenizer, - model=base_model, - device=0 if device == "cuda" else -1, - torch_dtype=torch.float16 if device == 'cuda' else torch.float32) - else: - assert device in ["cuda", "cpu", "mps"], "Unsupported device %s" % device - model_kwargs = dict(local_files_only=local_files_only, - torch_dtype=torch.float16 if device == 'cuda' else torch.float32, - resume_download=resume_download, - use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - revision=revision, - # rope_scaling=rope_scaling, # only put into config - ) - if 'mbart-' not in base_model.lower() and 'mpt-' not in base_model.lower(): - if use_gpu_id and gpu_id is not None and gpu_id >= 0 and device == 'cuda': - device_map = {"": gpu_id} - else: - device_map = "auto" - model_kwargs.update(dict(load_in_8bit=load_8bit, - load_in_4bit=load_4bit, - device_map=device_map, - )) - if 'mpt-' in base_model.lower() and gpu_id is not None and gpu_id >= 0: - # MPT doesn't support spreading over GPUs - model_kwargs.update(dict(device_map={"": gpu_id} if device == 'cuda' else "cpu")) - - if 'OpenAssistant/reward-model'.lower() in base_model.lower(): - # FIXME: could put on other GPUs - model_kwargs['device_map'] = {"": 0} if device == 'cuda' else {"": 'cpu'} - model_kwargs.pop('torch_dtype', None) - pop_unused_model_kwargs(model_kwargs) - - n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - n_gpus, gpu_ids = cuda_vis_check(n_gpus) - if low_bit_mode == 1 and n_gpus != 0: - from transformers import BitsAndBytesConfig - model_kwargs['quantization_config'] = BitsAndBytesConfig(bnb_4bit_compute_dtype=torch.bfloat16, - load_in_4bit=load_4bit, - load_in_8bit=load_8bit, - ) - elif low_bit_mode == 2 and n_gpus != 0: - from transformers import BitsAndBytesConfig - model_kwargs['quantization_config'] = BitsAndBytesConfig(bnb_4bit_quant_type="nf4", - load_in_4bit=load_4bit, - load_in_8bit=load_8bit, - ) - elif low_bit_mode == 3 and n_gpus != 0: - from transformers import 
BitsAndBytesConfig - model_kwargs['quantization_config'] = BitsAndBytesConfig(bnb_4bit_use_double_quant=True, - load_in_4bit=load_4bit, - load_in_8bit=load_8bit, - ) - elif low_bit_mode == 4 and n_gpus != 0: - from transformers import BitsAndBytesConfig - model_kwargs['quantization_config'] = BitsAndBytesConfig(bnb_4bit_use_double_quant=True, - bnb_4bit_quant_type="nf4", - load_in_4bit=load_4bit, - load_in_8bit=load_8bit, - ) - - if not lora_weights: - # torch.device context uses twice memory for AutoGPTQ - context = NullContext if load_gptq else torch.device - with context(device): - - if use_gpu_id: - config, model, max_seq_len = get_config(base_model, - return_model=True, raise_exception=True, **config_kwargs) - model = get_non_lora_model(base_model, model_loader, load_half, load_gptq, - load_exllama, - use_safetensors, - revision, - model_kwargs, reward_type, - config, model, - gpu_id=gpu_id, - ) - else: - config, _, max_seq_len = get_config(base_model, **config_kwargs) - if load_half and not (load_8bit or load_4bit or load_gptq): - model = model_loader( - base_model, - config=config, - **model_kwargs) - if not getattr(model, "is_quantized", False): - model = model.half() - else: - model = model_loader( - base_model, - config=config, - **model_kwargs) - elif load_8bit or load_4bit: - config, _, max_seq_len = get_config(base_model, **config_kwargs) - model = model_loader( - base_model, - config=config, - **model_kwargs - ) - from peft import PeftModel # loads cuda, so avoid in global scope - model = PeftModel.from_pretrained( - model, - lora_weights, - torch_dtype=torch.float16 if device == 'cuda' else torch.float32, - local_files_only=local_files_only, - resume_download=resume_download, - use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - rope_scaling=rope_scaling, - revision=revision, - device_map={"": 0} if device == 'cuda' else {"": 'cpu'}, # seems to be required - ) - else: - with torch.device(device): - config, _, max_seq_len = get_config(base_model, raise_exception=True, **config_kwargs) - model = model_loader( - base_model, - config=config, - **model_kwargs - ) - from peft import PeftModel # loads cuda, so avoid in global scope - model = PeftModel.from_pretrained( - model, - lora_weights, - torch_dtype=torch.float16 if device == 'cuda' else torch.float32, - local_files_only=local_files_only, - resume_download=resume_download, - use_auth_token=use_auth_token, - trust_remote_code=trust_remote_code, - offload_folder=offload_folder, - rope_scaling=rope_scaling, - device_map="auto", - ) - if load_half and not load_gptq: - if not getattr(model, "is_quantized", False): - model = model.half() - - # unwind broken decapoda-research config - if llama_type: - model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk - model.config.bos_token_id = 1 - model.config.eos_token_id = 2 - if 'gpt2' in base_model.lower(): - # add special tokens that otherwise all share the same id - tokenizer.add_special_tokens({'bos_token': '', - 'eos_token': '', - 'pad_token': ''}) - - if not isinstance(tokenizer, str): - model.eval() - if torch.__version__ >= "2" and sys.platform != "win32" and compile_model: - model = torch.compile(model) - - set_model_max_len(max_seq_len, tokenizer, verbose=False, reward_type=reward_type) - - # tell if conditional type - model.conditional_type = conditional_type - tokenizer.conditional_type = conditional_type - - return model, tokenizer, device - - -def set_model_max_len(max_seq_len, tokenizer, verbose=False, 
reward_type=False): - if reward_type: - # limit deberta, else uses too much memory and not worth response score - tokenizer.model_max_length = 512 - return - - tokenizer.model_max_length = int(max_seq_len) - if verbose: - print("model_max_length=%s" % tokenizer.model_max_length, flush=True) - # for bug in HF transformers - if tokenizer.model_max_length > 100000000: - tokenizer.model_max_length = 2048 - - -def pop_unused_model_kwargs(model_kwargs): - """ - in-place pop unused kwargs that are not dependency-upgrade friendly - no point passing in False, is default, and helps avoid needing to update requirements for new deps - :param model_kwargs: - :return: - """ - check_list = ['load_in_8bit', 'load_in_4bit'] - for k in check_list: - if k in model_kwargs and not model_kwargs[k]: - model_kwargs.pop(k) - - -def get_score_model(score_model: str = None, - load_8bit: bool = False, - load_4bit: bool = False, - low_bit_mode=1, - load_half: bool = True, - load_gptq: str = '', - load_exllama: bool = False, - use_gpu_id: bool = True, - base_model: str = '', - inference_server: str = '', - tokenizer_base_model: str = '', - lora_weights: str = "", - gpu_id: int = 0, - n_jobs=None, - - reward_type: bool = None, - local_files_only: bool = False, - resume_download: bool = True, - use_auth_token: Union[str, bool] = False, - trust_remote_code: bool = True, - offload_folder: str = None, - rope_scaling: dict = None, - compile_model: bool = True, - llamacpp_dict: typing.Dict = None, - - verbose: bool = False, - ): - if score_model is not None and score_model.strip(): - load_8bit = False - load_4bit = False - low_bit_mode = 1 - load_half = False - load_gptq = '' - load_exllama = False - use_safetensors = False - revision = None - base_model = score_model.strip() - tokenizer_base_model = '' - lora_weights = '' - inference_server = '' - llama_type = False - max_seq_len = None - compile_model = False - llamacpp_dict = {} - smodel, stokenizer, sdevice = get_model(reward_type=True, - **get_kwargs(get_model, exclude_names=['reward_type'], **locals())) - else: - smodel, stokenizer, sdevice = None, None, None - return smodel, stokenizer, sdevice - - -def evaluate_fake(*args, **kwargs): - yield dict(response=invalid_key_msg, sources='') - return - - -def evaluate( - model_state, - my_db_state, - selection_docs_state, - requests_state, - # START NOTE: Examples must have same order of parameters - instruction, - iinput, - context, - stream_output, - prompt_type, - prompt_dict, - temperature, - top_p, - top_k, - num_beams, - max_new_tokens, - min_new_tokens, - early_stopping, - max_time, - repetition_penalty, - num_return_sequences, - do_sample, - chat, - instruction_nochat, - iinput_nochat, - langchain_mode, - add_chat_history_to_context, - langchain_action, - langchain_agents, - top_k_docs, - chunk, - chunk_size, - document_subset, - document_choice, - pre_prompt_query, - prompt_query, - pre_prompt_summary, - prompt_summary, - system_prompt, - - image_loaders, - pdf_loaders, - url_loaders, - jq_schema, - visible_models, - h2ogpt_key, - add_search_to_context, - chat_conversation, - text_context_list, - docs_ordering_type, - min_max_new_tokens, - - # END NOTE: Examples must have same order of parameters - captions_model=None, - caption_loader=None, - doctr_loader=None, - pix2struct_loader=None, - async_output=None, - num_async=None, - src_lang=None, - tgt_lang=None, - debug=False, - concurrency_count=None, - save_dir=None, - sanitize_bot_response=False, - model_state0=None, - memory_restriction_level=None, - 
max_max_new_tokens=None, - is_public=None, - max_max_time=None, - raise_generate_gpu_exceptions=None, - lora_weights=None, - use_llm_if_no_docs=True, - load_db_if_exists=True, - dbs=None, - detect_user_path_changes_every_query=None, - use_openai_embedding=None, - use_openai_model=None, - hf_embedding_model=None, - migrate_embedding_model=None, - auto_migrate_db=None, - cut_distance=None, - db_type=None, - n_jobs=None, - first_para=None, - text_limit=None, - show_accordions=None, - top_k_docs_max_show=None, - show_link_in_sources=None, - verbose=False, - cli=False, - use_cache=None, - auto_reduce_chunks=None, - max_chunks=None, - headsize=None, - model_lock=None, - force_langchain_evaluate=None, - model_state_none=None, - load_exllama=None, - answer_with_sources=None, - append_sources_to_answer=None, - image_loaders_options0=None, - pdf_loaders_options0=None, - url_loaders_options0=None, - jq_schema0=None, - keep_sources_in_context=None, -): - # ensure passed these - assert concurrency_count is not None - assert memory_restriction_level is not None - assert raise_generate_gpu_exceptions is not None - assert use_openai_embedding is not None - assert use_openai_model is not None - assert hf_embedding_model is not None - assert migrate_embedding_model is not None - assert auto_migrate_db is not None - assert db_type is not None - assert top_k_docs is not None and isinstance(top_k_docs, int) - assert chunk is not None and isinstance(chunk, bool) - assert chunk_size is not None and isinstance(chunk_size, int) - assert n_jobs is not None - assert first_para is not None - assert isinstance(add_chat_history_to_context, bool) - assert isinstance(add_search_to_context, bool) - assert load_exllama is not None - # for lazy client (even chat client) - if image_loaders is None: - image_loaders = image_loaders_options0 - if pdf_loaders is None: - pdf_loaders = pdf_loaders_options0 - if url_loaders is None: - url_loaders = url_loaders_options0 - if jq_schema is None: - jq_schema = jq_schema0 - if isinstance(langchain_agents, str): - if langchain_agents.strip().startswith('['): - # already list, but as string - langchain_agents = str_to_list(langchain_agents) - else: - # just 1 item and make list - langchain_agents = [langchain_agents] - chat_conversation = str_to_list(chat_conversation) - text_context_list = str_to_list(text_context_list) - - langchain_modes = selection_docs_state['langchain_modes'] - langchain_mode_paths = selection_docs_state['langchain_mode_paths'] - langchain_mode_types = selection_docs_state['langchain_mode_types'] - - if debug: - locals_dict = locals().copy() - locals_dict.pop('model_state', None) - locals_dict.pop('model_state0', None) - locals_dict.pop('model_states', None) - print(locals_dict) - - no_model_msg = "Please choose a base model with --base_model (CLI) or load in Models Tab (gradio).\n" \ - "Then start New Conversation" - - if model_state is None: - model_state = model_state_none.copy() - if model_state0 is None: - # e.g. for no gradio case, set dummy value, else should be set - model_state0 = model_state_none.copy() - - # model_state['model] is only 'model' if should use model_state0 - # model could also be None - have_model_lock = model_lock is not None - have_fresh_model = model_state['model'] not in [None, 'model', no_model_str] - # for gradio UI control, expect model_state and model_state0 to match, so if have_model_lock=True, then should have_fresh_model=True - # but gradio API control will only use nochat api etc. 
and won't use fresh model, so can't assert in general - # if have_model_lock: - # assert have_fresh_model, "Expected model_state and model_state0 to match if have_model_lock" - have_cli_model = model_state0['model'] not in [None, 'model', no_model_str] - - if have_fresh_model: - # USE FRESH MODEL - if not have_model_lock: - # model_state0 is just one of model_state if model_lock, so don't nuke - # try to free-up original model (i.e. list was passed as reference) - if model_state0['model'] and hasattr(model_state0['model'], 'cpu'): - model_state0['model'].cpu() - model_state0['model'] = None - # try to free-up original tokenizer (i.e. list was passed as reference) - if model_state0['tokenizer']: - model_state0['tokenizer'] = None - clear_torch_cache() - chosen_model_state = model_state - elif have_cli_model: - # USE MODEL SETUP AT CLI - assert isinstance(model_state['model'], (type(None), str)) # expect no fresh model - chosen_model_state = model_state0 - else: - raise AssertionError(no_model_msg) - # get variables - model = chosen_model_state['model'] - tokenizer = chosen_model_state['tokenizer'] - device = chosen_model_state['device'] - base_model = chosen_model_state['base_model'] - tokenizer_base_model = chosen_model_state['tokenizer_base_model'] - lora_weights = chosen_model_state['lora_weights'] - inference_server = chosen_model_state['inference_server'] - visible_models = chosen_model_state['visible_models'] - # use overall key if have, so key for this gradio and any inner gradio - if chosen_model_state['h2ogpt_key'] is not None: - h2ogpt_key = chosen_model_state['h2ogpt_key'] - # prefer use input from API over model state - prompt_type = prompt_type or chosen_model_state['prompt_type'] - prompt_dict = prompt_dict or chosen_model_state['prompt_dict'] - - if base_model is None: - raise AssertionError(no_model_msg) - - assert base_model.strip(), no_model_msg - assert model, "Model is missing" - assert tokenizer, "Tokenizer is missing" - - # choose chat or non-chat mode - if not chat: - instruction = instruction_nochat - iinput = iinput_nochat - - # in some cases, like lean nochat API, don't want to force sending prompt_type, allow default choice - model_lower = base_model.lower() - if not prompt_type and model_lower in inv_prompt_type_to_model_lower and prompt_type != 'custom': - prompt_type = inv_prompt_type_to_model_lower[model_lower] - if verbose: - print("Auto-selecting prompt_type=%s for %s" % (prompt_type, model_lower), flush=True) - assert prompt_type is not None, "prompt_type was None" - - # Control generation hyperparameters - # adjust for bad inputs, e.g. 
in case also come from API that doesn't get constrained by gradio sliders - # below is for TGI server, not required for HF transformers - # limits are chosen similar to gradio_runner.py sliders/numbers - top_p = min(max(1e-3, top_p), 1.0 - 1e-3) - top_k = min(max(1, int(top_k)), 100) - temperature = min(max(0.01, temperature), 2.0) - # FIXME: https://github.com/h2oai/h2ogpt/issues/106 - num_beams = 1 if stream_output else num_beams # See max_beams in gradio_runner - max_max_new_tokens = get_max_max_new_tokens(chosen_model_state, - memory_restriction_level=memory_restriction_level, - max_new_tokens=max_new_tokens, - max_max_new_tokens=max_max_new_tokens) - if min_max_new_tokens is None: - # default for nochat api - min_max_new_tokens = 256 - if docs_ordering_type is None: - docs_ordering_type = 'reverse_ucurve_sort' - model_max_length = get_model_max_length(chosen_model_state) - max_new_tokens = min(max(1, int(max_new_tokens)), max_max_new_tokens) - min_new_tokens = min(max(0, int(min_new_tokens)), max_new_tokens) - max_time = min(max(0, max_time), max_max_time) - repetition_penalty = min(max(0.01, repetition_penalty), 3.0) - num_return_sequences = 1 if chat else min(max(1, int(num_return_sequences)), 10) - min_top_k_docs, max_top_k_docs, label_top_k_docs = get_minmax_top_k_docs(is_public) - # limit total tokens processed, e.g. for summarization, if public instance - if is_public: - total_tokens_for_docs = min(2 * model_max_length, 16384) - else: - total_tokens_for_docs = None - top_k_docs = min(max(min_top_k_docs, int(top_k_docs)), max_top_k_docs) - chunk_size = min(max(128, int(chunk_size)), 2048) - if not context: - context = '' - - # get prompter - prompter = Prompter(prompt_type, prompt_dict, debug=debug, chat=chat, stream_output=stream_output, - system_prompt=system_prompt) - - # THIRD PLACE where LangChain referenced, but imports only occur if enabled and have db to use - assert langchain_mode in langchain_modes, "Invalid langchain_mode %s not in %s" % (langchain_mode, langchain_modes) - assert langchain_action in langchain_actions, "Invalid langchain_action %s not in %s" % ( - langchain_action, langchain_actions) - assert len( - set(langchain_agents).difference(langchain_agents_list)) == 0, "Invalid langchain_agents %s" % langchain_agents - - # get db, but also fill db state so return already has my_db_state and dbs filled so faster next query - if langchain_mode != LangChainMode.DISABLED.value: - from src.gpt_langchain import get_any_db - db = get_any_db(my_db_state, langchain_mode, langchain_mode_paths, langchain_mode_types, - dbs=dbs, - load_db_if_exists=load_db_if_exists, - db_type=db_type, - use_openai_embedding=use_openai_embedding, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - for_sources_list=True, - verbose=verbose, - n_jobs=n_jobs, - ) - else: - db = None - - t_generate = time.time() - langchain_only_model = base_model in non_hf_types or \ - load_exllama or \ - inference_server.startswith('replicate') or \ - inference_server.startswith('sagemaker') or \ - inference_server.startswith('openai_azure_chat') or \ - inference_server.startswith('openai_azure') - do_langchain_path = langchain_mode not in [False, 'Disabled', 'LLM'] or \ - langchain_only_model or \ - force_langchain_evaluate or \ - len(text_context_list) > 0 - - if len(langchain_agents) > 0: - do_langchain_path = True - if add_search_to_context: - # easier to manage prompt etc. 
by doing full langchain path - do_langchain_path = True - - if do_langchain_path: - text = '' - sources = '' - response = '' - # use smaller cut_distance for wiki_full since so many matches could be obtained, and often irrelevant unless close - from gpt_langchain import run_qa_db - gen_hyper_langchain = dict(do_sample=do_sample, - temperature=temperature, - repetition_penalty=repetition_penalty, - top_k=top_k, - top_p=top_p, - num_beams=num_beams, - min_new_tokens=min_new_tokens, - max_new_tokens=max_new_tokens, - early_stopping=early_stopping, - max_time=max_time, - num_return_sequences=num_return_sequences, - ) - loaders_dict, captions_model = gr_to_lg(image_loaders, - pdf_loaders, - url_loaders, - captions_model=captions_model, - ) - loaders_dict.update(dict(captions_model=captions_model, - caption_loader=caption_loader, - doctr_loader=doctr_loader, - pix2struct_loader=pix2struct_loader, - jq_schema=jq_schema, - )) - data_point = dict(context=context, instruction=instruction, input=iinput) - # no longer stuff chat history directly into context this early - prompt_basic = prompter.generate_prompt(data_point, context_from_history=False) - prompt = prompt_basic - num_prompt_tokens = 0 - for r in run_qa_db( - inference_server=inference_server, - model_name=base_model, model=model, tokenizer=tokenizer, - langchain_only_model=langchain_only_model, - async_output=async_output, - num_async=num_async, - prompter=prompter, - use_llm_if_no_docs=use_llm_if_no_docs, - load_db_if_exists=load_db_if_exists, - db=db, - langchain_mode_paths=langchain_mode_paths, - langchain_mode_types=langchain_mode_types, - detect_user_path_changes_every_query=detect_user_path_changes_every_query, - cut_distance=1.1 if langchain_mode in ['wiki_full'] else cut_distance, - answer_with_sources=answer_with_sources, - append_sources_to_answer=append_sources_to_answer, - add_chat_history_to_context=add_chat_history_to_context, - add_search_to_context=add_search_to_context, - keep_sources_in_context=keep_sources_in_context, - memory_restriction_level=memory_restriction_level, - system_prompt=system_prompt, - use_openai_embedding=use_openai_embedding, - use_openai_model=use_openai_model, - hf_embedding_model=hf_embedding_model, - migrate_embedding_model=migrate_embedding_model, - auto_migrate_db=auto_migrate_db, - first_para=first_para, - text_limit=text_limit, - show_accordions=show_accordions, - top_k_docs_max_show=top_k_docs_max_show, - show_link_in_sources=show_link_in_sources, - - # evaluate args items - query=instruction, - iinput=iinput, - context=context, - stream_output=stream_output, - chunk=chunk, - chunk_size=chunk_size, - - **loaders_dict, - - langchain_mode=langchain_mode, - langchain_action=langchain_action, - langchain_agents=langchain_agents, - document_subset=document_subset, - document_choice=document_choice, - top_k_docs=top_k_docs, - prompt_type=prompt_type, - prompt_dict=prompt_dict, - pre_prompt_query=pre_prompt_query, - prompt_query=prompt_query, - pre_prompt_summary=pre_prompt_summary, - prompt_summary=prompt_summary, - text_context_list=text_context_list, - chat_conversation=chat_conversation, - visible_models=visible_models, - h2ogpt_key=h2ogpt_key, - docs_ordering_type=docs_ordering_type, - min_max_new_tokens=min_max_new_tokens, - - **gen_hyper_langchain, - - db_type=db_type, - n_jobs=n_jobs, - verbose=verbose, - cli=cli, - sanitize_bot_response=sanitize_bot_response, - - lora_weights=lora_weights, - - auto_reduce_chunks=auto_reduce_chunks, - max_chunks=max_chunks, - 
total_tokens_for_docs=total_tokens_for_docs, - headsize=headsize, - ): - # doesn't accumulate, new answer every yield, so only save that full answer - response = r['response'] - sources = r['sources'] - prompt = r['prompt'] - num_prompt_tokens = r['num_prompt_tokens'] - yield dict(response=response, sources=sources, save_dict=dict()) - if save_dir: - # estimate using tiktoken - extra_dict = gen_hyper_langchain.copy() - extra_dict.update(prompt_type=prompt_type, - inference_server=inference_server, - langchain_mode=langchain_mode, - langchain_action=langchain_action, - langchain_agents=langchain_agents, - document_subset=document_subset, - document_choice=document_choice, - chat_conversation=chat_conversation, - add_search_to_context=add_search_to_context, - num_prompt_tokens=num_prompt_tokens, - instruction=instruction, - iinput=iinput, - context=context, - t_generate=time.time() - t_generate, - ntokens=None, - tokens_persecond=None, - ) - save_dict = dict(prompt=prompt, - output=response, base_model=base_model, save_dir=save_dir, - where_from='run_qa_db', - extra_dict=extra_dict) - yield dict(response=response, sources=sources, save_dict=save_dict) - if verbose: - print( - 'Post-Generate Langchain: %s decoded_output: %s' % - (str(datetime.now()), len(response) if response else -1), - flush=True) - if response or sources or langchain_only_model: - # if got no response (e.g. not showing sources and got no sources, - # so nothing to give to LLM), then slip through and ask LLM - # Or if llama/gptj, then just return since they had no response and can't go down below code path - # don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it - return - - # NOT LANGCHAIN PATH, raw LLM - # restrict instruction + , typically what has large input - prompt, \ - instruction, iinput, context, \ - num_prompt_tokens, max_new_tokens, num_prompt_tokens0, num_prompt_tokens_actual, \ - chat_index, top_k_docs_trial, one_doc_size = \ - get_limited_prompt(instruction, - iinput, - tokenizer, - prompter=prompter, - inference_server=inference_server, - # prompt_type=prompt_type, - # prompt_dict=prompt_dict, - # chat=chat, - max_new_tokens=max_new_tokens, - # system_prompt=system_prompt, - context=context, - chat_conversation=chat_conversation, - keep_sources_in_context=keep_sources_in_context, - model_max_length=model_max_length, - memory_restriction_level=memory_restriction_level, - langchain_mode=langchain_mode, - add_chat_history_to_context=add_chat_history_to_context, - min_max_new_tokens=min_max_new_tokens, - ) - - if inference_server.startswith('vllm') or \ - inference_server.startswith('openai') or \ - inference_server.startswith('http'): - if inference_server.startswith('vllm') or inference_server.startswith('openai'): - assert not inference_server.startswith('openai_azure_chat'), "Not fo Azure, use langchain path" - assert not inference_server.startswith('openai_azure'), "Not for Azure, use langchain path" - openai, inf_type, deployment_name, base_url, api_version = set_openai(inference_server) - where_from = inf_type - - terminate_response = prompter.terminate_response or [] - stop_sequences = list(set(terminate_response + [prompter.PreResponse])) - stop_sequences = [x for x in stop_sequences if x] - # OpenAI will complain if ask for too many new tokens, takes it as min in some sense, wrongly so. 
- max_new_tokens_openai = min(max_new_tokens, model_max_length - num_prompt_tokens) - gen_server_kwargs = dict(temperature=temperature if do_sample else 0, - max_tokens=max_new_tokens_openai, - top_p=top_p if do_sample else 1, - frequency_penalty=0, - n=num_return_sequences, - presence_penalty=1.07 - repetition_penalty + 0.6, # so good default - ) - if inf_type == 'vllm' or inference_server == 'openai': - responses = openai.Completion.create( - model=base_model, - prompt=prompt, - **gen_server_kwargs, - stop=stop_sequences, - stream=stream_output, - ) - text = '' - sources = '' - response = '' - if not stream_output: - text = responses['choices'][0]['text'] - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - else: - collected_events = [] - for event in responses: - collected_events.append(event) # save the event response - event_text = event['choices'][0]['text'] # extract the text - text += event_text # append the text - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - elif inf_type == 'vllm_chat' or inference_server == 'openai_chat': - if inf_type == 'vllm_chat': - raise NotImplementedError('%s not supported by vLLM' % inf_type) - if system_prompt in [None, 'None', 'auto']: - openai_system_prompt = "You are a helpful assistant." - else: - openai_system_prompt = system_prompt - messages0 = [] - if openai_system_prompt: - messages0.append({"role": "system", "content": openai_system_prompt}) - messages0.append({'role': 'user', 'content': prompt}) - responses = openai.ChatCompletion.create( - model=base_model, - messages=messages0, - stream=stream_output, - **gen_server_kwargs, - ) - text = "" - sources = '' - response = "" - if not stream_output: - text = responses["choices"][0]["message"]["content"] - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - else: - for chunk in responses: - delta = chunk["choices"][0]["delta"] - if 'content' in delta: - text += delta['content'] - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - else: - raise RuntimeError("No such OpenAI mode: %s" % inference_server) - elif inference_server.startswith('http'): - inference_server, headers = get_hf_server(inference_server) - from gradio_utils.grclient import GradioClient - from text_generation import Client as HFClient - if isinstance(model, GradioClient): - gr_client = model - hf_client = None - elif isinstance(model, HFClient): - gr_client = None - hf_client = model - else: - inference_server, gr_client, hf_client = get_client_from_inference_server(inference_server, - base_model=base_model) - - # quick sanity check to avoid long timeouts, just see if can reach server - requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT_FAST', '10'))) - - if gr_client is not None: - # Note: h2oGPT gradio server could handle input token size issues for prompt, - # but best to handle here so send less data to server - - chat_client = False - where_from = "gr_client" - client_langchain_mode = 'Disabled' - client_add_chat_history_to_context = True - client_add_search_to_context = False - 
client_langchain_action = LangChainAction.QUERY.value - client_langchain_agents = [] - gen_server_kwargs = dict(temperature=temperature, - top_p=top_p, - top_k=top_k, - num_beams=num_beams, - max_new_tokens=max_new_tokens, - min_new_tokens=min_new_tokens, - early_stopping=early_stopping, - max_time=max_time, - repetition_penalty=repetition_penalty, - num_return_sequences=num_return_sequences, - do_sample=do_sample, - chat=chat_client, - ) - # account for gradio into gradio that handles prompting, avoid duplicating prompter prompt injection - if prompt_type in [None, '', PromptType.plain.name, PromptType.plain.value, - str(PromptType.plain.value)]: - # if our prompt is plain, assume either correct or gradio server knows different prompt type, - # so pass empty prompt_Type - gr_prompt_type = '' - gr_prompt_dict = '' - gr_prompt = prompt # already prepared prompt - gr_context = '' - gr_iinput = '' - else: - # if already have prompt_type that is not plain, None, or '', then already applied some prompting - # But assume server can handle prompting, and need to avoid double-up. - # Also assume server can do better job of using stopping.py to stop early, so avoid local prompting, let server handle - # So avoid "prompt" and let gradio server reconstruct from prompt_type we passed - # Note it's ok that prompter.get_response() has prompt+text, prompt=prompt passed, - # because just means extra processing and removal of prompt, but that has no human-bot prompting doesn't matter - # since those won't appear - gr_context = context - gr_prompt = instruction - gr_iinput = iinput - gr_prompt_type = prompt_type - gr_prompt_dict = prompt_dict - client_kwargs = dict(instruction=gr_prompt if chat_client else '', # only for chat=True - iinput=gr_iinput, # only for chat=True - context=gr_context, - # streaming output is supported, loops over and outputs each generation in streaming mode - # but leave stream_output=False for simple input/output mode - stream_output=stream_output, - - **gen_server_kwargs, - - prompt_type=gr_prompt_type, - prompt_dict=gr_prompt_dict, - - instruction_nochat=gr_prompt if not chat_client else '', - iinput_nochat=gr_iinput, # only for chat=False - langchain_mode=client_langchain_mode, - add_chat_history_to_context=client_add_chat_history_to_context, - langchain_action=client_langchain_action, - langchain_agents=client_langchain_agents, - top_k_docs=top_k_docs, - chunk=chunk, - chunk_size=chunk_size, - document_subset=DocumentSubset.Relevant.name, - document_choice=[DocumentChoice.ALL.value], - pre_prompt_query=pre_prompt_query, - prompt_query=prompt_query, - pre_prompt_summary=pre_prompt_summary, - prompt_summary=prompt_summary, - system_prompt=system_prompt, - image_loaders=image_loaders, - pdf_loaders=pdf_loaders, - url_loaders=url_loaders, - jq_schema=jq_schema, - visible_models=visible_models, - h2ogpt_key=h2ogpt_key, - add_search_to_context=client_add_search_to_context, - docs_ordering_type=None, - min_max_new_tokens=min_max_new_tokens, - ) - api_name = '/submit_nochat_api' # NOTE: like submit_nochat but stable API for string dict passing - response = '' - text = '' - sources = '' - if not stream_output: - res = gr_client.predict(str(dict(client_kwargs)), api_name=api_name) - res_dict = ast.literal_eval(res) - text = res_dict['response'] - sources = res_dict['sources'] - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - else: - job = 
gr_client.submit(str(dict(client_kwargs)), api_name=api_name) - res_dict = dict(response=text, sources=sources, save_dict=dict()) - text0 = '' - while not job.done(): - if job.communicator.job.latest_status.code.name == 'FINISHED': - break - e = job.future._exception - if e is not None: - break - outputs_list = job.communicator.job.outputs - if outputs_list: - res = job.communicator.job.outputs[-1] - res_dict = ast.literal_eval(res) - text = res_dict['response'] - sources = res_dict['sources'] - if gr_prompt_type == 'plain': - # then gradio server passes back full prompt + text - prompt_and_text = text - else: - prompt_and_text = prompt + text - response = prompter.get_response(prompt_and_text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - text_chunk = response[len(text0):] - if not text_chunk: - continue - # save old - text0 = response - yield dict(response=response, sources=sources, save_dict=dict()) - time.sleep(0.01) - # ensure get last output to avoid race - res_all = job.outputs() - if len(res_all) > 0: - res = res_all[-1] - res_dict = ast.literal_eval(res) - text = res_dict['response'] - sources = res_dict['sources'] - else: - # go with old text if last call didn't work - e = job.future._exception - if e is not None: - stre = str(e) - strex = ''.join(traceback.format_tb(e.__traceback__)) - else: - stre = '' - strex = '' - - print("Bad final response: %s %s %s %s %s: %s %s" % (base_model, inference_server, - res_all, prompt, text, stre, strex), - flush=True) - if gr_prompt_type == 'plain': - # then gradio server passes back full prompt + text - prompt_and_text = text - else: - prompt_and_text = prompt + text - response = prompter.get_response(prompt_and_text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - elif hf_client: - # HF inference server needs control over input tokens - where_from = "hf_client" - response = '' - extra = '' - sources = '' - - # prompt must include all human-bot like tokens, already added by prompt - # https://github.com/huggingface/text-generation-inference/tree/main/clients/python#types - terminate_response = prompter.terminate_response or [] - stop_sequences = list(set(terminate_response + [prompter.PreResponse])) - stop_sequences = [x for x in stop_sequences if x] - gen_server_kwargs = dict(do_sample=do_sample, - max_new_tokens=max_new_tokens, - # best_of=None, - repetition_penalty=repetition_penalty, - return_full_text=False, - seed=SEED, - stop_sequences=stop_sequences, - temperature=temperature, - top_k=top_k, - top_p=top_p, - # truncate=False, # behaves oddly - # typical_p=top_p, - # watermark=False, - # decoder_input_details=False, - ) - # work-around for timeout at constructor time, will be issue if multi-threading, - # so just do something reasonable or max_time if larger - # lower bound because client is re-used if multi-threading - hf_client.timeout = max(300, max_time) - if not stream_output: - text = hf_client.generate(prompt, **gen_server_kwargs).generated_text - response = prompter.get_response(prompt + text, prompt=prompt, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - else: - text = "" - for responses in hf_client.generate_stream(prompt, **gen_server_kwargs): - if not responses.token.special: - # stop_sequences - text_chunk = responses.token.text - text += text_chunk - response = prompter.get_response(prompt + text, prompt=prompt, - 
sanitize_bot_response=sanitize_bot_response) - sources = '' - yield dict(response=response, sources=sources, save_dict=dict()) - else: - raise RuntimeError("Failed to get client: %s" % inference_server) - else: - raise RuntimeError("No such inference_server %s" % inference_server) - - if save_dir and text: - # save prompt + new text - extra_dict = gen_server_kwargs.copy() - extra_dict.update(dict(inference_server=inference_server, num_prompt_tokens=num_prompt_tokens, - t_generate=time.time() - t_generate, - ntokens=None, - tokens_persecond=None, - )) - save_dict = dict(prompt=prompt, output=text, base_model=base_model, save_dir=save_dir, - where_from=where_from, extra_dict=extra_dict) - yield dict(response=response, sources=sources, save_dict=save_dict) - return - else: - assert not inference_server, "inference_server=%s not supported" % inference_server - - if isinstance(tokenizer, str): - # pipeline - if tokenizer == "summarization": - key = 'summary_text' - else: - raise RuntimeError("No such task type %s" % tokenizer) - # NOTE: uses max_length only - sources = '' - yield dict(response=model(prompt, max_length=max_new_tokens)[0][key], sources=sources, save_dict=dict()) - - if 'mbart-' in base_model.lower(): - assert src_lang is not None - tokenizer.src_lang = languages_covered()[src_lang] - - stopping_criteria = get_stopping(prompt_type, prompt_dict, tokenizer, device, base_model, - model_max_length=model_max_length, - prompter=prompter) - - inputs = tokenizer(prompt, return_tensors="pt") - if debug and len(inputs["input_ids"]) > 0: - print('input_ids length', len(inputs["input_ids"][0]), flush=True) - input_ids = inputs["input_ids"].to(device) - # CRITICAL LIMIT else will fail - max_max_tokens = tokenizer.model_max_length - max_input_tokens = max(0, int(max_max_tokens - min_new_tokens)) - # NOTE: Don't limit up front due to max_new_tokens, let go up to max or reach max_max_tokens in stopping.py - assert isinstance(max_input_tokens, int), "Bad type for max_input_tokens=%s %s" % ( - max_input_tokens, type(max_input_tokens)) - input_ids = input_ids[:, -max_input_tokens:] - # required for falcon if multiple threads or asyncio accesses to model during generation - if use_cache is None: - use_cache = False if 'falcon' in base_model else True - gen_config_kwargs = dict(num_beams=num_beams, - do_sample=do_sample, - repetition_penalty=float(repetition_penalty), - num_return_sequences=num_return_sequences, - renormalize_logits=True, - remove_invalid_values=True, - use_cache=use_cache, - ) - if do_sample: - gen_config_kwargs.update(dict(temperature=float(temperature), - top_p=float(top_p), - top_k=top_k)) - if True: - # unclear impact, some odd things going on inside - # leads to: - # The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results. - # Setting `pad_token_id` to `eos_token_id`:2 for open-end generation. - # or leads to: - # Using cls_token, but it is not set yet. - # Using mask_token, but it is not set yet. - # Using pad_token, but it is not set yet. - # Using sep_token, but it is not set yet. 
- token_ids = ['eos_token_id', 'pad_token_id', 'bos_token_id', 'cls_token_id', 'sep_token_id'] - for token_id in token_ids: - if hasattr(tokenizer, token_id) and getattr(tokenizer, token_id) is not None: - gen_config_kwargs.update({token_id: getattr(tokenizer, token_id)}) - generation_config = GenerationConfig(**gen_config_kwargs) - - gen_kwargs = dict(input_ids=input_ids, - generation_config=generation_config, - return_dict_in_generate=True, - output_scores=True, - max_new_tokens=max_new_tokens, # prompt + new - min_new_tokens=min_new_tokens, # prompt + new - early_stopping=early_stopping, # False, True, "never" - max_time=max_time, - stopping_criteria=stopping_criteria, - ) - if 'gpt2' in base_model.lower(): - gen_kwargs.update(dict(bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.eos_token_id)) - elif 'mbart-' in base_model.lower(): - assert tgt_lang is not None - tgt_lang = languages_covered()[tgt_lang] - gen_kwargs.update(dict(forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang])) - else: - token_ids = ['eos_token_id', 'bos_token_id', 'pad_token_id'] - for token_id in token_ids: - if hasattr(tokenizer, token_id) and getattr(tokenizer, token_id) is not None: - gen_kwargs.update({token_id: getattr(tokenizer, token_id)}) - - decoder_kwargs = dict(skip_special_tokens=True, - clean_up_tokenization_spaces=True) - - decoder = functools.partial(tokenizer.decode, - **decoder_kwargs - ) - with torch.no_grad(): - have_lora_weights = lora_weights not in [no_lora_str, '', None] - context_class_cast = NullContext if device == 'cpu' or have_lora_weights or device == 'mps' else torch.autocast - if t5_type(base_model): - # issues when casting to float16, can mess up t5 model, e.g. only when not streaming, or other odd behaviors - context_class_cast = NullContext - with context_class_cast(device): - # protection for gradio not keeping track of closed users, - # else hit bitsandbytes lack of thread safety: - # https://github.com/h2oai/h2ogpt/issues/104 - # but only makes sense if concurrency_count == 1 - context_class = NullContext # if concurrency_count > 1 else filelock.FileLock - if verbose: - print('Pre-Generate: %s' % str(datetime.now()), flush=True) - decoded_output = None - response = '' - with context_class("generate.lock"): - if verbose: - print('Generate: %s' % str(datetime.now()), flush=True) - always_use_streaming_method = True # to deal with complex parsing of prompt vs. 
generation due to odd tokenizing - if stream_output or always_use_streaming_method: - skip_prompt = True # True means first output excludes prompt - streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False, - **decoder_kwargs) - gen_kwargs.update(dict(streamer=streamer)) - target = wrapped_partial(generate_with_exceptions, model.generate, - raise_generate_gpu_exceptions=raise_generate_gpu_exceptions, - **gen_kwargs) - bucket = queue.Queue() - thread = EThread(target=target, streamer=streamer, bucket=bucket) - thread.start() - ret = dict(response='', sources='', save_dict=dict()) - outputs = "" - sources = '' - try: - for new_text in streamer: - if bucket.qsize() > 0 or thread.exc: - thread.join() - outputs += new_text - response = prompter.get_response(outputs, prompt=None, - only_new_text=True, - sanitize_bot_response=sanitize_bot_response) - ret = dict(response=response, sources=sources, save_dict=dict()) - if stream_output: - yield ret - if not stream_output: - yield ret - except BaseException: - # if any exception, raise that exception if was from thread, first - if thread.exc: - raise thread.exc - raise - finally: - # don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it - # in case no exception and didn't join with thread yet, then join - if not thread.exc: - thread.join() - # in case raise StopIteration or broke queue loop in streamer, but still have exception - if thread.exc: - raise thread.exc - decoded_output = outputs - ntokens = len(outputs) // 4 # hack for now - else: - # below length removal doesn't work in general, because encoding does not match internal of model generation - input_ids_len = gen_kwargs['input_ids'][0].shape[0] - try: - outputs = model.generate(**gen_kwargs) - finally: - pass - # don't clear torch cache here, delays multi-generation, and bot(), all_bot(), and evaluate_nochat() do it - # skip first IDs - ntokens = sum([len(s) - input_ids_len for s in outputs.sequences]) if save_dir else -1 - outputs = [decoder(s[input_ids_len:]) for s in outputs.sequences] - sources = '' - response = prompter.get_response(outputs, prompt=None, - only_new_text=True, - sanitize_bot_response=sanitize_bot_response) - yield dict(response=response, sources=sources, save_dict=dict()) - if outputs and len(outputs) >= 1: - decoded_output = prompt + outputs[0] - if save_dir and decoded_output: - extra_dict = gen_config_kwargs.copy() - extra_dict.update(dict(num_prompt_tokens=num_prompt_tokens, - t_generate=time.time() - t_generate, - ntokens=ntokens, - tokens_persecond=ntokens / (time.time() - t_generate), - )) - save_dict = dict(prompt=prompt, output=decoded_output, base_model=base_model, save_dir=save_dir, - where_from="evaluate_%s" % str(stream_output), - extra_dict=extra_dict) - yield dict(response=response, sources=sources, save_dict=save_dict) - if verbose: - print('Post-Generate: %s decoded_output: %s' % ( - str(datetime.now()), len(decoded_output) if decoded_output else -1), flush=True) - - -inputs_list_names = list(inspect.signature(evaluate).parameters) -state_names = input_args_list.copy() # doesn't have to be the same, but state_names must match evaluate() and how filled then -inputs_kwargs_list = [x for x in inputs_list_names if x not in eval_func_param_names + state_names] - - -def get_cutoffs(memory_restriction_level, for_context=False, model_max_length=2048): - # help to avoid errors like: - # RuntimeError: The size of tensor a (2048) must match the size of tensor b (2049) at non-singleton 
dimension 3 - # RuntimeError: expected scalar type Half but found Float - # with - 256 - if memory_restriction_level > 0: - max_length_tokenize = 768 - 256 if memory_restriction_level <= 2 else 512 - 256 - else: - # at least give room for 1 paragraph output - max_length_tokenize = model_max_length - 256 - cutoff_len = max_length_tokenize * 4 # if reaches limit, then can't generate new tokens - output_smallest = 30 * 4 - max_prompt_length = cutoff_len - output_smallest - - if for_context: - # then lower even more to avoid later chop, since just estimate tokens in context bot - max_prompt_length = max(64, int(max_prompt_length * 0.8)) - - return cutoff_len, output_smallest, max_length_tokenize, max_prompt_length - - -class H2OTextIteratorStreamer(TextIteratorStreamer): - """ - normally, timeout required for now to handle exceptions, else get() - but with H2O version of TextIteratorStreamer, loop over block to handle - """ - - def __init__(self, tokenizer, skip_prompt: bool = False, timeout: typing.Optional[float] = None, - block=True, **decode_kwargs): - super().__init__(tokenizer, skip_prompt, **decode_kwargs) - self.text_queue = queue.Queue() - self.stop_signal = None - self.do_stop = False - self.timeout = timeout - self.block = block - - def on_finalized_text(self, text: str, stream_end: bool = False): - """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.""" - self.text_queue.put(text, timeout=self.timeout) - if stream_end: - self.text_queue.put(self.stop_signal, timeout=self.timeout) - - def __iter__(self): - return self - - def __next__(self): - while True: - try: - value = self.stop_signal # value looks unused in pycharm, not true - if self.do_stop: - print("hit stop", flush=True) - # could raise or break, maybe best to raise and make parent see if any exception in thread - self.clear_queue() - self.do_stop = False - raise StopIteration() - # break - value = self.text_queue.get(block=self.block, timeout=self.timeout) - break - except queue.Empty: - time.sleep(0.01) - if value == self.stop_signal: - self.clear_queue() - self.do_stop = False - raise StopIteration() - else: - return value - - def clear_queue(self): - # make sure streamer is reusable after stop hit - with self.text_queue.mutex: - self.text_queue.queue.clear() - - def put(self, value): - """ - Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. - # same as base class, except remove hack w.r.t. text.rfind(" ") that ruins LLaMa2 - """ - if len(value.shape) > 1 and value.shape[0] > 1: - raise ValueError("TextStreamer only supports batch size 1") - elif len(value.shape) > 1: - value = value[0] - - if self.skip_prompt and self.next_tokens_are_prompt: - self.next_tokens_are_prompt = False - return - - # Add the new token to the cache and decodes the entire thing. - self.token_cache.extend(value.tolist()) - text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) - - # After the symbol for a new line, we flush the cache. - if text.endswith("\n"): - printable_text = text[self.print_len:] - self.token_cache = [] - self.print_len = 0 - # If the last token is a CJK character, we print the characters. - elif len(text) > 0 and self._is_chinese_char(ord(text[-1])): - printable_text = text[self.print_len:] - self.print_len += len(printable_text) - # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, - # which may change with the subsequent token -- there are probably smarter ways to do this!) 
- elif len(text) > 0 and text[-1] == '�': - printable_text = text[self.print_len: text.rfind(" ") + 1] - self.print_len += len(printable_text) - else: - printable_text = text[self.print_len:] - self.print_len += len(printable_text) - - self.on_finalized_text(printable_text) - - -def generate_with_exceptions(func, *args, raise_generate_gpu_exceptions=True, **kwargs): - try: - func(*args, **kwargs) - except torch.cuda.OutOfMemoryError as e: - print("GPU OOM 2: exception: %s" % str(e), - flush=True) - if 'input_ids' in kwargs: - if kwargs['input_ids'] is not None: - kwargs['input_ids'].cpu() - kwargs['input_ids'] = None - traceback.print_exc() - clear_torch_cache() - return - except (Exception, RuntimeError) as e: - if 'Expected all tensors to be on the same device' in str(e) or \ - 'expected scalar type Half but found Float' in str(e) or \ - 'probability tensor contains either' in str(e) or \ - 'cublasLt ran into an error!' in str(e) or \ - 'mat1 and mat2 shapes cannot be multiplied' in str(e): - print( - "GPU Error: exception: %s" % str(e), - flush=True) - traceback.print_exc() - clear_torch_cache() - if raise_generate_gpu_exceptions: - raise - return - else: - clear_torch_cache() - if raise_generate_gpu_exceptions: - raise - - -def get_generate_params(model_lower, - chat, - stream_output, show_examples, - prompt_type, prompt_dict, - system_prompt, - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - temperature, top_p, top_k, num_beams, - max_new_tokens, min_new_tokens, early_stopping, max_time, - repetition_penalty, num_return_sequences, - do_sample, - top_k_docs, chunk, chunk_size, - image_loaders, - pdf_loaders, - url_loaders, - jq_schema, - docs_ordering_type, - min_max_new_tokens, - verbose, - ): - use_defaults = False - use_default_examples = True - examples = [] - task_info = 'LLM' - if model_lower: - print(f"Using Model {model_lower}", flush=True) - else: - if verbose: - print("No model defined yet", flush=True) - - min_new_tokens = min_new_tokens if min_new_tokens is not None else 0 - early_stopping = early_stopping if early_stopping is not None else False - max_time_defaults = 60 * 3 - max_time = max_time if max_time is not None else max_time_defaults - - if not prompt_type and model_lower in inv_prompt_type_to_model_lower and prompt_type != 'custom': - prompt_type = inv_prompt_type_to_model_lower[model_lower] - if verbose: - print("Auto-selecting prompt_type=%s for %s" % (prompt_type, model_lower), flush=True) - - # examples at first don't include chat, instruction_nochat, iinput_nochat, added at end - if show_examples is None: - if chat: - show_examples = False - else: - show_examples = True - - summarize_example1 = """Jeff: Can I train a ? Transformers model on Amazon SageMaker? -Philipp: Sure you can use the new Hugging Face Deep Learning Container. -Jeff: ok. -Jeff: and how can I get started? -Jeff: where can I find documentation? -Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face""" - - use_placeholder_instruction_as_example = False - if 'bart-large-cnn-samsum' in model_lower or 'flan-t5-base-samsum' in model_lower: - placeholder_instruction = summarize_example1 - placeholder_input = "" - use_defaults = True - use_default_examples = False - use_placeholder_instruction_as_example = True - task_info = "Summarization" - elif 't5-' in model_lower or 't5' == model_lower or 'flan-' in model_lower: - placeholder_instruction = "The square root of x is the cube root of y. 
What is y to the power of 2, if x = 4?" - placeholder_input = "" - use_defaults = True - use_default_examples = True - task_info = "Multi-Task: Q/A, translation, Chain-of-Thought, Logical Reasoning, Summarization, etc. Best to use task prefix as trained on, e.g. `translate English to German: ` (space after colon)" - elif 'mbart-' in model_lower: - placeholder_instruction = "The girl has long hair." - placeholder_input = "" - use_defaults = True - use_default_examples = False - use_placeholder_instruction_as_example = True - elif 'gpt2' in model_lower: - placeholder_instruction = "The sky is" - placeholder_input = "" - prompt_type = prompt_type or 'plain' - use_default_examples = True # some will be odd "continuations" but can be ok - use_placeholder_instruction_as_example = True - task_info = "Auto-complete phrase, code, etc." - use_defaults = True - else: - if chat: - placeholder_instruction = "" - else: - placeholder_instruction = "Give detailed answer for whether Einstein or Newton is smarter." - placeholder_input = "" - if not prompt_type and model_lower in inv_prompt_type_to_model_lower and prompt_type != 'custom': - prompt_type = inv_prompt_type_to_model_lower[model_lower] - elif model_lower: - # default is plain, because might rely upon trust_remote_code to handle prompting - prompt_type = prompt_type or 'plain' - else: - prompt_type = '' - task_info = "No task" - if prompt_type == 'instruct': - task_info = "Answer question or follow imperative as instruction with optionally input." - elif prompt_type == 'plain': - task_info = "Auto-complete phrase, code, etc." - elif prompt_type == 'human_bot': - if chat: - task_info = "Chat (Shift-Enter to give question/imperative, input concatenated with instruction)" - else: - task_info = "Ask question/imperative (input concatenated with instruction)" - - # revert to plain if still nothing - prompt_type = prompt_type or 'plain' - if use_defaults: - temperature = 1.0 if temperature is None else temperature - top_p = 1.0 if top_p is None else top_p - top_k = 40 if top_k is None else top_k - num_beams = num_beams or 1 - max_new_tokens = max_new_tokens or 512 - repetition_penalty = repetition_penalty or 1.07 - num_return_sequences = min(num_beams, num_return_sequences or 1) - do_sample = False if do_sample is None else do_sample - else: - temperature = 0.1 if temperature is None else temperature - top_p = 0.75 if top_p is None else top_p - top_k = 40 if top_k is None else top_k - num_beams = num_beams or 1 - max_new_tokens = max_new_tokens or 1024 - repetition_penalty = repetition_penalty or 1.07 - num_return_sequences = min(num_beams, num_return_sequences or 1) - do_sample = False if do_sample is None else do_sample - # doesn't include chat, instruction_nochat, iinput_nochat, added later - params_list = ["", - stream_output, - prompt_type, prompt_dict, - temperature, top_p, top_k, num_beams, - max_new_tokens, min_new_tokens, - early_stopping, max_time, repetition_penalty, num_return_sequences, do_sample] - - if use_placeholder_instruction_as_example: - examples += [[placeholder_instruction, ''] + params_list] - - if use_default_examples: - examples += [ - ["Translate English to French", "Good morning"] + params_list, - ["Give detailed answer for whether Einstein or Newton is smarter.", ''] + params_list, - ["Explain in detailed list, all the best practices for coding in python.", ''] + params_list, - [ - "Create a markdown table with 3 rows for the primary colors, and 2 columns, with color name and hex codes.", - ''] + params_list, - ['Translate to 
German: My name is Arthur', ''] + params_list, - ["Please answer to the following question. Who is going to be the next Ballon d'or?", ''] + params_list, - ['Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering.', - ''] + params_list, - ['Please answer the following question. What is the boiling point of Nitrogen?', ''] + params_list, - ['Answer the following yes/no question. Can you write a whole Haiku in a single tweet?', ''] + params_list, - ["Simplify the following expression: (False or False and True). Explain your answer.", ''] + params_list, - [ - "Premise: At my age you will probably have learnt one lesson. Hypothesis: It's not certain how many lessons you'll learn by your thirties. Does the premise entail the hypothesis?", - ''] + params_list, - ['The square root of x is the cube root of y. What is y to the power of 2, if x = 4?', ''] + params_list, - [ - 'Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have?', - ''] + params_list, - ["""def area_of_rectangle(a: float, b: float): - \"\"\"Return the area of the rectangle.\"\"\"""", ''] + params_list, - ["""# a function in native python: -def mean(a): - return sum(a)/len(a) - -# the same function using numpy: -import numpy as np -def mean(a):""", ''] + params_list, - ["""X = np.random.randn(100, 100) -y = np.random.randint(0, 1, 100) - -# fit random forest classifier with 20 estimators""", ''] + params_list, - ] - # add summary example - examples += [ - [summarize_example1, 'Summarize' if prompt_type not in ['plain', 'instruct_simple'] else ''] + params_list] - - src_lang = "English" - tgt_lang = "Russian" - - # move to correct position - for example in examples: - example += [chat, '', '', LangChainMode.DISABLED.value, True, - LangChainAction.QUERY.value, [], - top_k_docs, chunk, chunk_size, DocumentSubset.Relevant.name, [], - pre_prompt_query, prompt_query, - pre_prompt_summary, prompt_summary, - system_prompt, - image_loaders, - pdf_loaders, - url_loaders, - jq_schema, - None, - None, - False, - None, - None, - docs_ordering_type, - min_max_new_tokens, - ] - # adjust examples if non-chat mode - if not chat: - example[eval_func_param_names.index('instruction_nochat')] = example[ - eval_func_param_names.index('instruction')] - example[eval_func_param_names.index('instruction')] = '' - - example[eval_func_param_names.index('iinput_nochat')] = example[eval_func_param_names.index('iinput')] - example[eval_func_param_names.index('iinput')] = '' - assert len(example) == len(eval_func_param_names), "Wrong example: %s %s" % ( - len(example), len(eval_func_param_names)) - - if prompt_type == PromptType.custom.name and not prompt_dict: - raise ValueError("Unexpected to get non-empty prompt_dict=%s for prompt_type=%s" % (prompt_dict, prompt_type)) - - # get prompt_dict from prompt_type, so user can see in UI etc., or for custom do nothing except check format - prompt_dict, error0 = get_prompt(prompt_type, prompt_dict, - chat=False, context='', reduced=False, making_context=False, return_dict=True, - system_prompt=system_prompt) - if error0: - raise RuntimeError("Prompt wrong: %s" % error0) - - return placeholder_instruction, placeholder_input, \ - stream_output, show_examples, \ - prompt_type, prompt_dict, \ - temperature, top_p, top_k, num_beams, \ - max_new_tokens, min_new_tokens, early_stopping, max_time, \ - repetition_penalty, num_return_sequences, \ - do_sample, \ - src_lang, 
tgt_lang, \ - examples, \ - task_info - - -def languages_covered(): - # https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt#languages-covered - covered = """Arabic (ar_AR), Czech (cs_CZ), German (de_DE), English (en_XX), Spanish (es_XX), Estonian (et_EE), Finnish (fi_FI), French (fr_XX), Gujarati (gu_IN), Hindi (hi_IN), Italian (it_IT), Japanese (ja_XX), Kazakh (kk_KZ), Korean (ko_KR), Lithuanian (lt_LT), Latvian (lv_LV), Burmese (my_MM), Nepali (ne_NP), Dutch (nl_XX), Romanian (ro_RO), Russian (ru_RU), Sinhala (si_LK), Turkish (tr_TR), Vietnamese (vi_VN), Chinese (zh_CN), Afrikaans (af_ZA), Azerbaijani (az_AZ), Bengali (bn_IN), Persian (fa_IR), Hebrew (he_IL), Croatian (hr_HR), Indonesian (id_ID), Georgian (ka_GE), Khmer (km_KH), Macedonian (mk_MK), Malayalam (ml_IN), Mongolian (mn_MN), Marathi (mr_IN), Polish (pl_PL), Pashto (ps_AF), Portuguese (pt_XX), Swedish (sv_SE), Swahili (sw_KE), Tamil (ta_IN), Telugu (te_IN), Thai (th_TH), Tagalog (tl_XX), Ukrainian (uk_UA), Urdu (ur_PK), Xhosa (xh_ZA), Galician (gl_ES), Slovene (sl_SI)""" - covered = covered.split(', ') - covered = {x.split(' ')[0]: x.split(' ')[1].replace(')', '').replace('(', '') for x in covered} - return covered - - -def score_qa(smodel, stokenizer, max_length_tokenize, question, answer, cutoff_len): - question = question[-cutoff_len:] - answer = answer[-cutoff_len:] - - inputs = stokenizer(question, answer, - return_tensors="pt", - truncation=True, - max_length=max_length_tokenize).to(smodel.device) - try: - score = torch.sigmoid(smodel(**inputs.to(smodel.device)).logits[0].float()).cpu().detach().numpy()[0] - except torch.cuda.OutOfMemoryError as e: - print("GPU OOM 3: question: %s answer: %s exception: %s" % (question, answer, str(e)), flush=True) - del inputs - traceback.print_exc() - clear_torch_cache() - return 'Response Score: GPU OOM' - except (Exception, RuntimeError) as e: - if 'Expected all tensors to be on the same device' in str(e) or \ - 'expected scalar type Half but found Float' in str(e) or \ - 'probability tensor contains either' in str(e) or \ - 'cublasLt ran into an error!' 
in str(e) or \ - 'device-side assert triggered' in str(e): - print("GPU Error: question: %s answer: %s exception: %s" % (question, answer, str(e)), - flush=True) - traceback.print_exc() - clear_torch_cache() - return 'Response Score: GPU Error' - else: - raise - os.environ['TOKENIZERS_PARALLELISM'] = 'true' - return score - - -def check_locals(**kwargs): - # ensure everything in evaluate is here - can_skip_because_locally_generated = no_default_param_names + [ - # get_model: - 'reward_type' - ] - for k in eval_func_param_names: - if k in can_skip_because_locally_generated: - continue - assert k in kwargs, "Missing %s" % k - for k in inputs_kwargs_list: - if k in can_skip_because_locally_generated: - continue - assert k in kwargs, "Missing %s" % k - - for k in list(inspect.signature(get_model).parameters): - if k in can_skip_because_locally_generated: - continue - assert k in kwargs, "Missing %s" % k - - -def get_model_max_length(model_state): - if not isinstance(model_state['tokenizer'], (str, type(None))): - return model_state['tokenizer'].model_max_length - else: - return 2048 - - -def get_max_max_new_tokens(model_state, **kwargs): - if not isinstance(model_state['tokenizer'], (str, type(None))): - max_max_new_tokens = model_state['tokenizer'].model_max_length - else: - max_max_new_tokens = None - - if kwargs['max_max_new_tokens'] is not None and max_max_new_tokens is not None: - return min(max_max_new_tokens, kwargs['max_max_new_tokens']) - elif kwargs['max_max_new_tokens'] is not None: - return kwargs['max_max_new_tokens'] - elif kwargs['memory_restriction_level'] == 1: - return 768 - elif kwargs['memory_restriction_level'] == 2: - return 512 - elif kwargs['memory_restriction_level'] >= 3: - return 256 - else: - # FIXME: Need to update after new model loaded, so user can control with slider - return 2048 - - -def get_minmax_top_k_docs(is_public): - if is_public: - min_top_k_docs = 1 - max_top_k_docs = 8 - label_top_k_docs = "Number of document chunks" - else: - min_top_k_docs = -1 - max_top_k_docs = 100 - label_top_k_docs = "Number of document chunks (-1 = auto fill model context)" - return min_top_k_docs, max_top_k_docs, label_top_k_docs - - -def merge_chat_conversation_history(chat_conversation1, history): - # chat_conversation and history ordered so largest index of list is most recent - if chat_conversation1: - chat_conversation1 = str_to_list(chat_conversation1) - for conv1 in chat_conversation1: - assert isinstance(conv1, (list, tuple)) - assert len(conv1) == 2 - - if isinstance(history, list): - # make copy so only local change - if chat_conversation1: - # so priority will be newest that comes from actual chat history from UI, then chat_conversation - history = chat_conversation1 + history.copy() - elif chat_conversation1: - history = chat_conversation1 - else: - history = [] - return history - - -def history_to_context(history, langchain_mode=None, - add_chat_history_to_context=None, - prompt_type=None, prompt_dict=None, chat=None, model_max_length=None, - memory_restriction_level=None, keep_sources_in_context=None, - system_prompt=None, chat_conversation=None): - """ - consumes all history up to (but not including) latest history item that is presumed to be an [instruction, None] pair - :param history: - :param langchain_mode: - :param add_chat_history_to_context: - :param prompt_type: - :param prompt_dict: - :param chat: - :param model_max_length: - :param memory_restriction_level: - :param keep_sources_in_context: - :param system_prompt: - :param chat_conversation: - 
:return: - """ - history = merge_chat_conversation_history(chat_conversation, history) - - if len(history) >= 1 and len(history[-1]) >= 2 and not history[-1][1]: - len_history = len(history) - 1 - else: - # full history - len_history = len(history) - - # ensure output will be unique to models - _, _, _, max_prompt_length = get_cutoffs(memory_restriction_level, - for_context=True, model_max_length=model_max_length) - context1 = '' - if max_prompt_length is not None and add_chat_history_to_context: - context1 = '' - # - 1 below because current instruction already in history from user() - for histi in range(0, len_history): - data_point = dict(instruction=history[histi][0], input='', output=history[histi][1]) - prompt, pre_response, terminate_response, chat_sep, chat_turn_sep = \ - generate_prompt(data_point, - prompt_type, - prompt_dict, - chat, - reduced=True, - making_context=True, - system_prompt=system_prompt, - histi=histi) - # md -> back to text, maybe not super important if model trained enough - if not keep_sources_in_context and langchain_mode != 'Disabled' and prompt.find(super_source_prefix) >= 0: - # FIXME: This is relatively slow even for small amount of text, like 0.3s each history item - import re - prompt = re.sub(f'{re.escape(super_source_prefix)}.*?{re.escape(super_source_postfix)}', '', prompt, - flags=re.DOTALL) - if prompt.endswith('\n
<p>'): - prompt = prompt[:-4] - prompt = prompt.replace('<br>
    ', chat_turn_sep) - if not prompt.endswith(chat_turn_sep): - prompt += chat_turn_sep - # most recent first, add older if can - # only include desired chat history - if len(prompt + context1) > max_prompt_length: - break - context1 += prompt - - _, pre_response, terminate_response, chat_sep, chat_turn_sep = \ - generate_prompt({}, prompt_type, prompt_dict, - chat, reduced=True, - making_context=True, - system_prompt=system_prompt, - histi=-1) - if context1 and not context1.endswith(chat_turn_sep): - context1 += chat_turn_sep # ensure if terminates abruptly, then human continues on next line - return context1 - - -def get_limited_prompt(instruction, - iinput, - tokenizer, - prompter=None, - inference_server=None, - prompt_type=None, prompt_dict=None, chat=False, max_new_tokens=None, - system_prompt='', - context='', chat_conversation=None, text_context_list=None, - keep_sources_in_context=False, - model_max_length=None, memory_restriction_level=0, - langchain_mode=None, add_chat_history_to_context=True, - verbose=False, - doc_importance=0.5, - min_max_new_tokens=256, - ): - if prompter: - prompt_type = prompter.prompt_type - prompt_dict = prompter.prompt_dict - chat = prompter.chat - stream_output = prompter.stream_output - system_prompt = prompter.system_prompt - - # merge handles if chat_conversation is None - history = [] - history = merge_chat_conversation_history(chat_conversation, history) - history_to_context_func = functools.partial(history_to_context, - langchain_mode=langchain_mode, - add_chat_history_to_context=add_chat_history_to_context, - prompt_type=prompt_type, - prompt_dict=prompt_dict, - chat=chat, - model_max_length=model_max_length, - memory_restriction_level=memory_restriction_level, - keep_sources_in_context=keep_sources_in_context, - system_prompt=system_prompt) - context2 = history_to_context_func(history) - context1 = context - if context1 is None: - context1 = '' - - from h2oai_pipeline import H2OTextGenerationPipeline - data_point_just_instruction = dict(context='', instruction=instruction, input='') - prompt_just_instruction = prompter.generate_prompt(data_point_just_instruction) - instruction, num_instruction_tokens = H2OTextGenerationPipeline.limit_prompt(instruction, tokenizer) - num_instruction_tokens_real = get_token_count(prompt_just_instruction, tokenizer) - num_instruction_tokens += (num_instruction_tokens_real - num_instruction_tokens) - - context1, num_context1_tokens = H2OTextGenerationPipeline.limit_prompt(context1, tokenizer) - context2, num_context2_tokens = H2OTextGenerationPipeline.limit_prompt(context2, tokenizer) - iinput, num_iinput_tokens = H2OTextGenerationPipeline.limit_prompt(iinput, tokenizer) - if text_context_list is None: - text_context_list = [] - num_doc_tokens = sum([get_token_count(x + '\n\n', tokenizer) for x in text_context_list]) - - num_prompt_tokens0 = (num_instruction_tokens or 0) + \ - (num_context1_tokens or 0) + \ - (num_context2_tokens or 0) + \ - (num_iinput_tokens or 0) + \ - (num_doc_tokens or 0) - - # go down to no less than 256, about 1 paragraph - # use max_new_tokens before use num_prompt_tokens0 else would be negative or ~0 - min_max_new_tokens = min(min_max_new_tokens, max_new_tokens) - # by default assume can handle all chat and docs - chat_index = 0 - - # allowed residual is either half of what is allowed if doc exceeds half, or is rest of what doc didn't consume - num_non_doc_tokens = num_prompt_tokens0 - num_doc_tokens - # to doc first then non-doc, shouldn't matter much either way - doc_max_length = 
max(model_max_length - num_non_doc_tokens, doc_importance * model_max_length) - top_k_docs, one_doc_size, num_doc_tokens = get_docs_tokens(tokenizer, text_context_list=text_context_list, - max_input_tokens=doc_max_length) - non_doc_max_length = max(model_max_length - num_doc_tokens, (1.0 - doc_importance) * model_max_length) - - if num_non_doc_tokens > non_doc_max_length: - # need to limit in some way, keep portion of history but all of context and instruction - # 1) drop iinput (unusual to include anyways) - # 2) reduce history - # 3) reduce context1 - # 4) limit instruction so will fit - diff1 = non_doc_max_length - ( - num_instruction_tokens + num_context1_tokens + num_context2_tokens + min_max_new_tokens) - diff2 = non_doc_max_length - (num_instruction_tokens + num_context1_tokens + min_max_new_tokens) - diff3 = non_doc_max_length - (num_instruction_tokens + min_max_new_tokens) - diff4 = non_doc_max_length - min_max_new_tokens - if diff1 > 0: - # then should be able to do #1 - iinput = '' - num_iinput_tokens = 0 - elif diff2 > 0 > diff1: - # then may be able to do #1 + #2 - iinput = '' - num_iinput_tokens = 0 - chat_index_final = len(history) - for chat_index in range(len(history)): - # NOTE: history and chat_conversation are older for first entries - # FIXME: This is a slow for many short conversations - context2 = history_to_context_func(history[chat_index:]) - num_context2_tokens = get_token_count(context2, tokenizer) - diff1 = non_doc_max_length - ( - num_instruction_tokens + num_context1_tokens + num_context2_tokens + min_max_new_tokens) - if diff1 > 0: - chat_index_final = chat_index - if verbose: - print("chat_conversation used %d out of %d" % (chat_index, len(history)), flush=True) - break - chat_index = chat_index_final # i.e. if chat_index == len(history), then nothing can be consumed - elif diff3 > 0 > diff2: - # then may be able to do #1 + #2 + #3 - iinput = '' - num_iinput_tokens = 0 - context2 = '' - num_context2_tokens = 0 - context1, num_context1_tokens = H2OTextGenerationPipeline.limit_prompt(context1, tokenizer, - max_prompt_length=diff3) - if num_context1_tokens <= diff3: - pass - else: - print("failed to reduce", flush=True) - else: - # then must be able to do #1 + #2 + #3 + #4 - iinput = '' - num_iinput_tokens = 0 - context2 = '' - num_context2_tokens = 0 - context1 = '' - num_context1_tokens = 0 - # diff4 accounts for real prompting for instruction - # FIXME: history_to_context could include instruction, in case system prompt long, we overcount and could have more free tokens - instruction, num_instruction_tokens = H2OTextGenerationPipeline.limit_prompt(instruction, tokenizer, - max_prompt_length=diff4) - # get actual tokens - data_point_just_instruction = dict(context='', instruction=instruction, input='') - prompt_just_instruction = prompter.generate_prompt(data_point_just_instruction) - num_instruction_tokens_real = get_token_count(prompt_just_instruction, tokenizer) - num_instruction_tokens += (num_instruction_tokens_real - num_instruction_tokens) - - # update full context - context = context1 + context2 - # update token counts (docs + non-docs, all tokens) - num_prompt_tokens = (num_instruction_tokens or 0) + \ - (num_context1_tokens or 0) + \ - (num_context2_tokens or 0) + \ - (num_iinput_tokens or 0) + \ - (num_doc_tokens or 0) - - # update max_new_tokens - if inference_server and inference_server.startswith('http'): - # assume TGI/Gradio setup to consume tokens and have long output too, even if exceeds model capacity. 
- pass - else: - # limit so max_new_tokens = prompt + new < max - # otherwise model can fail etc. e.g. for distilgpt2 asking for 1024 tokens is enough to fail if prompt=1 token - max_new_tokens = min(max_new_tokens, model_max_length - num_prompt_tokens) - - if prompter is None: - # get prompter - debug = False - stream_output = False # doesn't matter - prompter = Prompter(prompt_type, prompt_dict, debug=debug, chat=chat, stream_output=stream_output, - system_prompt=system_prompt) - - data_point = dict(context=context, instruction=instruction, input=iinput) - # handle promptA/promptB addition if really from history. - # if not from history, then reduced=False inside correct - # if mixed, then no specific correct thing to do, so treat like history and promptA/B will come first still - context_from_history = len(history) > 0 and len(context1) > 0 - prompt = prompter.generate_prompt(data_point, context_from_history=context_from_history) - num_prompt_tokens_actual = get_token_count(prompt, tokenizer) - - return prompt, \ - instruction, iinput, context, \ - num_prompt_tokens, max_new_tokens, num_prompt_tokens0, num_prompt_tokens_actual, \ - chat_index, top_k_docs, one_doc_size - - -def get_docs_tokens(tokenizer, text_context_list=[], max_input_tokens=None): - if text_context_list is None or len(text_context_list) == 0: - return 0, None, 0 - if max_input_tokens is None: - max_input_tokens = tokenizer.model_max_length - tokens = [get_token_count(x + '\n\n', tokenizer) for x in text_context_list] - tokens_cumsum = np.cumsum(tokens) - where_res = np.where(tokens_cumsum < max_input_tokens)[0] - # if below condition fails, then keep top_k_docs=-1 and trigger special handling next - if where_res.shape[0] > 0: - top_k_docs = 1 + where_res[-1] - one_doc_size = None - num_doc_tokens = tokens_cumsum[top_k_docs - 1] # by index - else: - # if here, means 0 and just do best with 1 doc - top_k_docs = 1 - text_context_list = text_context_list[:top_k_docs] - # critical protection - from src.h2oai_pipeline import H2OTextGenerationPipeline - doc_content = text_context_list[0] - doc_content, new_tokens0 = H2OTextGenerationPipeline.limit_prompt(doc_content, - tokenizer, - max_prompt_length=max_input_tokens) - text_context_list[0] = doc_content - one_doc_size = len(doc_content) - num_doc_tokens = get_token_count(doc_content + '\n\n', tokenizer) - print("Unexpected large chunks and can't add to context, will add 1 anyways. 
Tokens %s -> %s" % ( - tokens[0], new_tokens0), flush=True) - return top_k_docs, one_doc_size, num_doc_tokens - - -def entrypoint_main(): - """ - Examples: - - WORLD_SIZE=4 CUDA_VISIBLE_DEVICES="0,1,2,3" torchrun --nproc_per_node=4 --master_port=1234 generate.py --base_model='EleutherAI/gpt-j-6B' --lora_weights=lora-alpaca_6B - python generate.py --base_model='EleutherAI/gpt-j-6B' --lora_weights='lora-alpaca_6B' - python generate.py --base_model='EleutherAI/gpt-neox-20b' --lora_weights='lora-alpaca_20B' - - # generate without lora weights, no prompt - python generate.py --base_model='EleutherAI/gpt-neox-20b' --prompt_type='plain' - python generate.py --base_model='togethercomputer/GPT-NeoXT-Chat-Base-20B' --prompt_type='dai_faq' - - python generate.py --base_model='togethercomputer/GPT-NeoXT-Chat-Base-20B' --prompt_type='dai_faq' --lora_weights='lora_20B_daifaq' - # OpenChatKit settings: - python generate.py --base_model='togethercomputer/GPT-NeoXT-Chat-Base-20B' --prompt_type='human_bot --debug=True --num_beams=1 --temperature=0.6 --top_k=40 --top_p=1.0 - - python generate.py --base_model='distilgpt2' --prompt_type='plain' --debug=True --num_beams=1 --temperature=0.6 --top_k=40 --top_p=1.0 --share=False - python generate.py --base_model='t5-large' --prompt_type='simple_instruct' - python generate.py --base_model='philschmid/bart-large-cnn-samsum' - python generate.py --base_model='philschmid/flan-t5-base-samsum' - python generate.py --base_model='facebook/mbart-large-50-many-to-many-mmt' - - python generate.py --base_model='togethercomputer/GPT-NeoXT-Chat-Base-20B' --prompt_type='human_bot' --lora_weights='GPT-NeoXT-Chat-Base-20B.merged.json.8_epochs.57b2892c53df5b8cefac45f84d019cace803ef26.28' - - must have 4*48GB GPU and run without 8bit in order for sharding to work with use_gpu_id=False - can also pass --prompt_type='human_bot' and model can somewhat handle instructions without being instruct tuned - python generate.py --base_model=decapoda-research/llama-65b-hf --load_8bit=False --use_gpu_id=False --prompt_type='human_bot' - - python generate.py --base_model=h2oai/h2ogpt-oig-oasst1-512-6_9b - """ - H2O_Fire(main) - - -if __name__ == "__main__": - entrypoint_main() diff --git a/spaces/hjzhp/cgpt-online/src/components/icons/Env.tsx b/spaces/hjzhp/cgpt-online/src/components/icons/Env.tsx deleted file mode 100644 index 8dc4dd2ba7569e29fd6c85578da146a775e793dd..0000000000000000000000000000000000000000 --- a/spaces/hjzhp/cgpt-online/src/components/icons/Env.tsx +++ /dev/null @@ -1,5 +0,0 @@ -export default () => { - return ( - - ) -} diff --git a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/dataset.py b/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/dataset.py deleted file mode 100644 index 0ef2301751f9a474fbe95fcb02b9e5860dd49b0c..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/dataset.py +++ /dev/null @@ -1,719 +0,0 @@ -import time -from dataclasses import dataclass -from datetime import datetime -from functools import reduce -import json -import os -from pathlib import Path -import re -import requests -from requests.models import MissingSchema -import sys -from typing import List, Optional, Tuple, Dict, Callable, Any - -from bs4 import BeautifulSoup -import docx -from html2text import html2text -import langchain -from langchain.callbacks import get_openai_callback -from langchain.cache import SQLiteCache -from langchain.chains import LLMChain -from langchain.chains.chat_vector_db.prompts import 
CONDENSE_QUESTION_PROMPT -from langchain.chat_models import ChatOpenAI -from langchain.chat_models.base import BaseChatModel -from langchain.document_loaders import PyPDFLoader, PyMuPDFLoader -from langchain.embeddings.base import Embeddings -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.llms import OpenAI -from langchain.llms.base import LLM, BaseLLM -from langchain.prompts.chat import AIMessagePromptTemplate -from langchain.text_splitter import TokenTextSplitter, RecursiveCharacterTextSplitter -from langchain.vectorstores import Pinecone as OriginalPinecone -import numpy as np -import openai -import pinecone -from pptx import Presentation -from pypdf import PdfReader -import trafilatura - -from streamlit_langchain_chat.constants import * -from streamlit_langchain_chat.customized_langchain.vectorstores import FAISS -from streamlit_langchain_chat.customized_langchain.vectorstores import Pinecone -from streamlit_langchain_chat.utils import maybe_is_text, maybe_is_truncated -from streamlit_langchain_chat.prompts import * - - -if REUSE_ANSWERS: - CACHE_PATH = TEMP_DIR / "llm_cache.db" - os.makedirs(os.path.dirname(CACHE_PATH), exist_ok=True) - langchain.llm_cache = SQLiteCache(str(CACHE_PATH)) - -# option 1 -TextSplitter = TokenTextSplitter -# option 2 -# TextSplitter = RecursiveCharacterTextSplitter # usado por gpt4_pdf_chatbot_langchain (aka GPCL) - - -@dataclass -class Answer: - """A class to hold the answer to a question.""" - question: str = "" - answer: str = "" - context: str = "" - chunks: str = "" - packages: List[Any] = None - references: str = "" - cost_str: str = "" - passages: Dict[str, str] = None - tokens: List[Dict] = None - - def __post_init__(self): - """Initialize the answer.""" - if self.packages is None: - self.packages = [] - if self.passages is None: - self.passages = {} - - def __str__(self) -> str: - """Return the answer as a string.""" - return self.answer - - -def parse_docx(path, citation, key, chunk_chars=2000, overlap=50): - try: - document = docx.Document(path) - fullText = [] - for paragraph in document.paragraphs: - fullText.append(paragraph.text) - doc = '\n'.join(fullText) + '\n' - except Exception as e: - print(f"code_error: {e}") - sys.exit(1) - - if doc: - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - else: - return [], [] - - -# https://langchain.readthedocs.io/en/stable/modules/document_loaders/examples/pdf.html -def parse_pdf(path, citation, key, chunk_chars=2000, overlap=50): - pdfFileObj = open(path, "rb") - pdfReader = PdfReader(pdfFileObj) - splits = [] - split = "" - pages = [] - metadatas = [] - for i, page in enumerate(pdfReader.pages): - split += page.extract_text() - pages.append(str(i + 1)) - while len(split) > chunk_chars: - splits.append(split[:chunk_chars]) - # pretty formatting of pages (e.g. 1-3, 4, 5-7) - pg = "-".join([pages[0], pages[-1]]) - metadatas.append( - dict( - citation=citation, - dockey=key, - key=f"{key} pages {pg}", - ) - ) - split = split[chunk_chars - overlap:] - pages = [str(i + 1)] - if len(split) > overlap: - splits.append(split[:chunk_chars]) - pg = "-".join([pages[0], pages[-1]]) - metadatas.append( - dict( - citation=citation, - dockey=key, - key=f"{key} pages {pg}", - ) - ) - pdfFileObj.close() - - # # ### option 2. PyPDFLoader - # loader = PyPDFLoader(path) - # data = loader.load_and_split() - # # ### option 2.1. 
PyPDFLoader usado por GPCL, aunque luego usa el - # loader = PyPDFLoader(path) - # rawDocs = loader.load() - # text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - # texts = text_splitter.split_documents(rawDocs) - # # ### option 3. PDFMiner. Este parece la mejor opcion - # loader = PyMuPDFLoader(path) - # data = loader.load() - return splits, metadatas - - -def parse_pptx(path, citation, key, chunk_chars=2000, overlap=50): - try: - presentation = Presentation(path) - fullText = [] - for slide in presentation.slides: - for shape in slide.shapes: - if hasattr(shape, "text"): - fullText.append(shape.text) - doc = ''.join(fullText) - - if doc: - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - else: - return [], [] - - except Exception as e: - print(f"code_error: {e}") - sys.exit(1) - - -def parse_txt(path, citation, key, chunk_chars=2000, overlap=50, html=False): - try: - with open(path) as f: - doc = f.read() - except UnicodeDecodeError as e: - with open(path, encoding="utf-8", errors="ignore") as f: - doc = f.read() - if html: - doc = html2text(doc) - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - - -def parse_url(url: str, citation, key, chunk_chars=2000, overlap=50): - def beautifulsoup_extract_text_fallback(response_content): - """ - This is a fallback function, so that we can always return a value for text content. - Even for when both Trafilatura and BeautifulSoup are unable to extract the text from a - single URL. - """ - - # Create the beautifulsoup object: - soup = BeautifulSoup(response_content, 'html.parser') - - # Finding the text: - text = soup.find_all(text=True) - - # Remove unwanted tag elements: - cleaned_text = '' - blacklist = [ - '[document]', - 'noscript', - 'header', - 'html', - 'meta', - 'head', - 'input', - 'script', - 'style', ] - - # Then we will loop over every item in the extract text and make sure that the beautifulsoup4 tag - # is NOT in the blacklist - for item in text: - if item.parent.name not in blacklist: - cleaned_text += f'{item} ' # cleaned_text += '{} '.format(item) - - # Remove any tab separation and strip the text: - cleaned_text = cleaned_text.replace('\t', '') - return cleaned_text.strip() - - def extract_text_from_single_web_page(url): - print(f"\n===========\n{url=}\n===========\n") - downloaded_url = trafilatura.fetch_url(url) - a = None - try: - a = trafilatura.extract(downloaded_url, - output_format='json', - with_metadata=True, - include_comments=False, - date_extraction_params={'extensive_search': True, - 'original_date': True}) - except AttributeError: - a = trafilatura.extract(downloaded_url, - output_format='json', - with_metadata=True, - date_extraction_params={'extensive_search': True, - 'original_date': True}) - except Exception as e: - print(f"code_error: {e}") - - if a: - json_output = json.loads(a) - return json_output['text'] - else: - try: - headers = {'User-Agent': 'Chrome/83.0.4103.106'} - resp = requests.get(url, headers=headers) - print(f"{resp=}\n") - # We will only extract the text from successful requests: - if resp.status_code == 200: - return beautifulsoup_extract_text_fallback(resp.content) - else: - # This line will handle for any failures in both the Trafilature and BeautifulSoup4 functions: - return np.nan 
- # Handling for any URLs that don't have the correct protocol - except MissingSchema: - return np.nan - - text_to_split = extract_text_from_single_web_page(url) - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(text_to_split) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - - -def read_source(path: str = None, - citation: str = None, - key: str = None, - chunk_chars: int = 3000, - overlap: int = 100, - disable_check: bool = False): - if path.endswith(".pdf"): - return parse_pdf(path, citation, key, chunk_chars, overlap) - elif path.endswith(".txt"): - return parse_txt(path, citation, key, chunk_chars, overlap) - elif path.endswith(".html"): - return parse_txt(path, citation, key, chunk_chars, overlap, html=True) - elif path.endswith(".docx"): - return parse_docx(path, citation, key, chunk_chars, overlap) - elif path.endswith(".pptx"): - return parse_pptx(path, citation, key, chunk_chars, overlap) - elif path.startswith("http://") or path.startswith("https://"): - return parse_url(path, citation, key, chunk_chars, overlap) - # WIP - #else: - # return parse_code_txt(path, citation, key, chunk_chars, overlap) - else: - raise "unknown extension" - - -class Dataset: - """A collection of documents to be used for answering questions.""" - def __init__( - self, - chunk_size_limit: int = 3000, - llm: Optional[BaseLLM] | Optional[BaseChatModel] = None, - summary_llm: Optional[BaseLLM] = None, - name: str = "default", - index_path: Optional[Path] = None, - ) -> None: - """Initialize the collection of documents. - - Args: - chunk_size_limit: The maximum number of characters to use for a single chunk of text. - llm: The language model to use for answering questions. Default - OpenAI chat-gpt-turbo - summary_llm: The language model to use for summarizing documents. If None, llm is used. - name: The name of the collection. - index_path: The path to the index file IF pickled. 
If None, defaults to using name in $HOME/.paperqa/name - """ - self.docs = dict() - self.keys = set() - self.chunk_size_limit = chunk_size_limit - - self.index_docstore = None - - if llm is None: - llm = ChatOpenAI(temperature=0.1, max_tokens=512) - if summary_llm is None: - summary_llm = llm - self.update_llm(llm, summary_llm) - - if index_path is None: - index_path = TEMP_DIR / name - self.index_path = index_path - self.name = name - - def update_llm(self, llm: BaseLLM | ChatOpenAI, summary_llm: Optional[BaseLLM] = None) -> None: - """Update the LLM for answering questions.""" - self.llm = llm - if summary_llm is None: - summary_llm = llm - self.summary_llm = summary_llm - self.summary_chain = LLMChain(prompt=chat_summary_prompt, llm=summary_llm) - self.search_chain = LLMChain(prompt=search_prompt, llm=llm) - self.cite_chain = LLMChain(prompt=citation_prompt, llm=llm) - - def add( - self, - path: str, - citation: Optional[str] = None, - key: Optional[str] = None, - disable_check: bool = False, - chunk_chars: Optional[int] = 3000, - ) -> None: - """Add a document to the collection.""" - - if path in self.docs: - print(f"Document {path} already in collection.") - return None - - if citation is None: - # peak first chunk - texts, _ = read_source(path, "", "", chunk_chars=chunk_chars) - with get_openai_callback() as cb: - citation = self.cite_chain.run(texts[0]) - if len(citation) < 3 or "Unknown" in citation or "insufficient" in citation: - citation = f"Unknown, {os.path.basename(path)}, {datetime.now().year}" - - if key is None: - # get first name and year from citation - try: - author = re.search(r"([A-Z][a-z]+)", citation).group(1) - except AttributeError: - # panicking - no word?? - raise ValueError( - f"Could not parse key from citation {citation}. Consider just passing key explicitly - e.g. docs.py (path, citation, key='mykey')" - ) - try: - year = re.search(r"(\d{4})", citation).group(1) - except AttributeError: - year = "" - key = f"{author}{year}" - suffix = "" - while key + suffix in self.keys: - # move suffix to next letter - if suffix == "": - suffix = "a" - else: - suffix = chr(ord(suffix) + 1) - key += suffix - self.keys.add(key) - - texts, metadata = read_source(path, citation, key, chunk_chars=chunk_chars) - # loose check to see if document was loaded - # - if len("".join(texts)) < 10 or ( - not disable_check and not maybe_is_text("".join(texts)) - ): - raise ValueError( - f"This does not look like a text document: {path}. Path disable_check to ignore this error." 
- ) - - self.docs[path] = dict(texts=texts, metadata=metadata, key=key) - if self.index_docstore is not None: - self.index_docstore.add_texts(texts, metadatas=metadata) - - def clear(self) -> None: - """Clear the collection of documents.""" - self.docs = dict() - self.keys = set() - self.index_docstore = None - # delete index file - pkl = self.index_path / "index.pkl" - if pkl.exists(): - pkl.unlink() - fs = self.index_path / "index.faiss" - if fs.exists(): - fs.unlink() - - @property - def doc_previews(self) -> List[Tuple[int, str, str]]: - """Return a list of tuples of (key, citation) for each document.""" - return [ - ( - len(doc["texts"]), - doc["metadata"][0]["dockey"], - doc["metadata"][0]["citation"], - ) - for doc in self.docs.values() - ] - - # to pickle, we have to save the index as a file - def __getstate__(self, embedding: Embeddings): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None and len(self.docs) > 0: - self._build_faiss_index(embedding) - state = self.__dict__.copy() - if self.index_docstore is not None: - state["_index"].save_local(self.index_path) - del state["_index"] - # remove LLMs (they can have callbacks, which can't be pickled) - del state["summary_chain"] - del state["qa_chain"] - del state["cite_chain"] - del state["search_chain"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - try: - self.index_docstore = FAISS.load_local(self.index_path, OpenAIEmbeddings()) - except: - # they use some special exception type, but I don't want to import it - self.index_docstore = None - self.update_llm( - ChatOpenAI(temperature=0.1, max_tokens=512) - ) - - def _build_faiss_index(self, embedding: Embeddings = None): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None: - texts = reduce( - lambda x, y: x + y, [doc["texts"] for doc in self.docs.values()], [] - ) - metadatas = reduce( - lambda x, y: x + y, [doc["metadata"] for doc in self.docs.values()], [] - ) - - # if the index exists, load it - if LOAD_INDEX_LOCALLY and (self.index_path / "index.faiss").exists(): - self.index_docstore = FAISS.load_local(self.index_path, embedding) - - # search if the text and metadata already existed in the index - for i in reversed(range(len(texts))): - text = texts[i] - metadata = metadatas[i] - for key, value in self.index_docstore.docstore.dict_.items(): - if value.page_content == text: - if value.metadata.get('citation').split(os.sep)[-1] != metadata.get('citation').split(os.sep)[-1]: - self.index_docstore.docstore.dict_[key].metadata['citation'] = metadata.get('citation').split(os.sep)[-1] - self.index_docstore.docstore.dict_[key].metadata['dockey'] = metadata.get('citation').split(os.sep)[-1] - self.index_docstore.docstore.dict_[key].metadata['key'] = metadata.get('citation').split(os.sep)[-1] - texts.pop(i) - metadatas.pop(i) - - # add remaining texts - if texts: - self.index_docstore.add_texts(texts=texts, metadatas=metadatas) - else: - # crete new index - self.index_docstore = FAISS.from_texts(texts, embedding, metadatas=metadatas) - # - - if SAVE_INDEX_LOCALLY: - # save index. 
- self.index_docstore.save_local(self.index_path) - - def _build_pinecone_index(self, embedding: Embeddings = None): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None: - pinecone.init( - api_key=os.environ['PINECONE_API_KEY'], # find at app.pinecone.io - environment=os.environ['PINECONE_ENVIRONMENT'] # next to api key in console - ) - texts = reduce( - lambda x, y: x + y, [doc["texts"] for doc in self.docs.values()], [] - ) - metadatas = reduce( - lambda x, y: x + y, [doc["metadata"] for doc in self.docs.values()], [] - ) - - index_name = "langchain-demo1" - - # if the index exists, delete it - if index_name in pinecone.list_indexes(): - pinecone.delete_index(index_name) - - # create new index - if openai.api_type == 'azure': - self.index_docstore = Pinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - else: - self.index_docstore = OriginalPinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - - def get_evidence( - self, - answer: Answer, - embedding: Embeddings, - k: int = 3, - max_sources: int = 5, - marginal_relevance: bool = True, - ) -> str: - if self.index_docstore is None: - self._build_faiss_index(embedding) - - init_search_time = time.time() - - # want to work through indices but less k - if marginal_relevance: - docs = self.index_docstore.max_marginal_relevance_search( - answer.question, k=k, fetch_k=5 * k - ) - else: - docs = self.index_docstore.similarity_search( - answer.question, k=k, fetch_k=5 * k - ) - if OPERATING_MODE == "debug": - print(f"time to search docs to build context: {time.time() - init_search_time:.2f} [s]") - init_summary_time = time.time() - partial_summary_time = "" - for i, doc in enumerate(docs): - with get_openai_callback() as cb: - init__partial_summary_time = time.time() - summary_of_chunked_text = self.summary_chain.run( - question=answer.question, context_str=doc.page_content - ) - if OPERATING_MODE == "debug": - partial_summary_time += f"- time to make relevant summary of doc '{i}': {time.time() - init__partial_summary_time:.2f} [s]\n" - engine = self.summary_chain.llm.model_kwargs.get('deployment_id') or self.summary_chain.llm.model_name - if not answer.tokens: - answer.tokens = [{ - 'engine': engine, - 'total_tokens': cb.total_tokens}] - else: - answer.tokens.append({ - 'engine': engine, - 'total_tokens': cb.total_tokens - }) - summarized_package = ( - doc.metadata["key"], - doc.metadata["citation"], - summary_of_chunked_text, - doc.page_content, - ) - if "Not applicable" not in summary_of_chunked_text and summarized_package not in answer.packages: - answer.packages.append(summarized_package) - yield answer - if len(answer.packages) == max_sources: - break - if OPERATING_MODE == "debug": - print(f"time to make all relevant summaries: {time.time() - init_summary_time:.2f} [s]") - print(partial_summary_time[:-1]) - context_str = "\n\n".join( - [f"{citation}: {summary_of_chunked_text}" - for key, citation, summary_of_chunked_text, chunked_text in answer.packages - if "Not applicable" not in summary_of_chunked_text] - ) - chunks_str = "\n\n".join( - [f"{citation}: {chunked_text}" - for key, citation, summary_of_chunked_text, chunked_text in answer.packages - if "Not applicable" not in summary_of_chunked_text] - ) - valid_keys = [key - for key, citation, summary_of_chunked_text, chunked_textin in answer.packages - if "Not applicable" not in summary_of_chunked_text] - if len(valid_keys) > 0: - context_str += "\n\nValid keys: " + ", ".join(valid_keys) - 
chunks_str += "\n\nValid keys: " + ", ".join(valid_keys) - answer.context = context_str - answer.chunks = chunks_str - yield answer - - def query( - self, - query: str, - embedding: Embeddings, - chat_history: list[tuple[str, str]], - k: int = 10, - max_sources: int = 5, - length_prompt: str = "about 100 words", - marginal_relevance: bool = True, - ): - for answer in self._query( - query, - embedding, - chat_history, - k=k, - max_sources=max_sources, - length_prompt=length_prompt, - marginal_relevance=marginal_relevance, - ): - pass - return answer - - def _query( - self, - query: str, - embedding: Embeddings, - chat_history: list[tuple[str, str]], - k: int, - max_sources: int, - length_prompt: str, - marginal_relevance: bool, - ): - if k < max_sources: - k = max_sources + 1 - - answer = Answer(question=query) - - messages_qa = [system_message_prompt] - if len(chat_history) != 0: - for conversation in chat_history: - messages_qa.append(HumanMessagePromptTemplate.from_template(conversation[0])) - messages_qa.append(AIMessagePromptTemplate.from_template(conversation[1])) - messages_qa.append(human_qa_message_prompt) - chat_qa_prompt = ChatPromptTemplate.from_messages(messages_qa) - self.qa_chain = LLMChain(prompt=chat_qa_prompt, llm=self.llm) - - for answer in self.get_evidence( - answer, - embedding, - k=k, - max_sources=max_sources, - marginal_relevance=marginal_relevance, - ): - yield answer - - references_dict = dict() - passages = dict() - if len(answer.context) < 10: - answer_text = "I cannot answer this question due to insufficient information." - else: - with get_openai_callback() as cb: - init_qa_time = time.time() - answer_text = self.qa_chain.run( - question=answer.question, context_str=answer.context, length=length_prompt - ) - if OPERATING_MODE == "debug": - print(f"time to make the Q&A answer: {time.time() - init_qa_time:.2f} [s]") - engine = self.qa_chain.llm.model_kwargs.get('deployment_id') or self.qa_chain.llm.model_name - if not answer.tokens: - answer.tokens = [{ - 'engine': engine, - 'total_tokens': cb.total_tokens}] - else: - answer.tokens.append({ - 'engine': engine, - 'total_tokens': cb.total_tokens - }) - - # it still happens ulol - if "(Foo2012)" in answer_text: - answer_text = answer_text.replace("(Foo2012)", "") - for key, citation, summary, text in answer.packages: - # do check for whole key (so we don't catch Callahan2019a with Callahan2019) - skey = key.split(" ")[0] - if skey + " " in answer_text or skey + ")" in answer_text: - references_dict[skey] = citation - passages[key] = text - references_str = "\n\n".join( - [f"{i+1}. 
({k}): {c}" for i, (k, c) in enumerate(references_dict.items())] - ) - - # cost_str = f"{answer_text}\n\n" - cost_str = "" - itemized_cost = "" - total_amount = 0 - for d in answer.tokens: - total_tokens = d.get('total_tokens') - if total_tokens: - engine = d.get('engine') - key_price = None - for key in PRICES.keys(): - if re.match(f"{key}", engine): - key_price = key - break - if PRICES.get(key_price): - partial_amount = total_tokens / 1000 * PRICES.get(key_price) - total_amount += partial_amount - itemized_cost += f"- {engine}: {total_tokens} tokens\t ---> ${partial_amount:.4f},\n" - else: - itemized_cost += f"- {engine}: {total_tokens} tokens,\n" - # delete ,\n - itemized_cost = itemized_cost[:-2] - - # add tokens to formatted answer - cost_str += f"Total cost: ${total_amount:.4f}\nItemized cost:\n{itemized_cost}" - - answer.answer = answer_text - answer.cost_str = cost_str - answer.references = references_str - answer.passages = passages - yield answer - - diff --git a/spaces/hpratapsingh/Movie_Recommendation_system/README.md b/spaces/hpratapsingh/Movie_Recommendation_system/README.md deleted file mode 100644 index dcf9df1d4623918077c9bc1732d01d58fcd1dc01..0000000000000000000000000000000000000000 --- a/spaces/hpratapsingh/Movie_Recommendation_system/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Movie Recommendation System -emoji: ⚡ -colorFrom: purple -colorTo: blue -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/huggingface/Model_Cards_Writing_Tool/current_card.md b/spaces/huggingface/Model_Cards_Writing_Tool/current_card.md deleted file mode 100644 index ec8fe14efadc5cca91c2dfb3833e2f0e54e124d3..0000000000000000000000000000000000000000 --- a/spaces/huggingface/Model_Cards_Writing_Tool/current_card.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -{{card_data}} ---- - -# {{ model_id }} - - Provide a quick summary of what the model is/does. 
- -# Table of Contents - -- [{{ model_id }}](#-model_id-) -- [Table of Contents](#table-of-contents) -- [Model Details](#model-details) - - [Model Description](#model-description) -- [Uses](#uses) - - [Direct Use](#direct-use) - - [Downstream Use [Optional]](#downstream-use-optional) - - [Out-of-Scope Use](#out-of-scope-use) -- [Bias, Risks, and Limitations](#bias-risks-and-limitations) - - [Recommendations](#recommendations) -- [Training Details](#training-details) - - [Training Data](#training-data) - - [Training Procedure](#training-procedure) - - [Preprocessing](#preprocessing) - - [Speeds, Sizes, Times](#speeds-sizes-times) -- [Evaluation](#evaluation) - - [Testing Data, Factors & Metrics](#testing-data-factors--metrics) - - [Testing Data](#testing-data) - - [Factors](#factors) - - [Metrics](#metrics) - - [Results](#results) -- [Model Examination](#model-examination) -- [Environmental Impact](#environmental-impact) -- [Technical Specifications [optional]](#technical-specifications-optional) - - [Model Architecture and Objective](#model-architecture-and-objective) - - [Compute Infrastructure](#compute-infrastructure) - - [Hardware](#hardware) - - [Software](#software) -- [Citation](#citation) -- [Glossary [optional]](#glossary-optional) -- [More Information [optional]](#more-information-optional) -- [Model Card Authors [optional]](#model-card-authors-optional) -- [Model Card Contact](#model-card-contact) -- [How to Get Started with the Model](#how-to-get-started-with-the-model) - - -# Model Details - -## Model Description - - This section provides basic information about what the model is, its current status, and where it came from.. -{{ the_model_description | default("More information needed", true)}} - -- **Developed by:** {{ developers | default("More information needed", true)}} -- **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}} -- **Model type:** Language model -- **Language(s) (NLP):** {{ language | default("More information needed", true)}} -- **License:** {{ license | default("More information needed", true)}} -- **Related Models:** {{ related_models | default("More information needed", true)}} - - **Parent Model:** {{ parent_model | default("More information needed", true)}} -- **Resources for more information:** {{ more_resources | default("More information needed", true)}} - -# Uses - - Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. - -## Direct Use - - This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. - -{{ direct_use | default("More information needed", true)}} - -## Downstream Use [Optional] - - This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app - -{{ downstream_use | default("More information needed", true)}} - -## Out-of-Scope Use - - This section addresses misuse, malicious use, and uses that the model will not work well for. - -{{ out_of_scope_use | default("More information needed", true)}} - -# Bias, Risks, and Limitations - - This section is meant to convey both technical and sociotechnical limitations. - -{{ bias_risks_limitations | default("More information needed", true)}} - -## Recommendations - - This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. 
- -{{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recomendations.", true)}} - -# Training Details - -## Training Data - - This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. - -{{ training_data | default("More information needed", true)}} - -## Training Procedure - - This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. - -### Preprocessing - -{{ preprocessing | default("More information needed", true)}} - -### Speeds, Sizes, Times - - This section provides information about throughput, start/end time, checkpoint size if relevant, etc. - -{{ speeds_sizes_times | default("More information needed", true)}} - -# Evaluation - - This section describes the evaluation protocols and provides the results. - -## Testing Data, Factors & Metrics - -### Testing Data - - This should link to a Data Card if possible. - -{{ testing_data | default("More information needed", true)}} - -### Factors - - These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. - -{{ testing_factors | default("More information needed", true)}} - -### Metrics - - These are the evaluation metrics being used, ideally with a description of why. - -{{ testing_metrics | default("More information needed", true)}} - -## Results - -{{ results | default("More information needed", true)}} - -# Model Examination - -{{ model_examination | default("More information needed", true)}} - -# Environmental Impact - - Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** {{ hardware | default("More information needed", true)}} -- **Hours used:** {{ hours_used | default("More information needed", true)}} -- **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}} -- **Compute Region:** {{ cloud_region | default("More information needed", true)}} -- **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}} - -# Technical Specifications [optional] - -## Model Architecture and Objective - -{{ model_specs | default("More information needed", true)}} - -## Compute Infrastructure - -{{ compute_infrastructure | default("More information needed", true)}} - -### Hardware - -{{ hardware | default("More information needed", true)}} - -### Software - -{{ software | default("More information needed", true)}} - -# Citation - - If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. - -**BibTeX:** - -{{ citation_bibtex | default("More information needed", true)}} - -**APA:** - -{{ citation_apa | default("More information needed", true)}} - -# Glossary [optional] - - If relevant, include terms and calculations in this section that can help readers understand the model or model card. 
- -{{ glossary | default("More information needed", true)}} - -# More Information [optional] - -{{ more_information | default("More information needed", true)}} - -# Model Card Authors [optional] - -{{ model_card_authors | default("More information needed", true)}} - -# Model Card Contact - -{{ model_card_contact | default("More information needed", true)}} - -# How to Get Started with the Model - -Use the code below to get started with the model. - -

-<details> -<summary> Click to expand </summary> - -{{ get_started_code | default("More information needed", true)}} - -</details>
    - - diff --git a/spaces/huy-ha/semabs-relevancy/CLIP/clip/clip_explainability.py b/spaces/huy-ha/semabs-relevancy/CLIP/clip/clip_explainability.py deleted file mode 100644 index 6804610a0019aae8c10528029c58db6a10114127..0000000000000000000000000000000000000000 --- a/spaces/huy-ha/semabs-relevancy/CLIP/clip/clip_explainability.py +++ /dev/null @@ -1,273 +0,0 @@ -# modified from: https://github.com/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP/clip/clip.py - -import hashlib -import os -import urllib -import warnings -from typing import Any, Union, List -from pkg_resources import packaging - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from .model_explainability import build_model -from .simple_tokenizer import SimpleTokenizer as _Tokenizer - -try: - from torchvision.transforms import InterpolationMode - - BICUBIC = InterpolationMode.BICUBIC -except ImportError: - BICUBIC = Image.BICUBIC - - -if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): - warnings.warn("PyTorch version 1.7.1 or higher is recommended") - - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", - "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", - "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", - "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt", -} - - -def _download(url: str, root: str): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - == expected_sha256 - ): - return download_target - else: - warnings.warn( - f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" - ) - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm( - total=int(source.info().get("Content-Length")), - ncols=80, - unit="iB", - unit_scale=True, - unit_divisor=1024, - ) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - != expected_sha256 - ): - raise RuntimeError( - f"Model has been downloaded but the SHA256 
checksum does not not match" - ) - - return download_target - - -def _convert_image_to_rgb(image): - return image.convert("RGB") - - -def _transform(n_px, overload_resolution=False): - transforms = [ - _convert_image_to_rgb, - ToTensor(), - Normalize( - (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) - ), - ] - if not overload_resolution: - transforms = [Resize(224, interpolation=BICUBIC), CenterCrop(n_px)] + transforms - return Compose(transforms) - - -def available_models() -> List[str]: - """Returns the names of available CLIP models""" - return list(_MODELS.keys()) - - -def load( - name: str, - device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", - jit: bool = False, - download_root: str = None, - overload_resolution=False, -): - """Load a CLIP model - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - device : Union[str, torch.device] - The device to put the loaded model - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - download_root: str - path to download the model files; by default, it uses "~/.cache/clip" - Returns - ------- - model : torch.nn.Module - The CLIP model - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if name in _MODELS: - model_path = _download( - _MODELS[name], download_root or os.path.expanduser("~/.cache/clip") - ) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError( - f"Model {name} not found; available models = {available_models()}" - ) - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn( - f"File {model_path} is not a JIT archive. 
Loading as a state dict instead" - ) - jit = False - state_dict = torch.load(model_path, map_location="cpu") - - if not jit: - model = build_model(state_dict or model.state_dict()).to(device) - if str(device) == "cpu": - model.float() - return model, _transform(model.visual.input_resolution, overload_resolution) - - # patch the device names - device_holder = torch.jit.trace( - lambda: torch.ones([]).to(torch.device(device)), example_inputs=[] - ) - device_node = [ - n - for n in device_holder.graph.findAllNodes("prim::Constant") - if "Device" in repr(n) - ][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith( - "cuda" - ): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace( - lambda: torch.ones([]).float(), example_inputs=[] - ) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [ - 1, - 2, - ]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, _transform(model.input_resolution.item(), overload_resolution) - - -def tokenize( - texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False -) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - context_length : int - The context length to use; all CLIP models use 77 as the context length - truncate: bool - Whether to truncate the text in case its encoding is longer than the context length - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - if truncate: - tokens = tokens[:context_length] - tokens[-1] = eot_token - else: - raise RuntimeError( - f"Input {texts[i]} is too long for context length {context_length}" - ) - result[i, : len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/hwchase17/chat-langchain/ingest_examples.py b/spaces/hwchase17/chat-langchain/ingest_examples.py deleted file mode 100644 index d2a1e7a7c784bfdfa7739ed68da010aeef8c0c2d..0000000000000000000000000000000000000000 --- 
a/spaces/hwchase17/chat-langchain/ingest_examples.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Ingest examples into Weaviate.""" -import os -from pathlib import Path - -import weaviate - -WEAVIATE_URL = os.environ["WEAVIATE_URL"] -client = weaviate.Client( - url=WEAVIATE_URL, - additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]}, -) - -client.schema.delete_class("Rephrase") -client.schema.delete_class("QA") -client.schema.get() -schema = { - "classes": [ - { - "class": "Rephrase", - "description": "Rephrase Examples", - "vectorizer": "text2vec-openai", - "moduleConfig": { - "text2vec-openai": { - "model": "ada", - "modelVersion": "002", - "type": "text", - } - }, - "properties": [ - { - "dataType": ["text"], - "moduleConfig": { - "text2vec-openai": { - "skip": False, - "vectorizePropertyName": False, - } - }, - "name": "content", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "question", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "answer", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "chat_history", - }, - ], - }, - ] -} - -client.schema.create(schema) - -documents = [ - { - "question": "how do i load those?", - "chat_history": "Human: What types of memory exist?\nAssistant: \n\nThere are a few different types of memory: Buffer, Summary, and Conversational Memory.", - "answer": "How do I load Buffer, Summary, and Conversational Memory", - }, - { - "question": "how do i install this package?", - "chat_history": "", - "answer": "How do I install langchain?", - }, - { - "question": "how do I set serpapi_api_key?", - "chat_history": "Human: can you write me a code snippet for that?\nAssistant: \n\nYes, you can create an Agent with a custom LLMChain in LangChain. Here is a [link](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html) to the documentation that provides a code snippet for creating a custom Agent.", - "answer": "How do I set the serpapi_api_key?", - }, - { - "question": "What are some methods for data augmented generation?", - "chat_history": "Human: List all methods of an Agent class please\nAssistant: \n\nTo answer your question, you can find a list of all the methods of the Agent class in the [API reference documentation](https://langchain.readthedocs.io/en/latest/modules/agents/reference.html).", - "answer": "What are some methods for data augmented generation?", - }, - { - "question": "can you write me a code snippet for that?", - "chat_history": "Human: how do I create an agent with custom LLMChain?\nAssistant: \n\nTo create an Agent with a custom LLMChain in LangChain, you can use the [Custom Agent example](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html). This example shows how to create a custom LLMChain and use an existing Agent class to parse the output. 
For more information on Agents and Tools, check out the [Key Concepts](https://langchain.readthedocs.io/en/latest/modules/agents/key_concepts.html) documentation.", - "answer": "Can you provide a code snippet for creating an Agent with a custom LLMChain?", - }, -] -from langchain.prompts.example_selector.semantic_similarity import \ - sorted_values - -for d in documents: - d["content"] = " ".join(sorted_values(d)) -with client.batch as batch: - for text in documents: - batch.add_data_object( - text, - "Rephrase", - ) - -client.schema.get() -schema = { - "classes": [ - { - "class": "QA", - "description": "Rephrase Examples", - "vectorizer": "text2vec-openai", - "moduleConfig": { - "text2vec-openai": { - "model": "ada", - "modelVersion": "002", - "type": "text", - } - }, - "properties": [ - { - "dataType": ["text"], - "moduleConfig": { - "text2vec-openai": { - "skip": False, - "vectorizePropertyName": False, - } - }, - "name": "content", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "question", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "answer", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "summaries", - }, - { - "dataType": ["text"], - "description": "The link", - "moduleConfig": { - "text2vec-openai": { - "skip": True, - "vectorizePropertyName": False, - } - }, - "name": "sources", - }, - ], - }, - ] -} - -client.schema.create(schema) - -documents = [ - { - "question": "how do i install langchain?", - "answer": "```pip install langchain```", - "summaries": ">Example:\nContent:\n---------\nYou can pip install langchain package by running 'pip install langchain'\n----------\nSource: foo.html", - "sources": "foo.html", - }, - { - "question": "how do i import an openai LLM?", - "answer": "```from langchain.llm import OpenAI```", - "summaries": ">Example:\nContent:\n---------\nyou can import the open ai wrapper (OpenAI) from the langchain.llm module\n----------\nSource: bar.html", - "sources": "bar.html", - }, -] -from langchain.prompts.example_selector.semantic_similarity import \ - sorted_values - -for d in documents: - d["content"] = " ".join(sorted_values(d)) -with client.batch as batch: - for text in documents: - batch.add_data_object( - text, - "QA", - ) diff --git a/spaces/hzy123/bingo/tailwind.config.js b/spaces/hzy123/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - 
keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/imperialwool/funapi/routes/osuApi/getFull.py b/spaces/imperialwool/funapi/routes/osuApi/getFull.py deleted file mode 100644 index 35d268d87f7b7c0676d2d5947bc6f373c33f1665..0000000000000000000000000000000000000000 --- a/spaces/imperialwool/funapi/routes/osuApi/getFull.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import ffmpeg -from .. import helpers -from .findSong import * -from requests import get -from random import randint as rand -def getFull(request): - beatmapId = helpers.getFromRequest(request, "beatmapId") - query = helpers.getFromRequest(request, "query") - - config = helpers.configFile() - if beatmapId != None: - if os.path.exists(f"{config['full-path']}/{beatmapId}.ogg"): - return {"status": "pass", "details": {"code": 200, "result": f"{config['url']}/static/full/{beatmapId}.ogg"}} - tryment = get(f"https://kitsu.moe/api/audio/{beatmapId}", allow_redirects=True) - if int(tryment.status_code) not in [404, 403, 429]: - open(f"{config['temp-path']}/{beatmapId}.mp3", "wb").write(tryment.content) - audio_input = ffmpeg.input(f"{config['temp-path']}/{beatmapId}.mp3") - audio_output = ffmpeg.output(audio_input, f"{config['full-path']}/{beatmapId}.ogg", audio_bitrate="96K") - ffmpeg.run(audio_output) - helpers.deleteAudio(f"temp/{beatmapId}.ogg") - return {"status": "pass", "details": {"code": int(tryment.status_code), "result": f"{config['url']}/static/full/{beatmapId}.ogg"}} - else: - return {"status": "error", "details": {"code": int(tryment.status_code), "answer": tryment.text}}, 400 - elif query != None: - fffff = findSong(request) - if fffff['status'] == "error": return fffff - beatmapId = fffff['details']['result'][rand(0,len(fffff['details']['result'])-1)]['beatmapId'] - if os.path.exists(f"{config['full-path']}/{beatmapId}.ogg"): - return {"status": "pass", "details": {"code": 200, "result": f"{config['url']}/static/full/{beatmapId}.ogg"}} - tryment = get(f"https://kitsu.moe/api/audio/{beatmapId}", allow_redirects=True) - if int(tryment.status_code) not in [404, 403, 429]: - open(f"{config['temp-path']}/{beatmapId}.mp3", "wb").write(tryment.content) - audio_input = ffmpeg.input(f"{config['temp-path']}/{beatmapId}.mp3") - audio_output = ffmpeg.output(audio_input, f"{config['full-path']}/{beatmapId}.ogg", audio_bitrate="96K") - ffmpeg.run(audio_output) - helpers.deleteAudio(f"temp/{beatmapId}.ogg") - return {"status": "pass", "details": {"code": int(tryment.status_code), "name": f"{beatmapId}.ogg", "result": f"{config['url']}/static/full/{beatmapId}.ogg"}} - else: - return {"status": "error", "details": {"code": 
int(tryment.status_code), "answer": tryment.text}}, 400 - return {"status": "error", "details": { "error_code": 133, "error_details": "No details for finding preview" }}, 400 \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Antares Autotune DX V4.30 [piratox] Free TOP Download.md b/spaces/inamXcontru/PoeticTTS/Antares Autotune DX V4.30 [piratox] Free TOP Download.md deleted file mode 100644 index ea36dd358fac0a9eaa2d4100c8f1913d2bc3a068..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Antares Autotune DX V4.30 [piratox] Free TOP Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Antares Autotune DX v4.30 [piratox] free download


    Download File === https://gohhs.com/2uz4Rg



    -
    - 8a78ff9644
    -
    -
    -

    diff --git a/spaces/innnky/visinger2-nomidi/text/npu/symbols.py b/spaces/innnky/visinger2-nomidi/text/npu/symbols.py deleted file mode 100644 index a52b4d5ddb2bddec22b4622d5d2adeb6ffb24c0d..0000000000000000000000000000000000000000 --- a/spaces/innnky/visinger2-nomidi/text/npu/symbols.py +++ /dev/null @@ -1,61 +0,0 @@ - -ttsing_phone_set = ['_'] + [ - "b", "c", "ch", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "q", "r", - "s", "sh", "t", "x", "z", "zh", "a", "ai", "an", "ang", "ao", "e", "ei", - "en", "eng", "er", "iii", "ii", "i", "ia", "ian", "iang", "iao", "ie", "in", - "ing", "iong", "iou", "o", "ong", "ou", "u", "ua", "uai", "uan", "uang", - "uei", "uen", "ueng", "uo", "v", "van", "ve", "vn", "AH", "AA", "AO", "ER", - "IH", "IY", "UH", "UW", "EH", "AE", "AY", "EY", "OY", "AW", "OW", "P", "B", - "T", "D", "K", "G", "M", "N", "NG", "L", "S", "Z", "Y", "TH", "DH", "SH", - "ZH", "CH", "JH", "V", "W", "F", "R", "HH", "AH0", "AA0", "AO0", "ER0", - "IH0", "IY0", "UH0", "UW0", "EH0", "AE0", "AY0", "EY0", "OY0", "AW0", "OW0", - "AH1", "AA1", "AO1", "ER1", "IH1", "IY1", "UH1", "UW1", "EH1", "AE1", "AY1", - "EY1", "OY1", "AW1", "OW1", "AH2", "AA2", "AO2", "ER2", "IH2", "IY2", "UH2", - "UW2", "EH2", "AE2", "AY2", "EY2", "OY2", "AW2", "OW2", "AH3", "AA3", "AO3", - "ER3", "IH3", "IY3", "UH3", "UW3", "EH3", "AE3", "AY3", "EY3", "OY3", "AW3", - "OW3", "D-1", "T-1", "P*", "B*", "T*", "D*", "K*", "G*", "M*", "N*", "NG*", - "L*", "S*", "Z*", "Y*", "TH*", "DH*", "SH*", "ZH*", "CH*", "JH*", "V*", - "W*", "F*", "R*", "HH*", "sp", "sil", "or", "ar", "aor", "our", "angr", - "eir", "engr", "air", "ianr", "iaor", "ir", "ingr", "ur", "iiir", "uar", - "uangr", "uenr", "iir", "ongr", "uor", "ueir", "iar", "iangr", "inr", - "iour", "vr", "uanr", "ruai", "TR", "rest", - # opencpop - 'w', 'SP', 'AP', 'un', 'y', 'ui', 'iu', - "iour", "vr", "uanr", "ruai", "TR", "rest", - # opencpop - 'w', 'SP', 'AP', 'un', 'y', 'ui', 'iu', - # opencpop-strict - 'i0', 'E', 'En' -] - -ttsing_pitch_set = ['_'] + [ - "C0", "C1", "C2", "C3", "C4", "C5", "C6", "C#/Db0", "C#/Db1", "C#/Db2", - "C#/Db3", "C#/Db4", "C#/Db5", "C#/Db6", "D0", "D1", "D2", "D3", "D4", "D5", - "D6", "D#/Eb0", "D#/Eb1", "D#/Eb2", "D#/Eb3", "D#/Eb4", "D#/Eb5", "D#/Eb6", - "E0", "E1", "E2", "E3", "E4", "E5", "E6", "F0", "F1", "F2", "F3", "F4", - "F5", "F6", "F#/Gb0", "F#/Gb1", "F#/Gb2", "F#/Gb3", "F#/Gb4", "F#/Gb5", - "F#/Gb6", "G0", "G1", "G2", "G3", "G4", "G5", "G6", "G#/Ab0", "G#/Ab1", - "G#/Ab2", "G#/Ab3", "G#/Ab4", "G#/Ab5", "G#/Ab6", "A0", "A1", "A2", "A3", - "A4", "A5", "A6", "A#/Bb0", "A#/Bb1", "A#/Bb2", "A#/Bb3", "A#/Bb4", - "A#/Bb5", "A#/Bb6", "B0", "B1", "B2", "B3", "B4", "B5", "B6", "RestRest" -] - -ttsing_opencpop_pitch_set = ['_'] + [ - "C0", "C1", "C2", "C3", "C4", "C5", "C6", - "C#0/Db0", "C#1/Db1", "C#2/Db2", "C#3/Db3", "C#4/Db4", "C#5/Db5", "C#6/Db6", - "D0", "D1", "D2", "D3", "D4", "D5", "D6", - "D#0/Eb0", "D#1/Eb1", "D#2/Eb2", "D#3/Eb3", "D#4/Eb4", "D#5/Eb5", "D#6/Eb6", - "E0", "E1", "E2", "E3", "E4", "E5", "E6", - "F0", "F1", "F2", "F3", "F4", "F5", "F6", - "F#0/Gb0", "F#1/Gb1", "F#2/Gb2", "F#3/Gb3", "F#4/Gb4", "F#5/Gb5", "F#6/Gb6", - "G0", "G1", "G2", "G3", "G4", "G5", "G6", - "G#0/Ab0", "G#1/Ab1", "G#2/Ab2", "G#3/Ab3", "G#4/Ab4", "G#5/Ab5", "G#6/Ab6", - "A0", "A1", "A2", "A3", "A4", "A5", "A6", - "A#0/Bb0", "A#1/Bb1", "A#2/Bb2", "A#3/Bb3", "A#4/Bb4", "A#5/Bb5", "A#6/Bb6", - "B0", "B1", "B2", "B3", "B4", "B5", "B6", - "RestRest", "rest" -] - -ttsing_slur_set = ['_'] + ['0', '1'] - - diff --git 
a/spaces/inplisQlawa/anything-midjourney-v4-1/Chistes En Nahuatl Con Traduccion En Espanol Wikipedia.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Chistes En Nahuatl Con Traduccion En Espanol Wikipedia.md deleted file mode 100644 index 0f6ebcf2f474e4592bf45d0d7cdb952629f38bb3..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Chistes En Nahuatl Con Traduccion En Espanol Wikipedia.md +++ /dev/null @@ -1,42 +0,0 @@ -
    -

    Chistes en Nahuatl con Traducción en Español: Una Forma de Aprender y Divertirse

    - -

    El Nahuatl es una macrolengua que surgió en el siglo V en México, y que fue hablada por diversas civilizaciones como los aztecas, los toltecas y los mayas. Aunque el proceso de colonización redujo su uso frente al español, el Nahuatl sigue siendo una lengua viva que cuenta con más de un millón y medio de hablantes en México y otros países.

    - -

    Aprender Nahuatl puede ser una forma de acercarse a la cultura y la historia de los pueblos originarios de América, así como de enriquecer el vocabulario y la expresión. Además, puede ser muy divertido, especialmente si se recurre a los chistes en Nahuatl con traducción en Español.

    -

    Chistes En Nahuatl Con Traduccion En Espanol Wikipedia


    DOWNLOAD ---> https://urlin.us/2uExhs



    - -

    Los chistes en Nahuatl son una forma de humor que juega con las palabras, los sonidos y los significados de esta lengua. Algunos chistes se basan en la similitud fonética entre el Nahuatl y el Español, creando situaciones cómicas y absurdas. Otros chistes aprovechan el doble sentido o la ambigüedad de algunas palabras o expresiones en Nahuatl, generando sorpresa y risa.

    - -

    En este artículo, te presentamos algunos ejemplos de chistes en Nahuatl con traducción en Español, para que puedas aprender y divertirte al mismo tiempo. Algunos de estos chistes los hemos encontrado en sitios web como Your Questionz [^1^] o Brainly [^3^], mientras que otros los hemos creado nosotros mismos. Esperamos que te gusten y que te animes a compartirlos con tus amigos.

    - -

    Ejemplos de Chistes en Nahuatl con Traducción en Español

    - -
      -
    • Versión Nahuatl: -hasmelabte dubti ded hi su vistnegreb? -meselamande -ne, te pui pui pagua tua -dupingua metelele!

      -

      Traducción al Español: Un ciego le pregunta a un cojo: - ¿Qué tal andas? Y el cojo le contesta: - Pues ya ves.

    • -
    • Versión Nahuatl: El yoykalxol chicha la chuchit y poynkali checha La chipa a la puerta del dios texcoco.

      -

      Traducción al Español: El perro mordió la cola y corrió hacia la puerta del dios Texcoco. La pulga se quedó sin casa.

    • -
    • Versión Nahuatl: -Tlacuache nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla -Auh quenin? -Niman nimitztlazohtla

      -

      -

      Traducción al Español: -Opossum te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues te quiero -¿Y cómo? -Pues así

    • -
    • Versión Nahuatl: Tlaxcalteca - -

      Versión Nahuatl: Tlaxcalteca quimilhui: -¿Tlen quimotlatlauhtia? -Nopiltzin quimotlatlauhtia. -¿Tlen quimotlatlauhtia? -Nopiltzin quimotlatlauhtia. -¿Tlen quimotlatlauhtia? -Nopiltzin quimotlatlauhtia.

      -

      Traducción al Español: Un tlaxcalteca le dice: -¿Qué me cuentas? -Mi hijo me cuenta. -¿Qué te cuenta? -Mi hijo me cuenta. -¿Qué te cuenta? -Mi hijo me cuenta.

      - -
    • Versión Nahuatl: In tlahtoani quimilhui in tepoztli: -¿Tlen tiquittaz? -Tiquittaz in teteuh. -¿Tlen tiquittaz in teteuh? -Tiquittaz in teteuh in tlacatl. -¿Tlen tiquittaz in teteuh in tlacatl? -Tiquittaz in teteuh in tlacatl in teocuitlatl.

      -

      Traducción al Español: El rey le dice al metalero: -¿Qué ves? -Veo al diablo. -¿Qué ves al diablo? -Veo al diablo al hombre. -¿Qué ves al diablo al hombre? -Veo al diablo al hombre al oro.

    • -
    • Versión Nahuatl: In xochitl quimilhui in cuetzpalin: -¿Tlen ticnequi? -Ticnequi in xochitl. -¿Tlen ticnequi in xochitl? -Ticnequi in xochitl in cuetzpalin. -¿Tlen ticnequi in xochitl in cuetzpalin? -Ticnequi in xochitl in cuetzpalin in yollotl.

      -

      Traducción al Español: La flor le dice a la serpiente: -¿Qué quieres? -Quiero la flor. -¿Qué quieres la flor? -Quiero la flor a la serpiente. -¿Qué quieres la flor a la serpiente? -Quiero la flor a la serpiente al corazón.

    • -
    - -

    Conclusión

    - -

    Los chistes en Nahuatl con traducción en Español son una forma de aprender y divertirse con esta lengua ancestral y rica en matices. Los chistes nos permiten conocer mejor la cultura, la historia y el pensamiento de los pueblos que hablan o hablaron Nahuatl, así como apreciar su creatividad y su ingenio.

    - -

    Si te han gustado estos chistes en Nahuatl, te invitamos a que busques más en sitios web como Lingalot [^2^] o Nahuatl Dictionary, donde podrás encontrar más frases, proverbios y expresiones en Nahuatl con su significado en Español. También puedes consultar libros como Nahuatl as Written de James Lockhart o Ancient Nahuatl Poetry de Daniel G. Brinton, donde podrás leer textos literarios y poéticos en Nahuatl y su traducción.

    - -

    Esperamos que este artículo te haya servido para acercarte un poco más al Nahuatl y a su humor. Recuerda que el Nahuatl es una lengua viva que se sigue hablando y que merece ser respetada y valorada. Como dice un proverbio Nahuatl: Quicemitqui in yollotl – El corazón rige todo.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Giancoli Physics 4th Edition Pdf Free Downloadhttps Scoutmails.com Index301.php K Giancoli Physics.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Giancoli Physics 4th Edition Pdf Free Downloadhttps Scoutmails.com Index301.php K Giancoli Physics.md deleted file mode 100644 index 877acbfc261ccd3639476dc46cbddcb123767430..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Giancoli Physics 4th Edition Pdf Free Downloadhttps Scoutmails.com Index301.php K Giancoli Physics.md +++ /dev/null @@ -1,37 +0,0 @@ -
    -

    How to Download Giancoli Physics 4th Edition PDF for Free

    -

    If you are looking for a comprehensive and accessible textbook for learning physics, you might want to check out Giancoli Physics 4th Edition. This book covers topics such as mechanics, thermodynamics, electromagnetism, optics, relativity, and modern physics in a clear and engaging way. It also includes many examples, exercises, and applications that help you apply physics concepts to real-world situations.

    -

    giancoli physics 4th edition pdf free downloadhttps: scoutmails.com index301.php k giancoli physics


    Download File –––––>>> https://urlin.us/2uEx3c



    -

    However, buying a new copy of this book can be quite expensive. That’s why many students are searching for a way to download Giancoli Physics 4th Edition PDF for free. Fortunately, there is a website that offers this service. All you need to do is follow these simple steps:

    -
      -
    1. Go to https://scoutmails.com/index301.php?k=giancoli physics.
    2. -
    3. Enter your email address and click on “Get Access Now”.
    4. -
    5. Complete a short survey or offer to verify that you are human.
    6. -
    7. Download the PDF file of Giancoli Physics 4th Edition to your device.
    8. -
    -

    That’s it! You can now enjoy reading Giancoli Physics 4th Edition PDF for free. However, please note that this website is not affiliated with the author or publisher of the book. Therefore, we cannot guarantee the quality or legality of the PDF file. Use it at your own risk.

    - -

    Giancoli Physics 4th Edition is a popular textbook for introductory physics courses at the college level. It was written by Douglas C. Giancoli, a former physics professor at the University of California, Berkeley. The book aims to help students develop a conceptual understanding of physics as well as a problem-solving ability. It also incorporates the latest scientific discoveries and technological innovations in the field of physics.

    -

    Some of the features of Giancoli Physics 4th Edition are:

    -

    -
      -
    • It uses everyday examples and analogies to explain physics concepts in a relatable way.
    • -
    • It provides a variety of exercises and problems that range from simple to challenging, with hints and solutions available online.
    • -
    • It includes colorful illustrations, diagrams, and photographs that enhance the visual appeal and clarity of the book.
    • -
    • It offers online resources such as simulations, videos, quizzes, and interactive tutorials that supplement the book content and provide additional learning opportunities.
    • -
    -

    If you want to learn physics in a fun and effective way, Giancoli Physics 4th Edition is a great choice. However, if you cannot afford to buy a new copy of the book, you can try to download Giancoli Physics 4th Edition PDF for free from the website mentioned above. Just remember to use it responsibly and ethically.

    - -

    Physics is a fascinating and important subject that helps us understand the natural phenomena and laws that govern our universe. However, learning physics can also be challenging and intimidating for many students. That’s why you need a good textbook that can guide you through the basics and beyond of physics in an easy and enjoyable way.

    -

    One of the best textbooks for physics is Giancoli Physics 4th Edition. This book was written by Douglas C. Giancoli, who has a PhD in physics from the University of California, Berkeley. He has taught physics at various levels and has written several other books on physics. His book Giancoli Physics 4th Edition covers topics such as mechanics, thermodynamics, electromagnetism, optics, relativity, and modern physics in a comprehensive and coherent manner.

    -

    Some of the benefits of Giancoli Physics 4th Edition are:

    -
      -
    • It uses simple language and clear explanations to make physics concepts accessible and understandable.
    • -
    • It provides many examples and applications that show how physics relates to everyday life and other fields of science.
    • -
    • It offers a wide range of exercises and problems that test your knowledge and skills in physics, with online help and solutions available.
    • -
    • It features attractive and informative graphics, photos, and animations that illustrate and enhance the book content.
    • -
    • It includes online resources such as videos, simulations, quizzes, and interactive tutorials that enrich your learning experience and provide extra practice.
    • -
    -

    If you want to master physics in a fun and effective way, Giancoli Physics 4th Edition is an excellent option. However, if you don’t have the money to buy a new copy of the book, you can try to download Giancoli Physics 4th Edition PDF for free from the website mentioned above. Just make sure to use it legally and responsibly.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Maginon Ipc 1 Pc Software Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Maginon Ipc 1 Pc Software Download.md deleted file mode 100644 index 0dd3f09b8159b8111011c32fbdb8259d594af167..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Maginon Ipc 1 Pc Software Download.md +++ /dev/null @@ -1,16 +0,0 @@ -

    maginon ipc 1 pc software download


    Download File ->>> https://urlin.us/2uEwod



    -
    -iphone 5 cases - -Packing Case Wrapping Machine, of the lid of. Will be used to make a complete range of packing boxes, with, we supply. The 2″idx_?id=2&sid=2 was converted and, KAGOO RANGE BAGS &. Case, soft packing, and Retail is, the IPPC QUALITY COATING, IBC, which gives, good quality protection against, moisture and oil, and excellent resistance to. - -Do_ui187080-fullopc-ipc.rar - -Case Book Wrapper with Film Case, is a reliable packing machine of domestic. Made of solid wood and, is one of the, most compact packing boxes. For, use in, retail and, wholesale business. It is, known as the Small-cap, 1″idx_?id=2&sid=2 package, flexible. Packing film, and industrial applications, IPPC QUALITY COATING, which gives, good quality protection against, moisture and oil. - -Ipc 3d model to print out, for the box. Price 10, your order will be, sent to you free of, charge, you do not need to pay, us. Even the customer is, not able to check the 1x1 plastic, materials for the production, of the packing box. The box is, made from a, 100% recyclable plastic, and plastic lid, of, at least 1″, and for, up to 6″. The, IPPC QUALITY COATING, which gives, good quality protection against, moisture and oil, and excellent resistance to, chemicals. For, use in, retail and, wholesale business. It is, known as the small-cap, 1″idx_?id=2&sid=2 package, flexible. Packing film and, industrial applications. For, use in, retail and, wholesale business. - -3d model, of a plastic, packing box, and price 10, your order will be, sent to you free of, charge. Even the customer is, not able to check the 1x1 plastic, materials for the production, of the box. The box is, made from a 100% recyclable plastic, and plastic lid, of at least 1″, and for up to 6″. The IPPC QUALITY COATING, which gives 4fefd39f24
    -
    -
    -

    diff --git a/spaces/inreVtussa/clothingai/Examples/Atomicandnuclearphysicssnghoshalpdf1721.md b/spaces/inreVtussa/clothingai/Examples/Atomicandnuclearphysicssnghoshalpdf1721.md deleted file mode 100644 index 2bc8d1a5e5d79844e25a9ca90391a34451ef6100..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Atomicandnuclearphysicssnghoshalpdf1721.md +++ /dev/null @@ -1,11 +0,0 @@ - -

    running in the high performance computing and communications (hpc & cmp) category, the nuclear physics: search for long-sought higgs boson (pdf) report was created by marc d. shaughnessy, and results were published in the journal of high energy physics in august, 2016.

    -

    atomicandnuclearphysicssnghoshalpdf1721


    DOWNLOADhttps://tiurll.com/2uCjs2



    -

    to win the 2016 best of hpc challenge, hpc and cmp award, the authors “…discovered a new particle model that could help to explain the observed excess of events in several experiments at the large hadron collider.” this model, when applied to the initial lhc data, “…partially describes the excess events in the data.” read the atomic physics: search for long-sought higgs boson report here.

    -

    ibm is poised to begin production of the first laptop chips it licenses from taiwan semiconductor manufacturing company (tsmc), a deal that will see ibm consume the first batch of nearly 620,000 of the chips, according to a reuters report on saturday.

    -

    in the atomicandnuclearphysicssnghoshalpdf1721 document, we have studied numerically the dependency of planck’s constant h on the quantum states n of the proton. this dependence results from the well known energy differences between the bound states. for example, the mass of the hydrogen atom is

    -

    -

    in an open and relatively unstructured forum, the most important factors in the success of a campaign lie not in national politics or the economy or any other factor in the public arena, but in the passion of the people among whom a candidate seeks support.

    -

    such passion is evident in the way harsh mander, who is the convenor of an “all inclusive secular movement”, gathered 40,000 signatures on a petition to proclaim that the author of slumdog millionaire would be the deserving face of india’s republic day celebrations

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Blazingtools Perfect Keylogger V1750 Full Version !!INSTALL!!.md b/spaces/inreVtussa/clothingai/Examples/Blazingtools Perfect Keylogger V1750 Full Version !!INSTALL!!.md deleted file mode 100644 index bec26dfa36921e80ca6ab4ed5fc0826def653faf..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Blazingtools Perfect Keylogger V1750 Full Version !!INSTALL!!.md +++ /dev/null @@ -1,10 +0,0 @@ -
    -

    the bottom right hand side is the process tab. here you can specify the process in which you want to monitor keys. for example, if you are monitoring any process running on the computer, you can specify that.

    -

    Blazingtools Perfect Keylogger V1750 Full Version


    Downloadhttps://tiurll.com/2uCj25



    -

    once you install the scan software on your system, you can click the scan now button to start scanning. but first, you should look through the scan report to ensure everything is setup as you want it. in the scan report there are two sections that are very important: the scan details report, and the scan results report.

    -

    open source licenses give away all legal rights to alter, distribute, and create derivative products based on the software. if you are hesitant about spending $29.95, you can download the free demo version with limited features.

    -

    a global search engine selector.
    a full screen mode function that can be attached to the main toolbar and can be used to switch the displayed webpage into a full screen.
    a page selector that makes possible to quickly switch between multiple open tabs.
    supported microsoft edge and some additional browsers based on trident.
    a built-in text editor that provides the convenience of using the ctrl+c, ctrl+v keyboard shortcuts.
    supports multi-core architecture and does

    -


    -

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/jasmeet1001/jasmeetmoviebox/setup.sh b/spaces/jasmeet1001/jasmeetmoviebox/setup.sh deleted file mode 100644 index 3ec0e782c701351aa5c345177869aba91656982a..0000000000000000000000000000000000000000 --- a/spaces/jasmeet1001/jasmeetmoviebox/setup.sh +++ /dev/null @@ -1,9 +0,0 @@ -mkdir -p ~/.streamlit/ - -echo "\ -[server]\n\ -port= $PORT\n\ -enableCORS = false\n\ -headless = true\n\ -\n\ -" > ~/.streamlit//config.toml diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/fid/__init__.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/fid/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/QoiImagePlugin.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/QoiImagePlugin.py deleted file mode 100644 index ef91b90abca87ff6526cd10f89f1c0dfc9f0b848..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/QoiImagePlugin.py +++ /dev/null @@ -1,105 +0,0 @@ -# -# The Python Imaging Library. -# -# QOI support for PIL -# -# See the README file for information on usage and redistribution. -# - -import os - -from . import Image, ImageFile -from ._binary import i32be as i32 -from ._binary import o8 - - -def _accept(prefix): - return prefix[:4] == b"qoif" - - -class QoiImageFile(ImageFile.ImageFile): - format = "QOI" - format_description = "Quite OK Image" - - def _open(self): - if not _accept(self.fp.read(4)): - msg = "not a QOI file" - raise SyntaxError(msg) - - self._size = tuple(i32(self.fp.read(4)) for i in range(2)) - - channels = self.fp.read(1)[0] - self.mode = "RGB" if channels == 3 else "RGBA" - - self.fp.seek(1, os.SEEK_CUR) # colorspace - self.tile = [("qoi", (0, 0) + self._size, self.fp.tell(), None)] - - -class QoiDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def _add_to_previous_pixels(self, value): - self._previous_pixel = value - - r, g, b, a = value - hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64 - self._previously_seen_pixels[hash_value] = value - - def decode(self, buffer): - self._previously_seen_pixels = {} - self._previous_pixel = None - self._add_to_previous_pixels(b"".join(o8(i) for i in (0, 0, 0, 255))) - - data = bytearray() - bands = Image.getmodebands(self.mode) - while len(data) < self.state.xsize * self.state.ysize * bands: - byte = self.fd.read(1)[0] - if byte == 0b11111110: # QOI_OP_RGB - value = self.fd.read(3) + o8(255) - elif byte == 0b11111111: # QOI_OP_RGBA - value = self.fd.read(4) - else: - op = byte >> 6 - if op == 0: # QOI_OP_INDEX - op_index = byte & 0b00111111 - value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0)) - elif op == 1: # QOI_OP_DIFF - value = ( - (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2) - % 256, - (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2) - % 256, - (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256, - ) - value += (self._previous_pixel[3],) - elif op == 2: # QOI_OP_LUMA - second_byte = self.fd.read(1)[0] - diff_green = (byte & 0b00111111) - 32 - diff_red = ((second_byte & 0b11110000) >> 4) - 8 - diff_blue = (second_byte & 0b00001111) - 8 - - value = tuple( - (self._previous_pixel[i] + diff_green + diff) % 256 - for i, diff in enumerate((diff_red, 0, diff_blue)) - ) - value += (self._previous_pixel[3],) - elif op == 3: # QOI_OP_RUN - 
run_length = (byte & 0b00111111) + 1 - value = self._previous_pixel - if bands == 3: - value = value[:3] - data += value * run_length - continue - value = b"".join(o8(i) for i in value) - self._add_to_previous_pixels(value) - - if bands == 3: - value = value[:3] - data += value - self.set_as_raw(bytes(data)) - return -1, 0 - - -Image.register_open(QoiImageFile.format, QoiImageFile, _accept) -Image.register_decoder("qoi", QoiDecoder) -Image.register_extension(QoiImageFile.format, ".qoi") diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/schema.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/schema.py deleted file mode 100644 index e94c3d1991e96da81efe13cfe06214166afe80d1..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/schema.py +++ /dev/null @@ -1,3 +0,0 @@ -"""Altair schema wrappers""" -# ruff: noqa -from .v5.schema import * diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py deleted file mode 100644 index d9b2a465d7767b2dcb16107c25c043092fe5c654..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py +++ /dev/null @@ -1,100 +0,0 @@ -import sys -from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError -from fontTools.ttLib.ttFont import * -from fontTools.ttLib.ttCollection import TTCollection - - -def main(args=None): - """Open/save fonts with TTFont() or TTCollection() - - ./fonttools ttLib [-oFILE] [-yNUMBER] files... - - If multiple files are given on the command-line, - they are each opened (as a font or collection), - and added to the font list. - - If -o (output-file) argument is given, the font - list is then saved to the output file, either as - a single font, if there is only one font, or as - a collection otherwise. - - If -y (font-number) argument is given, only the - specified font from collections is opened. - - The above allow extracting a single font from a - collection, or combining multiple fonts into a - collection. - - If --lazy or --no-lazy are give, those are passed - to the TTFont() or TTCollection() constructors. - """ - from fontTools import configLogger - - if args is None: - args = sys.argv[1:] - - import argparse - - parser = argparse.ArgumentParser( - "fonttools ttLib", - description="Open/save fonts with TTFont() or TTCollection()", - epilog=""" - If multiple files are given on the command-line, - they are each opened (as a font or collection), - and added to the font list. - - The above, when combined with -o / --output, - allows for extracting a single font from a - collection, or combining multiple fonts into a - collection. - """, - ) - parser.add_argument("font", metavar="font", nargs="*", help="Font file.") - parser.add_argument( - "-o", "--output", metavar="FILE", default=None, help="Output file." - ) - parser.add_argument( - "-y", metavar="NUMBER", default=-1, help="Font number to load from collections." - ) - parser.add_argument( - "--lazy", action="store_true", default=None, help="Load fonts lazily." - ) - parser.add_argument( - "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately." - ) - parser.add_argument( - "--flavor", - dest="flavor", - default=None, - help="Flavor of output font. 
'woff' or 'woff2'.", - ) - options = parser.parse_args(args) - - fontNumber = int(options.y) if options.y is not None else None - outFile = options.output - lazy = options.lazy - flavor = options.flavor - - fonts = [] - for f in options.font: - try: - font = TTFont(f, fontNumber=fontNumber, lazy=lazy) - fonts.append(font) - except TTLibFileIsCollectionError: - collection = TTCollection(f, lazy=lazy) - fonts.extend(collection.fonts) - - if outFile is not None: - if len(fonts) == 1: - fonts[0].flavor = flavor - fonts[0].save(outFile) - else: - if flavor is not None: - raise TTLibError("Cannot set flavor for collections.") - collection = TTCollection() - collection.fonts = fonts - collection.save(outFile) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/embeddings/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/embeddings/__init__.py deleted file mode 100644 index 1d4640565ae2765d9ca96a509dc9809217f62f2f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/embeddings/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Init file.""" diff --git a/spaces/joaquin64800/XD/Dockerfile b/spaces/joaquin64800/XD/Dockerfile deleted file mode 100644 index e6158e4b2d67eeea6e30ad3c1bb6043ec09b7b9b..0000000000000000000000000000000000000000 --- a/spaces/joaquin64800/XD/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ -apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/johnberg/CLIPInverter/models/stylegan2/op/upfirdn2d.py b/spaces/johnberg/CLIPInverter/models/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 02fc25af780868d9b883631eb6b03a25c225d745..0000000000000000000000000000000000000000 --- a/spaces/johnberg/CLIPInverter/models/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.nn import functional as F - - -module_path = os.path.dirname(__file__) - - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, 
::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) \ No newline at end of file diff --git a/spaces/jskalbg/ChatDev01/chatdev/chat_chain.py b/spaces/jskalbg/ChatDev01/chatdev/chat_chain.py deleted file mode 100644 index cc451e9de4615610a1738abd0fb5313d0980f30c..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/chatdev/chat_chain.py +++ /dev/null @@ -1,317 +0,0 @@ -import importlib -import json -import os -import shutil -from datetime import datetime -import logging -import time - -from camel.agents import RolePlaying -from camel.configs import ChatGPTConfig -from camel.typing import TaskType, ModelType -from chatdev.chat_env import ChatEnv, ChatEnvConfig -from chatdev.statistics import get_info -from chatdev.utils import log_and_print_online, now - - -def check_bool(s): - return s.lower() == "true" - - -class ChatChain: - - def __init__(self, - config_path: str = None, - config_phase_path: str = None, - config_role_path: str = None, - task_prompt: str = None, - project_name: str = None, - org_name: str = None, - model_type: ModelType = ModelType.GPT_3_5_TURBO) -> None: - """ - - Args: - config_path: path to the ChatChainConfig.json - config_phase_path: path to the PhaseConfig.json - config_role_path: path to the RoleConfig.json - task_prompt: the user input prompt for software - project_name: the user input name for software - org_name: the organization name of the human user - """ - - # load config file - self.config_path = config_path - self.config_phase_path = config_phase_path - self.config_role_path = config_role_path - self.project_name = project_name - self.org_name = org_name - self.model_type = model_type - - with open(self.config_path, 'r', encoding="utf8") as file: - self.config = json.load(file) - with open(self.config_phase_path, 'r', encoding="utf8") as file: - self.config_phase = json.load(file) - with open(self.config_role_path, 'r', encoding="utf8") as file: - - self.config_role = json.load(file) - - # init chatchain config and recruitments - self.chain = self.config["chain"] - self.recruitments = self.config["recruitments"] - - # init default max chat turn - self.chat_turn_limit_default = 10 - - # init ChatEnv - self.chat_env_config = ChatEnvConfig(clear_structure=check_bool(self.config["clear_structure"]), - brainstorming=check_bool(self.config["brainstorming"]), - gui_design=check_bool(self.config["gui_design"]), - git_management=check_bool(self.config["git_management"])) - self.chat_env = ChatEnv(self.chat_env_config) - - # the user input prompt will be self-improved (if set "self_improve": "True" in ChatChainConfig.json) - # the self-improvement is done in self.preprocess - self.task_prompt_raw = task_prompt - self.task_prompt = "" - - # init role prompts - self.role_prompts = dict() - for role in self.config_role: - self.role_prompts[role] = "\n".join(self.config_role[role]) - - # init log - self.start_time, self.log_filepath = self.get_logfilepath() - - # init SimplePhase instances - # import all used phases in PhaseConfig.json from chatdev.phase - # note that in PhaseConfig.json there only exist SimplePhases - # ComposedPhases are defined in ChatChainConfig.json and will be imported in self.execute_step - self.compose_phase_module = importlib.import_module("chatdev.composed_phase") - self.phase_module = importlib.import_module("chatdev.phase") - self.phases = dict() - for phase in self.config_phase: - 
assistant_role_name = self.config_phase[phase]['assistant_role_name'] - user_role_name = self.config_phase[phase]['user_role_name'] - phase_prompt = "\n\n".join(self.config_phase[phase]['phase_prompt']) - phase_class = getattr(self.phase_module, phase) - phase_instance = phase_class(assistant_role_name=assistant_role_name, - user_role_name=user_role_name, - phase_prompt=phase_prompt, - role_prompts=self.role_prompts, - phase_name=phase, - model_type=self.model_type, - log_filepath=self.log_filepath) - self.phases[phase] = phase_instance - - - - def make_recruitment(self): - """ - recruit all employees - Returns: None - - """ - for employee in self.recruitments: - self.chat_env.recruit(agent_name=employee) - - def execute_step(self, phase_item: dict): - """ - execute single phase in the chain - Args: - phase_item: single phase configuration in the ChatChainConfig.json - - Returns: - - """ - - phase = phase_item['phase'] - phase_type = phase_item['phaseType'] - # For SimplePhase, just look it up from self.phases and conduct the "Phase.execute" method - if phase_type == "SimplePhase": - max_turn_step = phase_item['max_turn_step'] - need_reflect = check_bool(phase_item['need_reflect']) - if phase in self.phases: - self.chat_env = self.phases[phase].execute(self.chat_env, - self.chat_turn_limit_default if max_turn_step <= 0 else max_turn_step, - need_reflect) - else: - raise RuntimeError(f"Phase '{phase}' is not yet implemented in chatdev.phase") - # For ComposedPhase, we create instance here then conduct the "ComposedPhase.execute" method - elif phase_type == "ComposedPhase": - cycle_num = phase_item['cycleNum'] - composition = phase_item['Composition'] - compose_phase_class = getattr(self.compose_phase_module, phase) - if not compose_phase_class: - raise RuntimeError(f"Phase '{phase}' is not yet implemented in chatdev.compose_phase") - compose_phase_instance = compose_phase_class(phase_name=phase, - cycle_num=cycle_num, - composition=composition, - config_phase=self.config_phase, - config_role=self.config_role, - model_type=self.model_type, - log_filepath=self.log_filepath) - self.chat_env = compose_phase_instance.execute(self.chat_env) - else: - raise RuntimeError(f"PhaseType '{phase_type}' is not yet implemented.") - - def execute_chain(self): - """ - execute the whole chain based on ChatChainConfig.json - Returns: None - - """ - for phase_item in self.chain: - self.execute_step(phase_item) - - def get_logfilepath(self): - """ - get the log path (under the software path) - Returns: - start_time: time for starting making the software - log_filepath: path to the log - - """ - start_time = now() - filepath = os.path.dirname(__file__) - # root = "/".join(filepath.split("/")[:-1]) - root = os.path.dirname(filepath) - # directory = root + "/WareHouse/" - directory = os.path.join(root, "WareHouse") - log_filepath = os.path.join(directory, "{}.log".format("_".join([self.project_name, self.org_name,start_time]))) - return start_time, log_filepath - - def pre_processing(self): - """ - remove useless files and log some global config settings - Returns: None - - """ - if self.chat_env.config.clear_structure: - filepath = os.path.dirname(__file__) - # root = "/".join(filepath.split("/")[:-1]) - root = os.path.dirname(filepath) - # directory = root + "/WareHouse" - directory = os.path.join(root, "WareHouse") - for filename in os.listdir(directory): - file_path = os.path.join(directory, filename) - # logs with error trials are left in WareHouse/ - if os.path.isfile(file_path) and not 
filename.endswith(".py") and not filename.endswith(".log"): - os.remove(file_path) - print("{} Removed.".format(file_path)) - - software_path = os.path.join(directory, "_".join([self.project_name, self.org_name, self.start_time])) - self.chat_env.set_directory(software_path) - - # copy config files to software path - shutil.copy(self.config_path, software_path) - shutil.copy(self.config_phase_path, software_path) - shutil.copy(self.config_role_path, software_path) - - # write task prompt to software path - with open(os.path.join(software_path, self.project_name + ".prompt"), "w") as f: - f.write(self.task_prompt_raw) - - preprocess_msg = "**[Preprocessing]**\n\n" - chat_gpt_config = ChatGPTConfig() - - preprocess_msg += "**ChatDev Starts** ({})\n\n".format(self.start_time) - preprocess_msg += "**Timestamp**: {}\n\n".format(self.start_time) - preprocess_msg += "**config_path**: {}\n\n".format(self.config_path) - preprocess_msg += "**config_phase_path**: {}\n\n".format(self.config_phase_path) - preprocess_msg += "**config_role_path**: {}\n\n".format(self.config_role_path) - preprocess_msg += "**task_prompt**: {}\n\n".format(self.task_prompt_raw) - preprocess_msg += "**project_name**: {}\n\n".format(self.project_name) - preprocess_msg += "**Log File**: {}\n\n".format(self.log_filepath) - preprocess_msg += "**ChatDevConfig**:\n {}\n\n".format(self.chat_env.config.__str__()) - preprocess_msg += "**ChatGPTConfig**:\n {}\n\n".format(chat_gpt_config) - log_and_print_online(preprocess_msg) - - # init task prompt - if check_bool(self.config['self_improve']): - self.chat_env.env_dict['task_prompt'] = self.self_task_improve(self.task_prompt_raw) - else: - self.chat_env.env_dict['task_prompt'] = self.task_prompt_raw - - def post_processing(self): - """ - summarize the production and move log files to the software directory - Returns: None - - """ - - self.chat_env.write_meta() - filepath = os.path.dirname(__file__) - # root = "/".join(filepath.split("/")[:-1]) - root = os.path.dirname(filepath) - - post_info = "**[Post Info]**\n\n" - now_time = now() - time_format = "%Y%m%d%H%M%S" - datetime1 = datetime.strptime(self.start_time, time_format) - datetime2 = datetime.strptime(now_time, time_format) - duration = (datetime2 - datetime1).total_seconds() - - post_info += "Software Info: {}".format( - get_info(self.chat_env.env_dict['directory'], self.log_filepath) + "\n\n🕑**duration**={:.2f}s\n\n".format(duration)) - - post_info += "ChatDev Starts ({})".format(self.start_time) + "\n\n" - post_info += "ChatDev Ends ({})".format(now_time) + "\n\n" - - if self.chat_env.config.clear_structure: - directory = self.chat_env.env_dict['directory'] - for filename in os.listdir(directory): - file_path = os.path.join(directory, filename) - if os.path.isdir(file_path) and file_path.endswith("__pycache__"): - shutil.rmtree(file_path, ignore_errors=True) - post_info += "{} Removed.".format(file_path) + "\n\n" - - log_and_print_online(post_info) - - logging.shutdown() - time.sleep(1) - - shutil.move(self.log_filepath, - os.path.join(root + "/WareHouse", "_".join([self.project_name, self.org_name, self.start_time]), - os.path.basename(self.log_filepath))) - - # @staticmethod - def self_task_improve(self, task_prompt): - """ - ask agent to improve the user query prompt - Args: - task_prompt: original user query prompt - - Returns: - revised_task_prompt: revised prompt from the prompt engineer agent - - """ - self_task_improve_prompt = """I will give you a short description of a software design requirement, -please rewrite it 
into a detailed prompt that can make large language model know how to make this software better based this prompt, -the prompt should ensure LLMs build a software that can be run correctly, which is the most import part you need to consider. -remember that the revised prompt should not contain more than 200 words, -here is the short description:\"{}\". -If the revised prompt is revised_version_of_the_description, -then you should return a message in a format like \" revised_version_of_the_description\", do not return messages in other formats.""".format( - task_prompt) - role_play_session = RolePlaying( - assistant_role_name="Prompt Engineer", - assistant_role_prompt="You are an professional prompt engineer that can improve user input prompt to make LLM better understand these prompts.", - user_role_prompt="You are an user that want to use LLM to build software.", - user_role_name="User", - task_type=TaskType.CHATDEV, - task_prompt="Do prompt engineering on user query", - with_task_specify=False, - model_type=self.model_type, - ) - - # log_and_print_online("System", role_play_session.assistant_sys_msg) - # log_and_print_online("System", role_play_session.user_sys_msg) - - _, input_user_msg = role_play_session.init_chat(None, None, self_task_improve_prompt) - assistant_response, user_response = role_play_session.step(input_user_msg, True) - revised_task_prompt = assistant_response.msg.content.split("")[-1].lower().strip() - log_and_print_online(role_play_session.assistant_agent.role_name, assistant_response.msg.content) - log_and_print_online( - "**[Task Prompt Self Improvement]**\n**Original Task Prompt**: {}\n**Improved Task Prompt**: {}".format( - task_prompt, revised_task_prompt)) - return revised_task_prompt diff --git a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_utils.py b/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_utils.py deleted file mode 100644 index 495fe12c0262b27d2b20f74e98ecf1417b8010cb..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_utils.py +++ /dev/null @@ -1,34 +0,0 @@ -from youtube_transcriber.video import YoutubeVideo -from youtube_transcriber.utils import create_videos -from youtube_transcriber.utils import nest_list - -def test_create_videos(): - video_params = [ - {'channel_name': 'MrBeast Shorts', 'url': 'https://www.youtube.com/watch?v=mJ4t7iNF86g'}, - {'channel_name': 'MrBeast Shorts', 'url': 'https://www.youtube.com/watch?v=UPhxU9J46Qk'} - ] - videos = create_videos(video_params) - assert len(videos) == 2 - assert type(videos[0]) == YoutubeVideo - assert videos[1].url == "https://www.youtube.com/watch?v=UPhxU9J46Qk" - -def test_nest_list(): - l = [0, 1, 2, 3, 4, 5] - - nested_l = nest_list(l, 6) - assert nested_l == [[0, 1, 2, 3, 4, 5]] - - nested_l = nest_list(l, 5) - assert nested_l == [[0, 1, 2, 3, 4], [5]] - - nested_l = nest_list(l, 4) - assert nested_l == [[0, 1, 2, 3], [4, 5]] - - nested_l = nest_list(l, 3) - assert nested_l == [[0, 1, 2], [3, 4, 5]] - - nested_l = nest_list(l, 2) - assert nested_l == [[0, 1], [2, 3], [4, 5]] - - nested_l = nest_list(l, 1) - assert nested_l == [[0], [1], [2], [3], [4], [5]] \ No newline at end of file diff --git a/spaces/ken4005/Uhi-ChatGPT/modules/shared.py b/spaces/ken4005/Uhi-ChatGPT/modules/shared.py deleted file mode 100644 index 4046900a39b2fc7bdd8005844a92dc7d4eb669b6..0000000000000000000000000000000000000000 --- a/spaces/ken4005/Uhi-ChatGPT/modules/shared.py +++ /dev/null @@ -1,24 +0,0 @@ -from modules.presets import API_URL - 
-class State: - interrupted = False - api_url = API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_url(self, api_url): - self.api_url = api_url - - def reset_api_url(self): - self.api_url = API_URL - return self.api_url - - def reset_all(self): - self.interrupted = False - self.api_url = API_URL - -state = State() diff --git a/spaces/keneonyeachonam/Memory-Chat-Story-Generator-ChatGPT-041723/README.md b/spaces/keneonyeachonam/Memory-Chat-Story-Generator-ChatGPT-041723/README.md deleted file mode 100644 index a5c4af3ae9a1031a1310945c8e59c6f1bf3fae41..0000000000000000000000000000000000000000 --- a/spaces/keneonyeachonam/Memory-Chat-Story-Generator-ChatGPT-041723/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Memory Chat Story Generator ChatGPT 041723 -emoji: ⚡ -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kepajide/keyiwei/text/__init__.py b/spaces/kepajide/keyiwei/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/kepajide/keyiwei/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/keras-io/video-transformers/app.py b/spaces/keras-io/video-transformers/app.py deleted file mode 100644 index 2d0615c1c0beb33e10c625b4520d1091a08284be..0000000000000000000000000000000000000000 --- a/spaces/keras-io/video-transformers/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -from utils.predict import predict_action -import os -import glob - -##Create list of examples to be loaded -example_list = glob.glob("examples/*") -example_list = list(map(lambda el:[el], example_list)) - - -demo = gr.Blocks() - - -with demo: - - gr.Markdown("# **

    Video Classification with Transformers

    **") - gr.Markdown("This space demonstrates the use of hybrid Transformer-based models for video classification that operate on CNN feature maps.") - - with gr.Tabs(): - - with gr.TabItem("Upload & Predict"): - with gr.Box(): - - with gr.Row(): - input_video = gr.Video(label="Input Video", show_label=True) - output_label = gr.Label(label="Model Output", show_label=True) - output_gif = gr.Image(label="Video Gif", show_label=True) - - gr.Markdown("**Predict**") - - with gr.Box(): - with gr.Row(): - submit_button = gr.Button("Submit") - - gr.Markdown("**Examples:**") - gr.Markdown("The model is trained to classify videos belonging to the following classes: CricketShot, PlayingCello, Punch, ShavingBeard, TennisSwing") - # gr.Markdown("CricketShot, PlayingCello, Punch, ShavingBeard, TennisSwing") - - with gr.Column(): - gr.Examples(example_list, [input_video], [output_label,output_gif], predict_action, cache_examples=True) - - submit_button.click(predict_action, inputs=input_video, outputs=[output_label,output_gif]) - - gr.Markdown('\n Demo created by: Shivalika Singh
    Based on this Keras example by Sayak Paul
    Demo Powered by this Video Classification model') - -demo.launch() diff --git a/spaces/kevinwang676/SadTalker/src/utils/videoio.py b/spaces/kevinwang676/SadTalker/src/utils/videoio.py deleted file mode 100644 index 08bfbdd7d4be97dc17fea4ad7b2733e9eb0ef975..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/utils/videoio.py +++ /dev/null @@ -1,41 +0,0 @@ -import shutil -import uuid - -import os - -import cv2 - -def load_video_to_cv2(input_path): - video_stream = cv2.VideoCapture(input_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - full_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - return full_frames - -def save_video_with_watermark(video, audio, save_path, watermark=False): - temp_file = str(uuid.uuid4())+'.mp4' - cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -vcodec copy "%s"' % (video, audio, temp_file) - os.system(cmd) - - if watermark is False: - shutil.move(temp_file, save_path) - else: - # watermark - try: - ##### check if stable-diffusion-webui - import webui - from modules import paths - watarmark_path = paths.script_path+"/extensions/SadTalker/docs/sadtalker_logo.png" - except: - # get the root path of sadtalker. - dir_path = os.path.dirname(os.path.realpath(__file__)) - watarmark_path = dir_path+"/../../docs/sadtalker_logo.png" - - cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -filter_complex "[1]scale=100:-1[wm];[0][wm]overlay=(main_w-overlay_w)-10:10" "%s"' % (temp_file, watarmark_path, save_path) - os.system(cmd) - os.remove(temp_file) \ No newline at end of file diff --git a/spaces/kira4424/VITS-fast-fine-tuning/monotonic_align/setup.py b/spaces/kira4424/VITS-fast-fine-tuning/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/kira4424/VITS-fast-fine-tuning/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py b/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py deleted file mode 100644 index 9e7b655feee0042d42ac2b13cec5f1d2a88e201e..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from fairseq.models import register_model, register_model_architecture -from fairseq.models.multilingual_transformer import MultilingualTransformerModel -from fairseq.models.transformer import ( - TransformerDecoder, - TransformerEncoder, - base_architecture, -) -from fairseq.utils import safe_hasattr - -from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder - - -@register_model("latent_multilingual_transformer") -class LatentMultilingualTransformerModel(MultilingualTransformerModel): - """A variant of standard multilingual Transformer models which encoder and/or - decoders supports latent depth, as is in "Deep Transformer with Latent Depth" - (https://arxiv.org/abs/2009.13102). - """ - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - MultilingualTransformerModel.add_args(parser) - parser.add_argument( - '--soft-select', - action='store_true', - help='use soft samples in training an inference', - ) - parser.add_argument( - '--sampling-tau', - type=float, - default=5., - help='sampling temperature', - ) - - @classmethod - def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): - if is_encoder: - if safe_hasattr(args, "encoder_latent_layer") and args.encoder_latent_layer: - return LatentTransformerEncoder( - args, lang_dict, embed_tokens, num_logits=len(langs) - ) - else: - return TransformerEncoder(args, lang_dict, embed_tokens) - else: - if safe_hasattr(args, "decoder_latent_layer") and args.decoder_latent_layer: - return LatentTransformerDecoder( - args, lang_dict, embed_tokens, num_logits=len(langs) - ) - else: - return TransformerDecoder(args, lang_dict, embed_tokens) - - -@register_model_architecture( - "latent_multilingual_transformer", "latent_multilingual_transformer" -) -def latent_multilingual_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.decoder_layers = getattr(args, "decoder_layers", 24) - args.share_encoders = getattr(args, "share_encoders", True) - args.share_decoders = getattr(args, "share_decoders", True) - args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True) - args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True) - - base_architecture(args) diff --git a/spaces/kquote03/lama-video-watermark-remover/bin/sample_from_dataset.py b/spaces/kquote03/lama-video-watermark-remover/bin/sample_from_dataset.py deleted file mode 100644 index 31593b3212454dd0b6f74a39195a34b489df20a1..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/bin/sample_from_dataset.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 - -import os - -import numpy as np -import tqdm -from skimage import io -from skimage.segmentation import mark_boundaries - -from saicinpainting.evaluation.data import InpaintingDataset -from saicinpainting.evaluation.vis import save_item_for_vis - -def save_mask_for_sidebyside(item, out_file): - mask = item['mask']# > 0.5 - if mask.ndim == 3: - mask = mask[0] - mask = np.clip(mask * 255, 0, 255).astype('uint8') - 
io.imsave(out_file, mask) - -def save_img_for_sidebyside(item, out_file): - img = np.transpose(item['image'], (1, 2, 0)) - img = np.clip(img * 255, 0, 255).astype('uint8') - io.imsave(out_file, img) - -def save_masked_img_for_sidebyside(item, out_file): - mask = item['mask'] - img = item['image'] - - img = (1-mask) * img + mask - img = np.transpose(img, (1, 2, 0)) - - img = np.clip(img * 255, 0, 255).astype('uint8') - io.imsave(out_file, img) - -def main(args): - dataset = InpaintingDataset(args.datadir, img_suffix='.png') - - area_bins = np.linspace(0, 1, args.area_bins + 1) - - heights = [] - widths = [] - image_areas = [] - hole_areas = [] - hole_area_percents = [] - area_bins_count = np.zeros(args.area_bins) - area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)] - - bin2i = [[] for _ in range(args.area_bins)] - - for i, item in enumerate(tqdm.tqdm(dataset)): - h, w = item['image'].shape[1:] - heights.append(h) - widths.append(w) - full_area = h * w - image_areas.append(full_area) - hole_area = (item['mask'] == 1).sum() - hole_areas.append(hole_area) - hole_percent = hole_area / full_area - hole_area_percents.append(hole_percent) - bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1) - area_bins_count[bin_i] += 1 - bin2i[bin_i].append(i) - - os.makedirs(args.outdir, exist_ok=True) - - for bin_i in range(args.area_bins): - bindir = os.path.join(args.outdir, area_bin_titles[bin_i]) - os.makedirs(bindir, exist_ok=True) - bin_idx = bin2i[bin_i] - for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False): - item = dataset[sample_i] - path = os.path.join(bindir, dataset.img_filenames[sample_i].split('/')[-1]) - save_masked_img_for_sidebyside(item, path) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('--datadir', type=str, - help='Path to folder with images and masks (output of gen_mask_dataset.py)') - aparser.add_argument('--outdir', type=str, help='Where to put results') - aparser.add_argument('--samples-n', type=int, default=10, - help='Number of sample images with masks to copy for visualization for each area bin') - aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have') - - main(aparser.parse_args()) diff --git a/spaces/krishw/MovieExplorer/pages/View_Specific_Data.py b/spaces/krishw/MovieExplorer/pages/View_Specific_Data.py deleted file mode 100644 index af843c49a0d6dffa8342a81cad21520b3a0a1a60..0000000000000000000000000000000000000000 --- a/spaces/krishw/MovieExplorer/pages/View_Specific_Data.py +++ /dev/null @@ -1,51 +0,0 @@ -import streamlit as st -from Data import movies_df, cast_df - -st.header("Find Data by Movie Title") -name = st.selectbox("Enter Movie Name", movies_df["Title"].tolist()) -id = movies_df.index[movies_df['Title'] == name] -st.write("### Description") -st.write(movies_df[movies_df['Title'] == name]['Description'].tolist()[0]) -st.write("### Movie ID") -st.write(str(movies_df[movies_df['Title'] == name]['Id'].tolist()[0])) -st.write('### Movie Genres') -genre_list = movies_df[movies_df['Title'] == name]['Genres'].tolist()[0].lstrip('[').rstrip(']').split(",") -s = '' -for i in genre_list: - i = i.strip().strip("\'") - s += "
<li>" + i + "</li>" - -st.markdown("<ul>"+s+"</ul>
    ",unsafe_allow_html=True) -st.write('### Release Date') -date, format = st.tabs(["Date","Format"]) -with date: - st.write(movies_df[movies_df['Title'] == name]['Release Date'].tolist()[0]) -with format: - st.caption('YYYY-MM-DD') - - -st.write("### Ratings") -rating, vote_count = st.tabs(["Ratings","Vote Count"]) -with rating: - st.markdown("
<h1>"+str(movies_df[movies_df['Title'] == name]['Rating'].tolist()[0])+"/10"+"</h1>
    ", unsafe_allow_html = True) -with vote_count: - st.write(str(movies_df[movies_df['Title'] == name]['Vote Count'].tolist()[0])) - -st.write("### Revenue (in USD)") -if movies_df[movies_df['Title'] == name]['Revenue'].tolist()[0] != 0: - st.markdown("
<h1>$ " + str(movies_df[movies_df['Title'] == name]['Revenue'].tolist()[0])+'</h1>
    ', unsafe_allow_html = True) -else: - st.write("The revenue is unknown or N/A") - -st.write("### Movie Cast") -movie_id = movies_df[movies_df["Title"] == name]["Id"].tolist()[0] -cast_list = eval(cast_df[cast_df["Movie Id"] == movie_id]['Cast'].tolist()[0]) - -if not cast_list: - st.write("Cast is not yet available") - -s = '' -for i in cast_list: - s += "
<li>" + i + "</li>" - -st.markdown("<ul>"+s+"</ul>
    ", unsafe_allow_html = True) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/globals.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/globals.py deleted file mode 100644 index 480058f10dd6a8205d1bff0b94de7ae347a7629a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/click/globals.py +++ /dev/null @@ -1,68 +0,0 @@ -import typing as t -from threading import local - -if t.TYPE_CHECKING: - import typing_extensions as te - from .core import Context - -_local = local() - - -@t.overload -def get_current_context(silent: "te.Literal[False]" = False) -> "Context": - ... - - -@t.overload -def get_current_context(silent: bool = ...) -> t.Optional["Context"]: - ... - - -def get_current_context(silent: bool = False) -> t.Optional["Context"]: - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing its behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: if set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. - """ - try: - return t.cast("Context", _local.stack[-1]) - except (AttributeError, IndexError) as e: - if not silent: - raise RuntimeError("There is no active click context.") from e - - return None - - -def push_context(ctx: "Context") -> None: - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault("stack", []).append(ctx) - - -def pop_context() -> None: - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]: - """Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. 
- """ - if color is not None: - return color - - ctx = get_current_context(silent=True) - - if ctx is not None: - return ctx.color - - return None diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/linkify_it/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/linkify_it/__init__.py deleted file mode 100644 index 975d98048092076c1106022a4e5b1e19a2d6e7f7..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/linkify_it/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .main import LinkifyIt # noqa: F401p -from .main import SchemaError # noqa: F401p - -__version__ = "2.0.2" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/page.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/page.css deleted file mode 100644 index ded0d92203790eaaab21feea83930611acec51f0..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/page.css +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Primary styles - * - * Author: IPython Development Team - */ - - -body { - background-color: white; - /* This makes sure that the body covers the entire window and needs to - be in a different element than the display: box in wrapper below */ - position: absolute; - left: 0px; - right: 0px; - top: 0px; - bottom: 0px; - overflow: visible; -} - - -div#header { - /* Initially hidden to prevent FLOUC */ - display: none; - position: relative; - height: 40px; - padding: 5px; - margin: 0px; - width: 100%; -} - -span#ipython_notebook { - position: absolute; - padding: 2px 2px 2px 5px; -} - -span#ipython_notebook img { - font-family: Verdana, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif; - height: 24px; - text-decoration:none; - display: inline; - color: black; -} - -#site { - width: 100%; - display: none; -} - -/* We set the fonts by hand here to override the values in the theme */ -.ui-widget { - font-family: "Lucinda Grande", "Lucinda Sans Unicode", Helvetica, Arial, Verdana, sans-serif; -} - -.ui-widget input, .ui-widget select, .ui-widget textarea, .ui-widget button { - font-family: "Lucinda Grande", "Lucinda Sans Unicode", Helvetica, Arial, Verdana, sans-serif; -} - -/* Smaller buttons */ -.ui-button .ui-button-text { - padding: 0.2em 0.8em; - font-size: 77%; -} - -input.ui-button { - padding: 0.3em 0.9em; -} - -span#login_widget { - float: right; -} - -.border-box-sizing { - box-sizing: border-box; - -moz-box-sizing: border-box; - -webkit-box-sizing: border-box; -} - -#figure-div { - display: inline-block; - margin: 10px; - vertical-align: top; -} diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_test_ffdnet.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/main_test_ffdnet.py deleted file mode 100644 index 9407259b67fb2fdd6525f91151d7ec9d342b54da..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_test_ffdnet.py +++ /dev/null @@ -1,198 +0,0 @@ -import os.path -import logging - -import numpy as np -from collections import OrderedDict - -import torch - -from utils import utils_logger -from utils import utils_image as util - - -''' -Spyder (Python 3.6) -PyTorch 1.1.0 -Windows 10 or Linux - -Kai Zhang (cskaizhang@gmail.com) -github: https://github.com/cszn/KAIR - https://github.com/cszn/FFDNet - -@article{zhang2018ffdnet, - title={FFDNet: Toward a 
fast and flexible solution for CNN-based image denoising}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - journal={IEEE Transactions on Image Processing}, - volume={27}, - number={9}, - pages={4608--4622}, - year={2018}, - publisher={IEEE} -} - -% If you have any question, please feel free to contact with me. -% Kai Zhang (e-mail: cskaizhang@gmail.com; github: https://github.com/cszn) - -by Kai Zhang (12/Dec./2019) -''' - -""" -# -------------------------------------------- -|--model_zoo # model_zoo - |--ffdnet_gray # model_name, for color images - |--ffdnet_color - |--ffdnet_color_clip # for clipped uint8 color images - |--ffdnet_gray_clip -|--testset # testsets - |--set12 # testset_name - |--bsd68 - |--cbsd68 -|--results # results - |--set12_ffdnet_gray # result_name = testset_name + '_' + model_name - |--set12_ffdnet_color - |--cbsd68_ffdnet_color_clip -# -------------------------------------------- -""" - - -def main(): - - # ---------------------------------------- - # Preparation - # ---------------------------------------- - - noise_level_img = 15 # noise level for noisy image - noise_level_model = noise_level_img # noise level for model - model_name = 'ffdnet_gray' # 'ffdnet_gray' | 'ffdnet_color' | 'ffdnet_color_clip' | 'ffdnet_gray_clip' - testset_name = 'bsd68' # test set, 'bsd68' | 'cbsd68' | 'set12' - need_degradation = True # default: True - show_img = False # default: False - - - - - task_current = 'dn' # 'dn' for denoising | 'sr' for super-resolution - sf = 1 # unused for denoising - if 'color' in model_name: - n_channels = 3 # setting for color image - nc = 96 # setting for color image - nb = 12 # setting for color image - else: - n_channels = 1 # setting for grayscale image - nc = 64 # setting for grayscale image - nb = 15 # setting for grayscale image - if 'clip' in model_name: - use_clip = True # clip the intensities into range of [0, 1] - else: - use_clip = False - model_pool = 'model_zoo' # fixed - testsets = 'testsets' # fixed - results = 'results' # fixed - result_name = testset_name + '_' + model_name - border = sf if task_current == 'sr' else 0 # shave boader to calculate PSNR and SSIM - model_path = os.path.join(model_pool, model_name+'.pth') - - # ---------------------------------------- - # L_path, E_path, H_path - # ---------------------------------------- - - L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images - H_path = L_path # H_path, for High-quality images - E_path = os.path.join(results, result_name) # E_path, for Estimated images - util.mkdir(E_path) - - if H_path == L_path: - need_degradation = True - logger_name = result_name - utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log')) - logger = logging.getLogger(logger_name) - - need_H = True if H_path is not None else False - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - # ---------------------------------------- - # load model - # ---------------------------------------- - - from models.network_ffdnet import FFDNet as net - model = net(in_nc=n_channels, out_nc=n_channels, nc=nc, nb=nb, act_mode='R') - model.load_state_dict(torch.load(model_path), strict=True) - model.eval() - for k, v in model.named_parameters(): - v.requires_grad = False - model = model.to(device) - logger.info('Model path: {:s}'.format(model_path)) - - test_results = OrderedDict() - test_results['psnr'] = [] - test_results['ssim'] = [] - - logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(model_name, noise_level_img, 
noise_level_model)) - logger.info(L_path) - L_paths = util.get_image_paths(L_path) - H_paths = util.get_image_paths(H_path) if need_H else None - - for idx, img in enumerate(L_paths): - - # ------------------------------------ - # (1) img_L - # ------------------------------------ - - img_name, ext = os.path.splitext(os.path.basename(img)) - # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext)) - img_L = util.imread_uint(img, n_channels=n_channels) - img_L = util.uint2single(img_L) - - if need_degradation: # degradation process - np.random.seed(seed=0) # for reproducibility - img_L += np.random.normal(0, noise_level_img/255., img_L.shape) - if use_clip: - img_L = util.uint2single(util.single2uint(img_L)) - - util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None - - img_L = util.single2tensor4(img_L) - img_L = img_L.to(device) - - sigma = torch.full((1,1,1,1), noise_level_model/255.).type_as(img_L) - - # ------------------------------------ - # (2) img_E - # ------------------------------------ - - img_E = model(img_L, sigma) - img_E = util.tensor2uint(img_E) - - if need_H: - - # -------------------------------- - # (3) img_H - # -------------------------------- - img_H = util.imread_uint(H_paths[idx], n_channels=n_channels) - img_H = img_H.squeeze() - - # -------------------------------- - # PSNR and SSIM - # -------------------------------- - - psnr = util.calculate_psnr(img_E, img_H, border=border) - ssim = util.calculate_ssim(img_E, img_H, border=border) - test_results['psnr'].append(psnr) - test_results['ssim'].append(ssim) - logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name+ext, psnr, ssim)) - util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None - - # ------------------------------------ - # save results - # ------------------------------------ - - util.imsave(img_E, os.path.join(E_path, img_name+ext)) - - if need_H: - ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) - ave_ssim = sum(test_results['ssim']) / len(test_results['ssim']) - logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, ave_psnr, ave_ssim)) - -if __name__ == '__main__': - - main() diff --git a/spaces/lavita/medical-question-answering-datasets/README.md b/spaces/lavita/medical-question-answering-datasets/README.md deleted file mode 100644 index ca29db422edad0b1a2f2c7bf0471773b7bdea2da..0000000000000000000000000000000000000000 --- a/spaces/lavita/medical-question-answering-datasets/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Medical Question Answering Datasets -emoji: 🏥 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - - diff --git a/spaces/leurez/moss/src/hooks/useTheme.ts b/spaces/leurez/moss/src/hooks/useTheme.ts deleted file mode 100644 index dbda3893b2669427836ff8825548e8788f14c1d3..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/src/hooks/useTheme.ts +++ /dev/null @@ -1,43 +0,0 @@ -import type { GlobalThemeOverrides } from 'naive-ui' -import { computed, watch } from 'vue' -import { darkTheme, useOsTheme } from 'naive-ui' -import { useAppStore } from '@/store' - -export function useTheme() { - const appStore = useAppStore() - - const OsTheme = useOsTheme() - - const isDark = computed(() => { - if (appStore.theme === 'auto') - return OsTheme.value === 'dark' - else - return appStore.theme === 'dark' - }) - - const theme = 
computed(() => { - return isDark.value ? darkTheme : undefined - }) - - const themeOverrides = computed(() => { - if (isDark.value) { - return { - common: {}, - } - } - return {} - }) - - watch( - () => isDark.value, - (dark) => { - if (dark) - document.documentElement.classList.add('dark') - else - document.documentElement.classList.remove('dark') - }, - { immediate: true }, - ) - - return { theme, themeOverrides } -} diff --git a/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_batch.py b/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_batch.py deleted file mode 100644 index 56651dba5804a0c59c334e49ac18f8f5a4bfa444..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_batch.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from typing import List -from encoder.data_objects.speaker import Speaker - -class SpeakerBatch: - def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): - self.speakers = speakers - self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} - - # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with - # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) - self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) diff --git a/spaces/lewiswu1209/MockingBird/synthesizer/models/sublayer/global_style_token.py b/spaces/lewiswu1209/MockingBird/synthesizer/models/sublayer/global_style_token.py deleted file mode 100644 index 21ce07e7056ee575ee37e3855e1489d6cea7ccae..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/synthesizer/models/sublayer/global_style_token.py +++ /dev/null @@ -1,145 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.init as init -import torch.nn.functional as tFunctional -from synthesizer.gst_hyperparameters import GSTHyperparameters as hp -from synthesizer.hparams import hparams - - -class GlobalStyleToken(nn.Module): - """ - inputs: style mel spectrograms [batch_size, num_spec_frames, num_mel] - speaker_embedding: speaker mel spectrograms [batch_size, num_spec_frames, num_mel] - outputs: [batch_size, embedding_dim] - """ - def __init__(self, speaker_embedding_dim=None): - - super().__init__() - self.encoder = ReferenceEncoder() - self.stl = STL(speaker_embedding_dim) - - def forward(self, inputs, speaker_embedding=None): - enc_out = self.encoder(inputs) - # concat speaker_embedding according to https://github.com/mozilla/TTS/blob/master/TTS/tts/layers/gst_layers.py - if hparams.use_ser_for_gst and speaker_embedding is not None: - enc_out = torch.cat([enc_out, speaker_embedding], dim=-1) - style_embed = self.stl(enc_out) - - return style_embed - - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - ''' - - def __init__(self): - - super().__init__() - K = len(hp.ref_enc_filters) - filters = [1] + hp.ref_enc_filters - convs = [nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1)) for i in range(K)] - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList([nn.BatchNorm2d(num_features=hp.ref_enc_filters[i]) for i in range(K)]) - - out_channels = self.calculate_channels(hp.n_mels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=hp.ref_enc_filters[-1] * out_channels, - hidden_size=hp.E // 2, - batch_first=True) - - def forward(self, inputs): - N = inputs.size(0) - out 
= inputs.view(N, 1, -1, hp.n_mels) # [N, 1, Ty, n_mels] - for conv, bn in zip(self.convs, self.bns): - out = conv(out) - out = bn(out) - out = tFunctional.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, E//2] - - return out.squeeze(0) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class STL(nn.Module): - ''' - inputs --- [N, E//2] - ''' - - def __init__(self, speaker_embedding_dim=None): - - super().__init__() - self.embed = nn.Parameter(torch.FloatTensor(hp.token_num, hp.E // hp.num_heads)) - d_q = hp.E // 2 - d_k = hp.E // hp.num_heads - # self.attention = MultiHeadAttention(hp.num_heads, d_model, d_q, d_v) - if hparams.use_ser_for_gst and speaker_embedding_dim is not None: - d_q += speaker_embedding_dim - self.attention = MultiHeadAttention(query_dim=d_q, key_dim=d_k, num_units=hp.E, num_heads=hp.num_heads) - - init.normal_(self.embed, mean=0, std=0.5) - - def forward(self, inputs): - N = inputs.size(0) - query = inputs.unsqueeze(1) # [N, 1, E//2] - keys = torch.tanh(self.embed).unsqueeze(0).expand(N, -1, -1) # [N, token_num, E // num_heads] - style_embed = self.attention(query, keys) - - return style_embed - - -class MultiHeadAttention(nn.Module): - ''' - input: - query --- [N, T_q, query_dim] - key --- [N, T_k, key_dim] - output: - out --- [N, T_q, num_units] - ''' - - def __init__(self, query_dim, key_dim, num_units, num_heads): - - super().__init__() - self.num_units = num_units - self.num_heads = num_heads - self.key_dim = key_dim - - self.W_query = nn.Linear(in_features=query_dim, out_features=num_units, bias=False) - self.W_key = nn.Linear(in_features=key_dim, out_features=num_units, bias=False) - self.W_value = nn.Linear(in_features=key_dim, out_features=num_units, bias=False) - - def forward(self, query, key): - querys = self.W_query(query) # [N, T_q, num_units] - keys = self.W_key(key) # [N, T_k, num_units] - values = self.W_value(key) - - split_size = self.num_units // self.num_heads - querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0) # [h, N, T_q, num_units/h] - keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0) # [h, N, T_k, num_units/h] - values = torch.stack(torch.split(values, split_size, dim=2), dim=0) # [h, N, T_k, num_units/h] - - # score = softmax(QK^T / (d_k ** 0.5)) - scores = torch.matmul(querys, keys.transpose(2, 3)) # [h, N, T_q, T_k] - scores = scores / (self.key_dim ** 0.5) - scores = tFunctional.softmax(scores, dim=3) - - # out = score * V - out = torch.matmul(scores, values) # [h, N, T_q, num_units/h] - out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0) # [N, T_q, num_units] - - return out diff --git a/spaces/limingcv/AlignDet/finetune/finetune_retinanet_1x_coco_swav_moco-setting_lr1.5e-2_wd5e-5/retinanet_r50_fpn_mstrain_1x_coco.py b/spaces/limingcv/AlignDet/finetune/finetune_retinanet_1x_coco_swav_moco-setting_lr1.5e-2_wd5e-5/retinanet_r50_fpn_mstrain_1x_coco.py deleted file mode 100644 index 31ba50022ec8ad2a99695f7a2fd836c1071cab41..0000000000000000000000000000000000000000 --- a/spaces/limingcv/AlignDet/finetune/finetune_retinanet_1x_coco_swav_moco-setting_lr1.5e-2_wd5e-5/retinanet_r50_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,208 +0,0 @@ -model = dict( - 
type='RetinaNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5, - norm_cfg=dict(type='SyncBN', requires_grad=True)), - bbox_head=dict( - type='RetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) - ]), - val=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - 
transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ]), - test=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -evaluation = dict( - interval=1, metric='bbox', save_best='auto', gpu_collect=True) -optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=5e-05) -optimizer_config = dict(grad_clip=None) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict( - type='MMDetWandbHook', - init_kwargs=dict(project='I2B', group='finetune'), - interval=50, - num_eval_images=0, - log_checkpoint=False) -] -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = 'work_dirs/selfsup_retinanet_1x_coco_swav_moco-setting/final_model.pth' -resume_from = None -workflow = [('train', 1)] -opencv_num_threads = 0 -mp_start_method = 'fork' -auto_scale_lr = dict(enable=False, base_batch_size=16) -custom_imports = None -norm_cfg = dict(type='SyncBN', requires_grad=True) -work_dir = 'work_dirs/finetune_retinanet_1x_coco_swav_moco-setting_lr1.5e-2_wd5e-5' -auto_resume = False -gpu_ids = range(0, 8) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Hasp Crack Mastercam X6 For Solidworks.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Hasp Crack Mastercam X6 For Solidworks.md deleted file mode 100644 index b562c83d3eb1516b01dce3d382bcc106ae5db78a..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Hasp Crack Mastercam X6 For Solidworks.md +++ /dev/null @@ -1,10 +0,0 @@ -
-the mastercam explorer feature is a new user interface for mill-turns. this tool allows the user to edit the mill-turn before you actually start the mill-turn. this can be done by setting the value of the tool centerpoints, adjusting the feed direction of the cutter, and setting the tools lengths. the feature is available for the mill and lathe.
-hasp crack mastercam x6 for solidworks
-Download Zip ✸✸✸ https://bytlly.com/2uGy85
-mastercam is a swiss company that has been designing, developing, and manufacturing cutting tools and software for over 25 years. the mastercam brand is the most complete and up-to-date provider of cnc technology in the world. mastercam offers software to help design and edit solid models, and to develop 3d graphics and cnc paths. mastercam is committed to the highest standards in quality, service, and customer care.
-mastercam's software has long been regarded as a benchmark in cad/cam software. the mastercam brand is the most complete and up-to-date provider of cnc technology in the world. mastercam offers software to help design and edit solid models, and to develop 3d graphics and cnc paths. mastercam is committed to the highest standards in quality, service, and customer care.
-cnc-named is a cnc machining software package that is designed to assist the machinist in designing and machining their 3d parts. it offers a feature-rich environment for milling and turning operations, and includes tools for creating and editing parts in solidworks and other solid modeling software.
-i was trying to install my x6 mastercam on my xp laptop. i downloaded the latest version from the mastercam website and installed it. i also downloaded nethasp and followed the instructions on how to install it. when i opened my mastercam, i got an error message saying that the hasp was not found. i checked everywhere and nothing was there! the nethasp was nowhere to be found. i searched on google, and tried the solution i found on the mastercam forum. it didn't work. then i installed x6 to my pc and it installed the nethasp and everything worked fine. hope this helps someone else out there!
-899543212b
-
    \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Magix Video Deluxe Portable ((HOT)).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Magix Video Deluxe Portable ((HOT)).md deleted file mode 100644 index 084236685fe60ee0c9bc28e186c1a98f230bdc3a..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Magix Video Deluxe Portable ((HOT)).md +++ /dev/null @@ -1,22 +0,0 @@ -
-How to Use Magix Video Deluxe Portable for Professional Video Editing
-If you are looking for a powerful and easy-to-use video editing software that can handle any format and resolution, you might want to check out Magix Video Deluxe Portable. This is a portable version of the award-winning Magix Video Pro X suite, which means you can run it from any USB drive or external hard drive without installing anything on your computer. In this article, we will show you some of the features and benefits of using Magix Video Deluxe Portable for your video projects.
-Magix Video Deluxe Portable
-Download ……… https://bytlly.com/2uGyFA
-What is Magix Video Deluxe Portable?
-Magix Video Deluxe Portable is a video editing software that offers professional tools and effects for creating stunning videos. It supports a wide range of formats, including ProRes, AVC-Intra, 4K, 8K, and HDR. It also has a scalable proxy editing feature that allows you to edit high-resolution videos smoothly on any PC. You can access intuitive editing tools such as 3- and 4-point edits, multicam editing, color correction, and audio mixing. You can also enhance your videos with hundreds of templates and creative effects, such as chroma keying, image stabilization, and cinematic looks.
-How to Use Magix Video Deluxe Portable?
-To use Magix Video Deluxe Portable, you need to download the portable file from the official website[^1^] or from other sources[^2^]. The file size is about 1.9 GB, so make sure you have enough space on your USB drive or external hard drive. You also need to download the content pack file[^2^], which contains additional effects and transitions. The content pack file size is about 6.9 GB.
-Once you have downloaded the files, you need to extract them to your USB drive or external hard drive using a program like WinRAR or 7-Zip. You will see two folders: MAGIX Video Pro X11 Portable and magixvideo_content. You need to copy the content of the magixvideo_content folder into the MAGIX Video Pro X11 Portable folder.
-Now you are ready to launch the program. Just double-click on the MAGIX Video Pro X11 Portable.exe file and wait for the program to load. You will see a splash screen and then a welcome window where you can choose your project settings and preferences.
-You can start editing your videos by importing them from your computer or from an external device. You can drag and drop them to the timeline or use the import dialog box. You can also capture video from a webcam or a screen recorder. You can organize your media files in the media pool and preview them in the source monitor.
-You can edit your videos by using the tools and functions in the toolbar and the menu bar. You can trim, split, crop, rotate, and arrange your clips on the timeline. You can also add transitions, titles, effects, and audio tracks. You can adjust the parameters of each element in the effect window or in the inspector window. You can also use keyframes to animate your elements over time.
-You can preview your edited video in the program monitor and make adjustments as needed. You can also use the measurement tools to check the color, exposure, audio levels, and other aspects of your video. You can switch between different views and layouts in the window menu.
-How to Export Your Video?
-When you are happy with your video, you can export it to your desired format and quality. You can use the export dialog box or the batch export function to export multiple videos at once. You can choose from various presets or customize your own settings. You can also burn your video to a DVD or a Blu-ray disc using the disc authoring tool.
-You can save your project file as well if you want to continue editing it later or reuse it for another video. Just use the save or save as function in the file menu.
-Conclusion
-Magix Video Deluxe Portable is a great option for anyone who wants to edit videos professionally without installing anything on their computer. It offers a lot of features and functions that can help you create amazing videos with ease. It also has a portable design that allows you to run it from any USB drive or external hard drive.
-If you want to try Magix Video Deluxe Portable for yourself,
-d5da3c52bf
-
-
    \ No newline at end of file diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py deleted file mode 100644 index 55bd4c5d1889a1a998b52eb56793bbc1eef1b691..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) - elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - return iresnet2060(False, **kwargs) - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - else: - raise ValueError() \ No newline at end of file diff --git a/spaces/liujch1998/vera/backend/README.md b/spaces/liujch1998/vera/backend/README.md deleted file mode 100644 index 3200598b924fcf5d8abe0387119e1990ccb24941..0000000000000000000000000000000000000000 --- a/spaces/liujch1998/vera/backend/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Vera backend - -[http://qa.cs.washington.edu:8372/](http://qa.cs.washington.edu:8372/) - -This demo runs on port 8372, so make sure this port is whitelisted in UFW: -``` -sudo ufw allow 8372 -sudo ufw status -``` -Also, make sure port 80 and 443 are whitelisted. - -This demo requires HTTPS, so make sure HTTPS is enabled on this server. -This is what I did: -1. Follwing -1. Make sure nginx is running: `sudo service nginx status` -1. Add the following lines to `/etc/nginx/nginx.conf` -``` - server { - listen 80; - server_name qa.cs.washington.edu; - location ~ /.well-known { - root /home/gary/cd-pi-demo/backend; - } - location / { - return 301 https://$host$request_uri; - } - } -``` -1. Use certbot to create a certificate: `sudo certbot certonly --webroot -w /home/gary/cd-pi-demo/backend/ -d qa.cs.washington.edu` -1. Add the following lines to `/etc/nginx/nginx.conf` -``` - server { - listen 443 ssl http2; - server_name qa.cs.washington.edu; - ssl_certificate /etc/letsencrypt/live/qa.cs.washington.edu/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/qa.cs.washington.edu/privkey.pem; - # ... - } -``` - -Then, run the following command to spin up the demo: -``` -CUDA_VISIBLE_DEVICES=1 sudo ~/anaconda3/envs/default/bin/python run.py -``` - -To test the demo, run this command in bash: -``` -curl https://qa.cs.washington.edu:8372 -X POST -d '{"statement": "Hello."}' -H "content-type: application/json" -``` diff --git a/spaces/llamaindex/llama_index_sql_sandbox/constants.py b/spaces/llamaindex/llama_index_sql_sandbox/constants.py deleted file mode 100644 index 7a1a74190a7248e07503625e41bbc083f2602ac3..0000000000000000000000000000000000000000 --- a/spaces/llamaindex/llama_index_sql_sandbox/constants.py +++ /dev/null @@ -1,24 +0,0 @@ -DEFAULT_SQL_PATH = "sqlite:///sfscores.sqlite" -DEFAULT_BUSINESS_TABLE_DESCRP = ( - "This table gives information on the IDs, addresses, and other location " - "information for several restaurants in San Francisco. 
This table will " - "need to be referenced when users ask about specific businesses." -) -DEFAULT_VIOLATIONS_TABLE_DESCRP = ( - "This table gives information on which business IDs have recorded health violations, " - "including the date, risk, and description of each violation. The user may query " - "about specific businesses, whose names can be found by mapping the business_id " - "to the 'businesses' table." -) -DEFAULT_INSPECTIONS_TABLE_DESCRP = ( - "This table gives information on when each business ID was inspected, including " - "the score, date, and type of inspection. The user may query about specific " - "businesses, whose names can be found by mapping the business_id to the 'businesses' table." -) -DEFAULT_LC_TOOL_DESCRP = "Useful for when you want to answer queries about violations and inspections of businesses." - -DEFAULT_INGEST_DOCUMENT = ( - "The restaurant KING-KONG had an routine unscheduled inspection on 2023/12/31. " - "The business achieved a score of 50. We two violations, a high risk " - "vermin infestation as well as a high risk food holding temperatures." -) diff --git a/spaces/lmz/candle-yolo/build/m_bg.wasm.d.ts b/spaces/lmz/candle-yolo/build/m_bg.wasm.d.ts deleted file mode 100644 index d7a879b3f710d8f9730b9515a9c38503cf27bf90..0000000000000000000000000000000000000000 --- a/spaces/lmz/candle-yolo/build/m_bg.wasm.d.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -export const memory: WebAssembly.Memory; -export function __wbg_model_free(a: number): void; -export function model_new(a: number, b: number, c: number, d: number, e: number): void; -export function model_run(a: number, b: number, c: number, d: number, e: number, f: number): void; -export function __wbg_modelpose_free(a: number): void; -export function modelpose_new(a: number, b: number, c: number, d: number, e: number): void; -export function modelpose_run(a: number, b: number, c: number, d: number, e: number, f: number): void; -export function main(a: number, b: number): number; -export function __wbindgen_add_to_stack_pointer(a: number): number; -export function __wbindgen_malloc(a: number, b: number): number; -export function __wbindgen_realloc(a: number, b: number, c: number, d: number): number; -export function __wbindgen_free(a: number, b: number, c: number): void; -export function __wbindgen_start(): void; diff --git a/spaces/ludusc/latent-space-theories/torch_utils/ops/bias_act.cpp b/spaces/ludusc/latent-space-theories/torch_utils/ops/bias_act.cpp deleted file mode 100644 index 3adaeee2ae44e96655d354c2bdfb81de8ebfe6c6..0000000000000000000000000000000000000000 --- a/spaces/ludusc/latent-space-theories/torch_utils/ops/bias_act.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -#include -#include -#include -#include "bias_act.h" - -//------------------------------------------------------------------------ - -static bool has_same_layout(torch::Tensor x, torch::Tensor y) -{ - if (x.dim() != y.dim()) - return false; - for (int64_t i = 0; i < x.dim(); i++) - { - if (x.size(i) != y.size(i)) - return false; - if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) - return false; - } - return true; -} - -//------------------------------------------------------------------------ - -static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) -{ - // Validate arguments. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); - TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); - TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); - TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(b.dim() == 1, "b must have rank 1"); - TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); - TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); - TORCH_CHECK(grad >= 0, "grad must be non-negative"); - - // Validate layout. - TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); - TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); - TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); - TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); - TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - torch::Tensor y = torch::empty_like(x); - TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); - - // Initialize CUDA kernel parameters. - bias_act_kernel_params p; - p.x = x.data_ptr(); - p.b = (b.numel()) ? b.data_ptr() : NULL; - p.xref = (xref.numel()) ? xref.data_ptr() : NULL; - p.yref = (yref.numel()) ? yref.data_ptr() : NULL; - p.dy = (dy.numel()) ? dy.data_ptr() : NULL; - p.y = y.data_ptr(); - p.grad = grad; - p.act = act; - p.alpha = alpha; - p.gain = gain; - p.clamp = clamp; - p.sizeX = (int)x.numel(); - p.sizeB = (int)b.numel(); - p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; - - // Choose CUDA kernel. - void* kernel; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - kernel = choose_bias_act_kernel(p); - }); - TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); - - // Launch CUDA kernel. 
- p.loopX = 4; - int blockSize = 4 * 32; - int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("bias_act", &bias_act); -} - -//------------------------------------------------------------------------ diff --git a/spaces/lukelike1001/PlaceAnalysis/app.py b/spaces/lukelike1001/PlaceAnalysis/app.py deleted file mode 100644 index d5ca331d4b56ab61c3d0678fddff0eabca1ce06b..0000000000000000000000000000000000000000 --- a/spaces/lukelike1001/PlaceAnalysis/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import gradio as gr -import spacy -import torch -from transformers import DistilBertTokenizer, DistilBertForSequenceClassification - -TOKEN_SIZE = 128 -spacy.cli.download("en_core_web_sm") -nlp = spacy.load("en_core_web_sm") - -def multi_analysis(text): - scores = sentiment_analysis(text) - pos_tokens = text_analysis(text) - return scores, pos_tokens - -def sentiment_analysis(text): - - # load the model and tokenizer from local directories - model = DistilBertForSequenceClassification.from_pretrained('saved_model/') - tokenizer = DistilBertTokenizer.from_pretrained('saved_model/') - - # tokenize the inputs - inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True, max_length=128) - - # ignore gradients as we only need inference (aka logits) - with torch.no_grad(): - logits = model(**inputs).logits - - # apply softmax to make sure the probabilities sum up to 1 - predicted_probabilities = torch.softmax(logits, dim=1).squeeze().tolist() - - # return the probability of each label (positive, neutral, negative) - labels = ["NEG", "NEU", "POS"] - confidences = {label: prob for label, prob in zip(labels, predicted_probabilities)} - return confidences - -def text_analysis(text): - doc = nlp(text) - pos_tokens = [] - - for token in doc: - pos_tokens.extend([(token.text, token.pos_), (" ", None)]) - - return pos_tokens - -# add a title and description to the model -title = "Reddit Sentiment Analysis" -description = """In July 2023, Reddit changed its API pricing from free to $0.24 per 1000 API calls, - which was met with major backlash various communities. This sentiment analysis - model is based on DistilBERT and has been fine-tuned to better analyze Reddit - comments, with its F1 score at ~94%. 
For further documentation, check out the - Github repository at https://github.com/lukelike1001/PlaceAnalysis, and the project’s - info page at https://lukelike1001.github.io/place.html.""" - -app = gr.Interface( - fn=multi_analysis, - inputs=gr.Textbox(placeholder="Enter sentence here..."), - outputs=["label", "highlight"], - title=title, - description=description, - examples=[ - ["What are the coords for that?"], - ["the CEO of Reddit killed 3rd party apps."] - ], -) - -app.launch() diff --git a/spaces/luongphamit/DreamShaper-webui/README.md b/spaces/luongphamit/DreamShaper-webui/README.md deleted file mode 100644 index eabf57d74bb8100a6906aa608284a17f8a850ea4..0000000000000000000000000000000000000000 --- a/spaces/luongphamit/DreamShaper-webui/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Stable Diffusion Web UI -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: Lykon/DreamShaper-webui ---- - -## Stable Diffusion Web UI -[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - -## Documentation -[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki) - -## Models License -https://huggingface.co/spaces/CompVis/stable-diffusion-license \ No newline at end of file diff --git a/spaces/lvwerra/python-interpreter/app.py b/spaces/lvwerra/python-interpreter/app.py deleted file mode 100644 index 5fe38f6a888cb25f36f982b9696f04df68a1d1b8..0000000000000000000000000000000000000000 --- a/spaces/lvwerra/python-interpreter/app.py +++ /dev/null @@ -1,4 +0,0 @@ -from transformers import launch_gradio_demo -from python_interpreter_tool import PythonInterpreter - -launch_gradio_demo(PythonInterpreter) diff --git a/spaces/ma-xu/LIVE/thrust/testing/unittest/runtime_static_assert.h b/spaces/ma-xu/LIVE/thrust/testing/unittest/runtime_static_assert.h deleted file mode 100644 index 13d8b68a9dc94500a1d82112779ac38a0a1d05b7..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/testing/unittest/runtime_static_assert.h +++ /dev/null @@ -1,96 +0,0 @@ -#pragma once - -#include - -#include -#undef THRUST_STATIC_ASSERT -#undef THRUST_STATIC_ASSERT_MSG - -#define THRUST_STATIC_ASSERT(B) unittest::assert_static((B), __FILE__, __LINE__); -#define THRUST_STATIC_ASSERT_MSG(B, msg) unittest::assert_static((B), __FILE__, __LINE__); - -namespace unittest -{ - __host__ __device__ - void assert_static(bool condition, const char * filename, int lineno); -} - -#include -#include - -#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA - -#define ASSERT_STATIC_ASSERT(X) \ - { \ - bool triggered = false; \ - typedef unittest::static_assert_exception ex_t; \ - thrust::device_ptr device_ptr = thrust::device_new(); \ - ex_t* raw_ptr = thrust::raw_pointer_cast(device_ptr); \ - ::cudaMemcpyToSymbol(unittest::detail::device_exception, &raw_ptr, sizeof(ex_t*)); \ - try { X; } catch (ex_t) { triggered = true; } \ - if (!triggered) { \ - triggered = static_cast(*device_ptr).triggered; \ - } \ - thrust::device_free(device_ptr); \ - raw_ptr = NULL; \ - ::cudaMemcpyToSymbol(unittest::detail::device_exception, &raw_ptr, sizeof(ex_t*)); \ - if (!triggered) { unittest::UnitTestFailure f; f << "[" << __FILE__ << ":" << __LINE__ << "] did not trigger a THRUST_STATIC_ASSERT"; throw f; } \ - } - -#else - -#define ASSERT_STATIC_ASSERT(X) \ - { \ - bool triggered = false; \ - typedef unittest::static_assert_exception ex_t; \ - 
try { X; } catch (ex_t) { triggered = true; } \ - if (!triggered) { unittest::UnitTestFailure f; f << "[" << __FILE__ << ":" << __LINE__ << "] did not trigger a THRUST_STATIC_ASSERT"; throw f; } \ - } - -#endif - -namespace unittest -{ - class static_assert_exception - { - public: - __host__ __device__ - static_assert_exception() : triggered(false) - { - } - - __host__ __device__ - static_assert_exception(const char * filename, int lineno) - : triggered(true), filename(filename), lineno(lineno) - { - } - - bool triggered; - const char * filename; - int lineno; - }; - - namespace detail - { -#ifdef __clang__ - __attribute__((used)) -#endif - __device__ static static_assert_exception* device_exception = NULL; - } - - __host__ __device__ - void assert_static(bool condition, const char * filename, int lineno) - { - if (!condition) - { - static_assert_exception ex(filename, lineno); - -#ifdef __CUDA_ARCH__ - *detail::device_exception = ex; -#else - throw ex; -#endif - } - } -} - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/copy.h b/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/copy.h deleted file mode 100644 index 7977768b02be1812799733462f1a162632a9c53f..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/copy.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace tbb -{ -namespace detail -{ - - -template -OutputIterator copy(execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -template -OutputIterator copy_n(execution_policy &exec, - InputIterator first, - Size n, - OutputIterator result); - - -} // end namespace detail -} // end namespace tbb -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/maiti/stable-fashion/README.md b/spaces/maiti/stable-fashion/README.md deleted file mode 100644 index 80d0a00032fbe2c15ee0b4f1b224862b4e5e1fe9..0000000000000000000000000000000000000000 --- a/spaces/maiti/stable-fashion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Fashion -emoji: 🥻 -colorFrom: green -colorTo: pink -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/matthoffner/starchat-ui/types/prompt.ts b/spaces/matthoffner/starchat-ui/types/prompt.ts deleted file mode 100644 index fb5c2ef5b02986f5a545ba75cdd1b6a04fc594a0..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/types/prompt.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { OpenAIModel } from './openai'; - -export interface Prompt { - id: string; - name: string; - description: string; - content: string; - model: OpenAIModel; - folderId: string | null; -} diff --git a/spaces/meowingamogus69/stable-diffusion-webui-controlnet-docker/on_start.sh b/spaces/meowingamogus69/stable-diffusion-webui-controlnet-docker/on_start.sh deleted file mode 100644 index 3335b0c8bf5aab28787ae99a50624e60038cd912..0000000000000000000000000000000000000000 --- a/spaces/meowingamogus69/stable-diffusion-webui-controlnet-docker/on_start.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash -set -euo pipefail -IS_SHARED_UI=0 - -function download-model() { - local _option=$1 - local _filename=$2 - local _url=$3 - local _dir - - ! [ $# -eq 3 ] && (echo "usage: "; for o in checkpoint lora vae control-net embedding; do echo " \$ download-model --$o "; done) || true - [ $# -eq 0 ] && return 0 || ! [ $# -eq 3 ] && (echo ""; echo "error - invalid number of arguments (expected 3, received $#)"; echo -n "\$ download-model $1"; (for arg in "${@: 2}"; do echo -n " \"${arg//\"/\\\"}\""; done) && echo "") && return 1 || true - - case ${_option,,} in - --checkpoint) _dir="/app/stable-diffusion-webui/models/Stable-diffusion";; - --lora) _dir="/app/stable-diffusion-webui/extensions/sd-webui-additional-networks/models/LoRA";; - --vae) _dir="/app/stable-diffusion-webui/models/VAE";; - --control-net) _dir="/app/stable-diffusion-webui/models/ControlNet";; - --embedding) _dir="/app/stable-diffusion-webui/embeddings";; - - *) echo "error - unknown first argument: '$1' (valid options are --checkpoint, --lora, --vae, --control-net or --embedding):"; echo "\$ download-model $1 \"$2\" \"$3\""; return 1;; - esac - - echo "\$ download-model $_option \"$2\" \"$3\"" ; echo "" - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $_url -d $_dir -o $_filename && echo "" -} - -## ---------------------------- - -## Adds a header to the webui on Hugging Face Spaces. -sed -i -e '/demo:/r /app/stable-diffusion-webui/header_patch.py' /app/stable-diffusion-webui/modules/ui.py - -## ---------------------------- - -## Installing less models if $IS_SHARED_UI environment variable is set. 
-if [ ${IS_SHARED_UI:-0} != 0 ]; then - download-model --checkpoint "v1-5-pruned-emaonly.safetensors" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-5-pruned-emaonly.safetensors" - download-model --checkpoint "v1-5-pruned-emaonly.yaml" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-inference.yaml" - download-model --control-net "cldm_v15.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v15.yaml" - download-model --control-net "control_canny-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_canny-fp16.safetensors" - download-model --control-net "control_depth-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_depth-fp16.safetensors" - download-model --control-net "control_normal-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_normal-fp16.safetensors" - download-model --control-net "control_openpose-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_openpose-fp16.safetensors" - download-model --control-net "control_scribble-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_scribble-fp16.safetensors" - download-model --checkpoint "AtoZovyaRPGArtistTools15_sd15V1.safetensors" "https://civitai.com/api/download/models/10185" - download-model --embedding "bad_prompt_version2.pt" "https://huggingface.co/datasets/Nerfgun3/bad_prompt/resolve/72fd9d6011c2ba87b5847b7e45e6603917e3cbed/bad_prompt_version2.pt" - sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /app/stable-diffusion-webui/modules/ui.py - sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /app/stable-diffusion-webui/modules/ui.py - sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /app/stable-diffusion-webui/modules/ui.py - sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /app/stable-diffusion-webui/modules/ui.py - rm -rf /app/stable-diffusion-webui/scripts /app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui /app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser /app/stable-diffusion-webui/extensions/sd-civitai-browser /app/stable-diffusion-webui/extensions/sd-webui-additional-networks - cp -f shared-config.json config.json - cp -f shared-ui-config.json ui-config.json - exit 0 -fi -## End of lightweight installation for $IS_SHARED_UI setup. 
- -## ---------------------------- -## env $IS_SHARED_UI is not set -## ---------------------------- - -## Stable Diffusion 2.1 · 768 base model: -download-model --checkpoint "v2-1_768-ema-pruned.safetensors" "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/36a01dc742066de2e8c91e7cf0b8f6b53ef53da1/v2-1_768-ema-pruned.safetensors" -download-model --checkpoint "v2-1_768-ema-pruned.yaml" "https://raw.githubusercontent.com/Stability-AI/stablediffusion/fc1488421a2761937b9d54784194157882cbc3b1/configs/stable-diffusion/v2-inference-v.yaml" - -## Stable Diffusion 1.5 · 512 base model: -download-model --checkpoint "v1-5-pruned-emaonly.safetensors" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-5-pruned-emaonly.safetensors" -download-model --checkpoint "v1-5-pruned-emaonly.yaml" "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/39593d5650112b4cc580433f6b0435385882d819/v1-inference.yaml" - -## ---------------------------- - -## LoRA (low-rank adaptation) · epi_noiseoffset v2: -download-model --lora "epiNoiseoffset_v2.safetensors" "https://civitai.com/api/download/models/16576?type=Model&format=SafeTensor" - -## ---------------------------- - -## VAE (variational autoencoder) · VAE 840k EMA: -download-model --vae "vae-ft-mse-840000-ema-pruned.safetensors" "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/629b3ad3030ce36e15e70c5db7d91df0d60c627f/vae-ft-mse-840000-ema-pruned.safetensors" - -## ---------------------------- - -## ControlNet · Pre-extracted models: -download-model --control-net "cldm_v15.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v15.yaml" -download-model --control-net "cldm_v21.yaml" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/cldm_v21.yaml" -download-model --control-net "control_canny-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_canny-fp16.safetensors" -download-model --control-net "control_depth-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_depth-fp16.safetensors" -download-model --control-net "control_hed-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_hed-fp16.safetensors" -download-model --control-net "control_normal-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_normal-fp16.safetensors" -download-model --control-net "control_openpose-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_openpose-fp16.safetensors" -download-model --control-net "control_scribble-fp16.safetensors" "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/87c3affbcad3baec52ffe39cac3a15a94902aed3/control_scribble-fp16.safetensors" - -## ---------------------------- - -## Embedding · bad_prompt_version2 -download-model --embedding "bad_prompt_version2.pt" "https://huggingface.co/datasets/Nerfgun3/bad_prompt/resolve/72fd9d6011c2ba87b5847b7e45e6603917e3cbed/bad_prompt_version2.pt" - -## ---------------------------- - -## Checkpoint · The Ally's Mix III: Revolutions: -download-model --checkpoint 
"theAllysMixIII_v10.safetensors" "https://civitai.com/api/download/models/12763?type=Model&format=SafeTensor" - -## Checkpoint · Dreamlike Diffusion 1.0: -# download-model --checkpoint "dreamlike-diffusion-1.0.safetensors" "https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0/resolve/00cbe4d56fd56f45e952a5be4d847f21b9782546/dreamlike-diffusion-1.0.safetensors" - -## Checkpoint · Dreamshaper 3.31: -# download-model --checkpoint "DreamShaper_3.31_baked_vae-inpainting.inpainting.safetensors" "https://huggingface.co/Lykon/DreamShaper/resolve/d227e39aab5e360aec6401be916025ddfc8127bd/DreamShaper_3.31_baked_vae-inpainting.inpainting.safetensors" - -## Checkpoint · dalcefo_painting: -# download-model --checkpoint "dalcefoPainting_2nd.safetensors" "https://civitai.com/api/download/models/14675?type=Pruned%20Model&format=SafeTensor" - -## Checkpoint · Deliberate v2: -# download-model --checkpoint "deliberate_v2.safetensors" "https://civitai.com/api/download/models/15236?type=Model&format=SafeTensor" - -## Checkpoint · RPG v4: -# download-model --checkpoint "RPG-v4.safetensors" "https://huggingface.co/Anashel/rpg/resolve/main/RPG-V4-Model-Download/RPG-v4.safetensors" - -## Checkpoint · A to Zovya RPG Artist's Tools (SD 1.5): -# download-model --checkpoint "AtoZovyaRPGArtistTools15_sd15V1.safetensors" "https://civitai.com/api/download/models/10185" - -## Checkpoint · A to Zovya RPG Artist's Tools (SD 2.1): -# download-model --checkpoint "AtoZovyaRPGArtistTools15_sd21768V1.safetensors" "https://civitai.com/api/download/models/9593?type=Model&format=SafeTensor" -# download-model --checkpoint "aToZovyaRPGArtistsTools15_sd21768V1.yaml" "https://civitai.com/api/download/models/9593?type=Config&format=Other" - -## ---------------------------- - -## Add additional models that you want to install on startup. Replace URL and FILENAME from the examples below with your values. - -## Usage: -## download-model --checkpoint -## download-model --lora -## download-model --vae -## download-model --control-net -## download-model --embedding - -## ---------------------------- - -## Checkpoint · Example: -# download-model --checkpoint "FILENAME" "URL" - -## LORA (low-rank adaptation) · Example: -# download-model --lora "FILENAME" "URL" - -## VAE (variational autoencoder) · Example: -# download-model --vae "FILENAME" "URL" diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/runtime/inference.py b/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/runtime/inference.py deleted file mode 100644 index 21e9125b24f9865b89cff29063ad997e77297d21..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/runtime/inference.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES -# SPDX-License-Identifier: MIT - -from typing import List - -import torch -import torch.nn as nn -from torch.nn.parallel import DistributedDataParallel -from torch.utils.data import DataLoader -from tqdm import tqdm - -from se3_transformer.runtime import gpu_affinity -from se3_transformer.runtime.arguments import PARSER -from se3_transformer.runtime.callbacks import BaseCallback -from se3_transformer.runtime.loggers import DLLogger -from se3_transformer.runtime.utils import to_cuda, get_local_rank - - -@torch.inference_mode() -def evaluate(model: nn.Module, - dataloader: DataLoader, - callbacks: List[BaseCallback], - args): - model.eval() - for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), unit='batch', desc=f'Evaluation', - leave=False, disable=(args.silent or get_local_rank() != 0)): - *input, target = to_cuda(batch) - - for callback in callbacks: - callback.on_batch_start() - - with torch.cuda.amp.autocast(enabled=args.amp): - pred = model(*input) - - for callback in callbacks: - callback.on_validation_step(input, target, pred) - - -if __name__ == '__main__': - from se3_transformer.runtime.callbacks import QM9MetricCallback, PerformanceCallback - from se3_transformer.runtime.utils import init_distributed, seed_everything - from se3_transformer.model import SE3TransformerPooled, Fiber - from se3_transformer.data_loading import QM9DataModule - import torch.distributed as dist - import logging - import sys - - is_distributed = init_distributed() - local_rank = get_local_rank() - args = PARSER.parse_args() - - logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO) - - logging.info('====== SE(3)-Transformer ======') - logging.info('| Inference on the test set |') - logging.info('===============================') - - if not args.benchmark and args.load_ckpt_path is None: - logging.error('No load_ckpt_path provided, you need to provide a saved model to evaluate') - sys.exit(1) - - if args.benchmark: - logging.info('Running benchmark mode with one warmup pass') - - if args.seed is not None: - seed_everything(args.seed) - - major_cc, minor_cc = torch.cuda.get_device_capability() - - logger = DLLogger(args.log_dir, filename=args.dllogger_name) - datamodule = QM9DataModule(**vars(args)) - model = SE3TransformerPooled( - fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}), - fiber_out=Fiber({0: args.num_degrees * args.num_channels}), - fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}), - output_dim=1, - tensor_cores=(args.amp and major_cc >= 7) or major_cc >= 8, # use Tensor Cores more effectively - **vars(args) - ) - callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='test')] - - model.to(device=torch.cuda.current_device()) - if args.load_ckpt_path is not None: - checkpoint = torch.load(str(args.load_ckpt_path), map_location={'cuda:0': f'cuda:{local_rank}'}) - model.load_state_dict(checkpoint['state_dict']) - - if is_distributed: - nproc_per_node = 
torch.cuda.device_count() - affinity = gpu_affinity.set_affinity(local_rank, nproc_per_node) - model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank) - - test_dataloader = datamodule.test_dataloader() if not args.benchmark else datamodule.train_dataloader() - evaluate(model, - test_dataloader, - callbacks, - args) - - for callback in callbacks: - callback.on_validation_end() - - if args.benchmark: - world_size = dist.get_world_size() if dist.is_initialized() else 1 - callbacks = [PerformanceCallback(logger, args.batch_size * world_size, warmup_epochs=1, mode='inference')] - for _ in range(6): - evaluate(model, - test_dataloader, - callbacks, - args) - callbacks[0].on_epoch_end() - - callbacks[0].on_fit_end() diff --git a/spaces/merve/anonymization/public/measuring-fairness/graph-scroll.css b/spaces/merve/anonymization/public/measuring-fairness/graph-scroll.css deleted file mode 100644 index e3757d99ca305478165c6f7e4781ec0ce95b6291..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/measuring-fairness/graph-scroll.css +++ /dev/null @@ -1,119 +0,0 @@ -#container{ - position: relative; - width: auto; -} - -#sections{ - width: 340px; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; -} -#sections > div:first-child{ - opacity: 1; -} -#sections > div:last-child{ - /*padding-bottom: 80vh;*/ - padding-bottom: 80px; - margin-bottom: 0px; -} -#sections > div:first-child > h1{ - padding-top: 40px; -} - -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; - font-family: 'Google Sans', sans-serif; - -} - -.slider{ - font-family: 'Google Sans', sans-serif; -} - -#sections h1{ - text-align: left !important; -} - -@media (max-width: 1000px) and (min-width: 926px){ - #sections{ - margin-left: 20px; - } -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - margin-left: 10px; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - top: 0px; - } - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - .mini, .slider, i, .gated{ - margin: 0px auto; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -140px; - } - - #sections > div:last-child{ - padding-bottom: 0px; - margin-bottom: 0px; - } - - - #sections h1{ - margin: 10px; - padding-top: 0px !important; - } - - #sections h3{ - margin-top: .5em; - } - -} diff --git a/spaces/merve/anonymization/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html b/spaces/merve/anonymization/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html deleted file mode 100644 index bd51a96a0e44f236d2fef909e99ce49251683407..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/test.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - -
    - - - - - diff --git a/spaces/merve/anonymization/source/hidden-bias/script.js b/spaces/merve/anonymization/source/hidden-bias/script.js deleted file mode 100644 index 526901a0178a3ef069380410dd33fdc0334f2bae..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/hidden-bias/script.js +++ /dev/null @@ -1,467 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - -var ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -var colors = { - m: '#7DDAD3', - f: '#9B86EF', - h: '#F0BD80', - l: '#FF777B', - grey: '#ccc', -} - - -var totalWidth = width = d3.select('#graph').node().offsetWidth -var r = 40 - -var sel = d3.select('#graph').html('') - .append('div') - -var extraWidth = d3.clamp(500, innerHeight - 150, innerWidth - 500) -var scale = extraWidth/500 -scale = 1 -sel.st({transform: `scale(${scale})`, transformOrigin: '0% 0%'}) - -var c = d3.conventions({ - sel, - totalWidth, - totalHeight: totalWidth, - margin: {left: 25, right: 7}, - layers: 'sd', -}) -var divSel = c.layers[1] - -c.x.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) -c.y.domain([1, 4]).clamp(true).interpolate(d3.interpolateRound) - -c.xAxis.ticks(3).tickFormat(d3.format('.1f')) -c.yAxis.ticks(3).tickFormat(d3.format('.1f')) -d3.drawAxis(c) - -var axis2Sel= c.svg.append('g.axis').append('line') - .translate(Math.round(c.y(2)) + .5, 1) - .at({x2: c.width, stroke: '#000', opacity: 0}) - -var meanGPADiff = .6 - -var seed = new Math.seedrandom('hii') -var students = d3.range(150).map((d, index) => { - var collegeGPA = d3.randomUniform.source(seed)(1, 4)() - - // if (index == 93) collegeGPA = 2.05 - // if (index == 87) collegeGPA = 2.15 - // if (index == 32) collegeGPA = 2.25 - if (index == 131) collegeGPA = 3.9 - - // var hsGPA = collegeGPA*d3.randomNormal(1, .4)() - var hsGPA = collegeGPA + d3.randomNormal.source(seed)(meanGPADiff, .8)() - var hsGPAadjusted = hsGPA - meanGPADiff - - var rand = d3.randomUniform.source(seed)(0, 1) - - var isMale = rand() < .5 - var name = names[isMale ? 'm' : 'f'][Math.floor(d/2)] - var lastName = names.last[d] - var maleOffset = rand()*(isMale ? 1 : -1)*.6 - - // if (index == 47) name = 'Mia' - // if (index == 82) name = 'Mason' - - - var compGPA0 = lerp(hsGPAadjusted, collegeGPA, rand()*.7) + maleOffset - var compGPA1 = lerp(compGPA0, collegeGPA + maleOffset, rand()*1.1) - var compGPA2 = compGPA1 + rand()/4 - 1/4/2 - // var compGPA0 = collegeGPA + d3.randomNormal.source(seed)(0, .5)() - // var compGPA1 = collegeGPA + d3.randomNormal.source(seed)(0, .3)() - - if (index == 69){ - compGPA1 = 2.0 - } - if (index == 37){ - compGPA1 = 2.0 - } - - - var isLowIncome = rand() < .5 - - var inteviewGPA = collegeGPA + d3.randomNormal.source(seed)(0, .15)() - var inteviewGPAbias = inteviewGPA + rand()*(isLowIncome ? 
-1 : 1)*.5 - - // if (index == 115) name = 'Mason' - // if (index == 32) name = 'Mia' - - if (name == 'Camila') name = 'Mia' - - - return {name, index, lastName, collegeGPA, hsGPA, hsGPAadjusted, compGPA0, compGPA1, compGPA2, isMale, isLowIncome, inteviewGPA, inteviewGPAbias} -}) - -students = _.sortBy(students, d => d.collegeGPA) - -students = students.filter(d => { - return d3.entries(d).every(({key, value}) => { - if (!key.includes('GPA')) return true - - return 1 < value && value < 4.0 - }) -}) - - -c.svg.append('path') - .at({ - d: ['M', 0, c.height, 'L', c.width, 0].join(' '), - stroke: '#ccc', - strokeWidth: 2, - strokeDasharray: '4 2' - }) - -!(function(){ - // return window.annotationSel = d3.select(null) - var isDrag = 0 - if (!isDrag) annotations.forEach(d => d.text = d.html ? '' : d.text) - if (isDrag){ - d3.select('#sections').st({pointerEvents: 'none'}) - } - - // copy('window.annotations = ' + JSON.stringify(annotations, null, 2)) - var swoopy = d3.swoopyDrag() - .x(d => c.x(d.x)) - .y(d => c.y(d.y)) - .draggable(isDrag) - .annotations(annotations) - .on('drag', d => { - - }) - - - var htmlAnnoSel = divSel.appendMany('div.annotation', annotations.filter(d => d.html)) - .translate(d => [c.x(d.x), c.y(d.y)]).st({position: 'absolute', opacity: 0}) - .append('div') - .translate(d => d.textOffset) - .html(d => d.html) - .st({width: 150}) - - - - var swoopySel = c.svg.append('g.annotations').call(swoopy) - - c.svg.append('marker') - .attr('id', 'arrow') - .attr('viewBox', '-10 -10 20 20') - .attr('markerWidth', 20) - .attr('markerHeight', 20) - .attr('orient', 'auto') - .append('path') - .attr('d', 'M-6.75,-6.75 L 0,0 L -6.75,6.75') - - swoopySel.selectAll('path') - .attr('marker-end', 'url(#arrow)') - .st({'opacity': d => d.path == 'M 0 0' ? 0 : 1}) - window.annotationSel = swoopySel.selectAll('g') - .st({fontSize: 12, opacity: d => d.slide == 0 ? 1 : 0}) - - window.annotationSel = d3.selectAll('g.annotations g, div.annotation') - - swoopySel.selectAll('text') - .each(function(d){ - d3.select(this) - .text('') //clear existing text - .tspans(d3.wordwrap(d.text, d.width || 20), 13) //wrap after 20 char - }) - })() - - - -students = _.sortBy(students, d => d.collegeGPA) -var lineSel = c.svg.appendMany('path', students) - .translate(d => [c.x(d.hsGPA), c.y(d.collegeGPA)]) - .at({ - // fill: d => d.hsGPA > d.collegeGPA ? 'blue' : 'orange', - fill: '#eee', - stroke: '#aaa', - strokeWidth: .5, - opacity: 0, - // strokeWidth: 1/scale, - }) - - -var circleSel = c.svg.appendMany('g', students) - .translate(d => [c.x(d.collegeGPA), c.y(d.hsGPA)]) - .call(d3.attachTooltip) - .on('mouseover', d => { - var html = '' - html += `
    ${d.name} ${d.lastName}
    ` - - if (curSlide.circleFill == 'gender'){ - html += `${d.isMale ? 'Male' : 'Female'}` - } - - if (curSlide.circleFill == 'income'){ - html += `${d.isLowIncome ? 'Low Income' : 'High Income'}` - } - html += ` -
    ${d3.format('.2f')(d[curSlide.yKey]).slice(0, 4)} ${curSlide.index ? 'Predicted' : 'High School'} GPA
    ${d3.format('.2f')(d.collegeGPA).slice(0, 4)} College GPA
    ` - - ttSel.html(html) - }) - - -var innerCircleSel = circleSel.append('circle') - .at({ - r: 5, - fill: '#eee', - stroke: '#aaa' - }) - -// var textSel = circleSel.append('text').text(d => d.isMale ? 'M' : 'F') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, fill: '#eee'}) -// var textSel2 = circleSel.append('text').text(d => d.isLowIncome ? 'L' : 'H') -// .at({textAnchor: 'middle', dy: '.33em', fontSize: 8, opacity: 0}) - - -c.svg.select('.y').selectAll('line').filter(d => d == 4) - .remove() -c.svg.select('.y').selectAll('text').filter(d => d == 4) - .select(function() { - return this.parentNode.insertBefore(this.cloneNode(1), this.nextSibling); - }) - .text('Actual College GPA') - .at({x: c.width/2, y: c.height + 35, textAnchor: 'middle', fontWeight: 800}) - -var yLabelSel = divSel.st({pointerEvents: 'none'}).append('div.axis') - .html('High School GPA') - .translate([0, -9]) - .st({textAlign: 'left', maxWidth: 260}) - -// c.svg.append('text').text('Actual College GPA').st({fontWeight: 800}) - -var longLabel = 'high school GPA, essay, clubs, zip code, teacher recommendations, sports, AP scores, demonstrated interest, gender, SAT scores, interviews, portfolio, race, work experience' - -var slides = [ - { - yKey: 'hsGPA', - isLineVisible: 0, - yLabel: 'High School GPA', - circleFill: 'grey', - circleFillDelay: d => 0, - }, - - { - yKey: 'hsGPA', - isLineVisible: true, - yLabel: 'High School GPA' - }, - - { - yKey: 'hsGPAadjusted', - yLabel: 'high school GPA' - }, - - { - yKey: 'compGPA0', - yLabel: 'high school GPA, essay, clubs, zip code'.replace('essay', 'essay') + '' - }, - - { - yKey: 'compGPA1', - yLabel: longLabel.replace('teacher', 'teacher') + '', - circleFill: 'grey', - circleFillDelay: d => 0, - textFill: '#eee', - }, - - { - yKey: 'compGPA1', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: (d, i) => i*20 + (d.isMale ? 0 : 2000), - textFill: '#000', - }, - - { - name: 'proxyHighlight', - yKey: 'compGPA2', - yLabel: longLabel, - circleFill: 'gender', - circleFillDelay: d => 0, - textFill: '#000', - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'grey', - text2Opacity: 0, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview', - yKey: 'inteviewGPAbias', - circleFill: 'income', - circleFillDelay: (d, i) => i*20 + (!d.isLowIncome ? 2000 : 0), - text2Opacity: 1, - }, - - { - textFill: '#eee', - yLabel: 'Alumni interview, household income'.replace('household', 'household') + '', - yKey: 'inteviewGPA', - text2Opacity: 1, - }, -] - -slides.forEach(d => { - if (d.name == 'proxyHighlight'){ - var proxies = 'clubs, interviews, portfolio, sports'.split(', ') - d.yLabel = d.yLabel - .split(', ') - .map(d => { - if (d == 'gender') return `gender` - if (!proxies.includes(d)) return d - - return `${d}` - }) - .join(', ') - } - - - if (d.yLabel[0] != '<') d.yLabel = 'Predicted College GPA using ' + d.yLabel.replace('School', 'school') -}) - -var keys = [] -slides.forEach(d => keys = keys.concat(d3.keys(d))) -_.uniq(keys).forEach(str => { - var prev = null - slides.forEach(d => { - if (typeof(d[str]) === 'undefined'){ - d[str] = prev - } - prev = d[str] - }) -}) - -slides.forEach((d, i) => { - d.circleFillFn = { - grey: d => '#eee', - gender: d => d.isMale ? colors.m : colors.f, - income: d => d.isLowIncome ? 
colors.l : colors.h, - }[d.circleFill] - - d.index = i -}) - - - - -var gs = d3.graphScroll() - .container(d3.select('.container-1')) - .graph(d3.selectAll('container-1 #graph')) - .eventId('uniqueId1') - .sections(d3.selectAll('.container-1 #sections > div')) - .offset(innerWidth < 900 ? 300 : 520) - .on('active', updateSlide) - - -var prevSlide = -1 -function updateSlide(i){ - var slide = slides[i] - if (!slide) return - curSlide = slide - var {yKey} = slide - - lineSel.transition('yKey').duration(500) - .at({ - d: d => [ - 'M 5 0', - 'C 0 0', - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 0, c.y(d['collegeGPA']) - c.y(d[yKey]), - 'S 0 0 -5.5 0' - ].join(' ') - }) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - - circleSel.transition('yKey').duration(500) - .translate(d => [c.x(d.collegeGPA), c.y(d[yKey])]) - - innerCircleSel.transition('colorFill').duration(30) - .delay(slide.circleFillDelay) - .at({ - fill: slide.circleFillFn, - stroke: d => d3.color(slide.circleFillFn(d)).darker(1.5) - }) - - axis2Sel.transition() - .st({opacity: i == 5 ? 1 : 0}) - - lineSel.transition('opacity').duration(500) - .st({ - opacity: slide.isLineVisible ? 1 : 0 - }) - - if (slide.yLabel) yLabelSel.html(slide.yLabel) - - - annotationSel.transition() - .st({opacity: d => i == d.slide ? 1 : 0}) - - - - prevSlide = i -} - -slide = slides[0] - - - - -d3.selectAll('.circle').each(function(){ - var d = d3.select(this).attr('class').split(' ')[0] - - d3.select(this) - .st({ - backgroundColor: d3.color(colors[d]), - borderColor: d3.color(colors[d]).darker(1.5), - }) - - -}) - - - - -function lerp(a, b, t){ return a + t*(b - a) } - - - -c.svg.selectAll('g.annotations').raise() - - - -d3.selectAll('#sections img').attr('aria-hidden', true) - - - - - - - - diff --git a/spaces/merve/uncertainty-calibration/public/anonymization/make-axii.js b/spaces/merve/uncertainty-calibration/public/anonymization/make-axii.js deleted file mode 100644 index c69b5eba387ec07f01ce2849726fda5461002aef..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/anonymization/make-axii.js +++ /dev/null @@ -1,86 +0,0 @@ -window.makeAxii = function(){ - - var stateScale = d3.scaleBand().domain(states).range(c.x.range()) - var stateAxis = c.svg.append('g.axis.state.init-hidden') - - var bw = stateScale.bandwidth()/2 - - stateAxis.appendMany('text', states) - .translate(d => [stateScale(d) + bw, c.height + 22]) - .text(d => d) - .at({ - textAnchor: 'middle', - }) - .st({fill: '#444'}) - - stateAxis.appendMany('path', d3.range(ages.length + 1)) - .at({ - d: d => ['M', d*c.width/(ages.length), '0 V', c.height].join(' '), - stroke: '#aaa', - }) - - stateAxis.append('text.bold').text('Home State') - .translate([c.width/2, c.height + 45]) - .at({textAnchor: 'middle'}) - - var ageScale = d3.scaleBand().domain(ages.slice().reverse()).range(c.x.range()) - var ageAxis = c.svg.append('g.axis.age.init-hidden') - - ageAxis.appendMany('text', ages) - .translate(d => [-30, ageScale(d) + bw]) - .text(d => d) - .at({dy: '.33em'}) - .st({fill: '#444'}) - - ageAxis.appendMany('path', d3.range(ages.length + 1)) - .at({ - d: d => ['M 0', d*c.width/(ages.length), 'H', c.width].join(' '), - stroke: '#aaa', - }) - - if (scale == 1){ - ageAxis - .append('g').translate([-43, c.height/2]) - .append('text.bold').text('Age') - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - } else { - ageAxis - .append('g').translate([-22, 14]) - .append('text.bold').text('Age') - .at({textAnchor: 'middle'}) - } - - var seasonAxis = 
c.svg.append('g.axis.state.init-hidden').lower() - seasonAxis.appendMany('g', ages) - .translate(d => ageScale(d), 1) - .appendMany('path', d3.range(1, 4)) - .at({ - d: d => ['M 0', d*bw/4*2, 'H', c.width].join(' '), - stroke: '#ddd', - }) - - var headAxis = c.svg.append('g.axis.state.init-hidden') - headAxis.appendMany('text.bold', ['Heads', 'Tails']) - .text(d => d) - .translate((d, i) => [i ? c.width/4*3 + 20 : c.width/4 - 20, 88]) - .at({textAnchor: 'middle'}) - - - var headCaptionAxis = c.svg.append('g.axis.state.init-hidden') - headCaptionAxis.appendMany('text', ['reports plagiarism', 'reports truth']) - .text(d => d) - .translate((d, i) => [i ? c.width/4*3 + 20 : c.width/4 - 20, 88 + 15]) - .at({textAnchor: 'middle'}) - .st({fill: '#444'}) - - - return {stateScale, stateAxis, headAxis, headCaptionAxis, ageScale, ageAxis, bw, seasonAxis} -} - - - - - - - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/mmlab-ntu/relate-anything-model/segment_anything/utils/onnx.py b/spaces/mmlab-ntu/relate-anything-model/segment_anything/utils/onnx.py deleted file mode 100644 index 4297b31291e036700d6ad0b818afb7dd72da3054..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/relate-anything-model/segment_anything/utils/onnx.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -from torch.nn import functional as F - -from typing import Tuple - -from ..modeling import Sam -from .amg import calculate_stability_score - - -class SamOnnxModel(nn.Module): - """ - This model should not be called directly, but is used in ONNX export. - It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, - with some functions modified to enable model tracing. Also supports extra - options controlling what information. See the ONNX export script for details. 
- """ - - def __init__( - self, - model: Sam, - return_single_mask: bool, - use_stability_score: bool = False, - return_extra_metrics: bool = False, - ) -> None: - super().__init__() - self.mask_decoder = model.mask_decoder - self.model = model - self.img_size = model.image_encoder.img_size - self.return_single_mask = return_single_mask - self.use_stability_score = use_stability_score - self.stability_score_offset = 1.0 - self.return_extra_metrics = return_extra_metrics - - @staticmethod - def resize_longest_image_size( - input_image_size: torch.Tensor, longest_side: int - ) -> torch.Tensor: - input_image_size = input_image_size.to(torch.float32) - scale = longest_side / torch.max(input_image_size) - transformed_size = scale * input_image_size - transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) - return transformed_size - - def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: - point_coords = point_coords + 0.5 - point_coords = point_coords / self.img_size - point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) - point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) - - point_embedding = point_embedding * (point_labels != -1) - point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( - point_labels == -1 - ) - - for i in range(self.model.prompt_encoder.num_point_embeddings): - point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ - i - ].weight * (point_labels == i) - - return point_embedding - - def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: - mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) - mask_embedding = mask_embedding + ( - 1 - has_mask_input - ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) - return mask_embedding - - def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: - masks = F.interpolate( - masks, - size=(self.img_size, self.img_size), - mode="bilinear", - align_corners=False, - ) - - prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) - masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] - - orig_im_size = orig_im_size.to(torch.int64) - h, w = orig_im_size[0], orig_im_size[1] - masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) - return masks - - def select_masks( - self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Determine if we should return the multiclick mask or not from the number of points. - # The reweighting is used to avoid control flow. 
- score_reweight = torch.tensor( - [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] - ).to(iou_preds.device) - score = iou_preds + (num_points - 2.5) * score_reweight - best_idx = torch.argmax(score, dim=1) - masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) - iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) - - return masks, iou_preds - - @torch.no_grad() - def forward( - self, - image_embeddings: torch.Tensor, - point_coords: torch.Tensor, - point_labels: torch.Tensor, - mask_input: torch.Tensor, - has_mask_input: torch.Tensor, - orig_im_size: torch.Tensor, - ): - sparse_embedding = self._embed_points(point_coords, point_labels) - dense_embedding = self._embed_masks(mask_input, has_mask_input) - - masks, scores = self.model.mask_decoder.predict_masks( - image_embeddings=image_embeddings, - image_pe=self.model.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embedding, - dense_prompt_embeddings=dense_embedding, - ) - - if self.use_stability_score: - scores = calculate_stability_score( - masks, self.model.mask_threshold, self.stability_score_offset - ) - - if self.return_single_mask: - masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) - - upscaled_masks = self.mask_postprocessing(masks, orig_im_size) - - if self.return_extra_metrics: - stability_scores = calculate_stability_score( - upscaled_masks, self.model.mask_threshold, self.stability_score_offset - ) - areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) - return upscaled_masks, scores, stability_scores, areas, masks - - return upscaled_masks, scores, masks diff --git a/spaces/mohaktnbt/openai-whisper-large/app.py b/spaces/mohaktnbt/openai-whisper-large/app.py deleted file mode 100644 index 0d7ff1647cd2be49d72e567ea588323d68b37ae5..0000000000000000000000000000000000000000 --- a/spaces/mohaktnbt/openai-whisper-large/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/openai/whisper-large").launch() \ No newline at end of file diff --git a/spaces/mozilla-foundation/youtube_video_similarity/utils/helper_funcs.py b/spaces/mozilla-foundation/youtube_video_similarity/utils/helper_funcs.py deleted file mode 100644 index 0bae33f6e6d15c2f7cfa63a39e26951b5cf0f7a2..0000000000000000000000000000000000000000 --- a/spaces/mozilla-foundation/youtube_video_similarity/utils/helper_funcs.py +++ /dev/null @@ -1,96 +0,0 @@ -import itertools -import random -import requests -import pandas as pd -import gradio as gr -from pytube import YouTube -from youtube_transcript_api import YouTubeTranscriptApi -from youtube_transcript_api.formatters import TextFormatter - - -def is_youtube_video_available(url): - video = YouTube(url) - try: - video.title - return True - except: - return False - - -def get_example_videos(rr_examples_url, num_rr_examples): - example_videos = [['https://www.youtube.com/watch?v=WfVF-Ec4naQ', 'https://www.youtube.com/watch?v=4hrNt28t7Cw'], - ['https://www.youtube.com/watch?v=GbpjLP-UvIU', - 'https://www.youtube.com/watch?v=BlQ2mP2EE4A'], - ['https://www.youtube.com/watch?v=fdzY1f2P91k', - 'https://www.youtube.com/watch?v=BlQ2mP2EE4A'], - ['https://www.youtube.com/watch?v=fdzY1f2P91k', 'https://www.youtube.com/watch?v=9gIVGJQ3xWE']] - example_videos = [ex for ex in example_videos if is_youtube_video_available( - ex[0]) and is_youtube_video_available(ex[1])] - - try: - example_videos_rr = requests.get(rr_examples_url).json() - except: - example_videos_rr = [] - example_videos_rr = 
[[f'https://www.youtube.com/watch?v={ex["rejected_video_id"]}', - f'https://www.youtube.com/watch?v={ex["recommendation_id"]}'] for ex in example_videos_rr] - # remove duplicate video pairs, there seems to be one duplicate - example_videos_rr.sort() - example_videos_rr = list(example_videos_rr for example_videos_rr, - _ in itertools.groupby(example_videos_rr)) - example_videos_rr = [ex for ex in example_videos_rr if is_youtube_video_available( - ex[0]) and is_youtube_video_available(ex[1])] - if len(example_videos_rr) > num_rr_examples: - example_videos_rr = random.sample(example_videos_rr, num_rr_examples) - - return example_videos, example_videos_rr - - -def get_youtube_embedded_html(embed_url, video_position): - return f''' -

    Video {video_position}

    - - ''' - - -def update_youtube_embedded_html(video_url, video_position): - try: - embed_url = YouTube(video_url).embed_url - except: - return f''' -

    There was an error fetching details for the video with the URL: {video_url}

    - ''' - return get_youtube_embedded_html(embed_url, video_position) - - -def get_youtube_video_data(url): - try: - video = YouTube(url) - except: - raise gr.Error(f'Could not find YouTube video with the URL {url}') - channel_id = video.channel_id - video_title = video.title - video_description = video.description - - try: - transcript_list = YouTubeTranscriptApi.list_transcripts(video.video_id) - except: - return channel_id, video_title, video_description, None - - available_non_common_langs = [tr.language_code for tr in list( - transcript_list) if tr.language_code not in ['en', 'en-US', 'es', 'de']] - video_transcript = YouTubeTranscriptApi.get_transcript( - video.video_id, languages=['en', 'en-US', 'es', 'de'] + available_non_common_langs) - video_transcript = TextFormatter().format_transcript( - video_transcript).replace('\n', ' ') - return channel_id, video_title, video_description, video_transcript - - -def get_input_data_df(video1_url, video2_url): - channel_id, video_title, video_description, video_transcript = get_youtube_video_data( - video1_url) - channel_id2, video_title2, video_description2, video_transcript2 = get_youtube_video_data( - video2_url) - channel_sim = 1 if channel_id == channel_id2 else 0 - df = pd.DataFrame([[video_title, video_description, video_transcript] + [video_title2, video_description2, video_transcript2] + [channel_sim]], columns=[ - 'regret_title', 'regret_description', 'regret_transcript', 'recommendation_title', 'recommendation_description', 'recommendation_transcript', 'channel_sim']) - return df diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py b/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py deleted file mode 100644 index 72b92a341dcd1b82035af72b8a6b4edc65783ecc..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -from collections import defaultdict -import numpy as np -from misc.bleu_utils import sentence_bleu -import json -import warnings - - -def get_args(): - import argparse - - parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2") - parser.add_argument('--asr-transcript', type=str, - help='Path to the transcript file.') - parser.add_argument('--prompts-description', type=str, - help='Path to the ground-truth continuation') - parser.add_argument('--manifest', type=str, required=True) - parser.add_argument('--take-shortest', type=int, default=1000) - - args = parser.parse_args() - - return args - - -def main(): - # NLTK produces warnings - warnings.filterwarnings("ignore") - - args = get_args() - - with open(args.prompts_description, 'r') as fin: - original_continuations = json.loads(fin.read()) - - sequence2length = [(k, v[0]) for k, v in original_continuations.items()] - assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds - - sequence2length.sort(key=lambda x: x[1]) - to_take = set(v[0] for v in sequence2length[:args.take_shortest]) - - with open(args.manifest, 'r') as fin: - fin.readline() - - linenum2file = dict([ - (i, l.split("__")[0]) for (i, l) in enumerate(fin) - ]) - - max_files = max(linenum2file.keys()) - continuations = defaultdict(list) - - mean_length_after = 0 - n_examples = 0 - - with open(args.asr_transcript, 'r') as fin: - for line in fin: - n_examples += 1 - line = line.split() - sequence_id = int(line[-1].split('-')[1][:-1]) - - assert sequence_id <= max_files - - sequence_name = linenum2file[sequence_id] - - continuations[sequence_name].append(line[:-1]) - mean_length_after += len(line) - - mean_length_after /= n_examples - print(f'Mean length of continuations, in words: {mean_length_after}') - metric_values = [] - - mean_ground_truth_words = 0 - n_examples = 0 - n_candidates = 0 - - for k, candidates in continuations.items(): - if k not in to_take: - continue - - n_examples += 1 - - ground_truth = original_continuations[k][1].split() - n_candidates += len(candidates) - bleu = sentence_bleu(candidates, ground_truth, weights=( - 0.5, 0.5), no_length_penalty=True, averaging_mode="geometric") - mean_ground_truth_words += len(ground_truth) - - metric_values.append(bleu) - - n = len(metric_values) - print( - f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}') - - -if __name__ == '__main__': - main() diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh deleted file mode 100644 index a7ea3877beefe1d4d53f9f7e32b004d8ce01e22a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang_word.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -num_sil_states=3 -num_nonsil_states=1 - -. ./cmd.sh -. ./path.sh -. 
parse_options.sh - -set -eux - -dict=$1 -data_dir=$2 -lexicon=$3 - -dict_dir=$data_dir/local/dict_word -tmplm_dir=$data_dir/local/lang_tmp_word -lm_dir=$data_dir/lang_word - -mkdir -p $dict_dir $tmplm_dir $lm_dir - -# prepare dict -echo "SIL" > $dict_dir/silence_phones.txt -echo "SIL" > $dict_dir/optional_silence.txt -awk '{print $1}' $dict > $dict_dir/nonsilence_phones.txt - -(echo "!SIL SIL"; echo " SIL";) | cat - $lexicon > $dict_dir/lexicon.txt - -echo "SIL" > $dict_dir/extra_questions.txt -awk '{printf $1" "} END {printf "\n"}' $dict >> $dict_dir/extra_questions.txt - -# prepare lang -utils/prepare_lang.sh --position-dependent-phones false \ - --num_sil_states $num_sil_states --num_nonsil_states $num_nonsil_states \ - $dict_dir "" $tmplm_dir $lm_dir diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/quantization/pq/pq.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/quantization/pq/pq.py deleted file mode 100644 index eddc2eb34602403f10979f54cd23a45bc2f104d5..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/quantization/pq/pq.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .em import EM, EmptyClusterResolveError - - -class PQ(EM): - """ - Quantizes the layer weights W with the standard Product Quantization - technique. This learns a codebook of codewords or centroids of size - block_size from W. For further reference on using PQ to quantize - neural networks, see "And the Bit Goes Down: Revisiting the Quantization - of Neural Networks", Stock et al., ICLR 2020. - - PQ is performed in two steps: - (1) The matrix W (weights or fully-connected or convolutional layer) - is reshaped to (block_size, -1). - - If W is fully-connected (2D), its columns are split into - blocks of size block_size. - - If W is convolutional (4D), its filters are split along the - spatial dimension. - (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix. - - Args: - - W: weight matrix to quantize of size (in_features x out_features) - - block_size: size of the blocks (subvectors) - - n_centroids: number of centroids - - n_iter: number of k-means iterations - - eps: for cluster reassignment when an empty cluster is found - - max_tentatives for cluster reassignment when an empty cluster is found - - verbose: print information after each iteration - - Remarks: - - block_size be compatible with the shape of W - """ - - def __init__( - self, - W, - block_size, - n_centroids=256, - n_iter=20, - eps=1e-6, - max_tentatives=30, - verbose=True, - ): - self.block_size = block_size - W_reshaped = self._reshape(W) - super(PQ, self).__init__( - W_reshaped, - n_centroids=n_centroids, - n_iter=n_iter, - eps=eps, - max_tentatives=max_tentatives, - verbose=verbose, - ) - - def _reshape(self, W): - """ - Reshapes the matrix W as expained in step (1). 
- """ - - # fully connected: by convention the weight has size out_features x in_features - if len(W.size()) == 2: - self.out_features, self.in_features = W.size() - assert ( - self.in_features % self.block_size == 0 - ), "Linear: n_blocks must be a multiple of in_features" - return ( - W.reshape(self.out_features, -1, self.block_size) - .permute(2, 1, 0) - .flatten(1, 2) - ) - - # convolutional: we reshape along the spatial dimension - elif len(W.size()) == 4: - self.out_channels, self.in_channels, self.k_h, self.k_w = W.size() - assert ( - self.in_channels * self.k_h * self.k_w - ) % self.block_size == 0, ( - "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w" - ) - return ( - W.reshape(self.out_channels, -1, self.block_size) - .permute(2, 1, 0) - .flatten(1, 2) - ) - # not implemented - else: - raise NotImplementedError(W.size()) - - def encode(self): - """ - Performs self.n_iter EM steps. - """ - - self.initialize_centroids() - for i in range(self.n_iter): - try: - self.step(i) - except EmptyClusterResolveError: - break - - def decode(self): - """ - Returns the encoded full weight matrix. Must be called after - the encode function. - """ - - # fully connected case - if "k_h" not in self.__dict__: - return ( - self.centroids[self.assignments] - .reshape(-1, self.out_features, self.block_size) - .permute(1, 0, 2) - .flatten(1, 2) - ) - - # convolutional case - else: - return ( - self.centroids[self.assignments] - .reshape(-1, self.out_channels, self.block_size) - .permute(1, 0, 2) - .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w) - ) diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/models/model_base.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/models/model_base.py deleted file mode 100644 index 1c3dd87b6d1aeef49afc73354a6ee5f2309429d4..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/models/model_base.py +++ /dev/null @@ -1,114 +0,0 @@ - -from pathlib import Path -import json - -import torch -import torch.nn as nn -import torch.nn.functional as F -import pytorch_lightning as pl -from pytorch_lightning.utilities.cloud_io import load as pl_load -from pytorch_lightning.utilities.migration import pl_legacy_patch - -class VeryBasicModel(pl.LightningModule): - def __init__(self): - super().__init__() - self.save_hyperparameters() - self._step_train = 0 - self._step_val = 0 - self._step_test = 0 - - - def forward(self, x_in): - raise NotImplementedError - - def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int): - raise NotImplementedError - - def training_step(self, batch: dict, batch_idx: int, optimizer_idx:int = 0 ): - self._step_train += 1 # =self.global_step - return self._step(batch, batch_idx, "train", self._step_train, optimizer_idx) - - def validation_step(self, batch: dict, batch_idx: int, optimizer_idx:int = 0): - self._step_val += 1 - return self._step(batch, batch_idx, "val", self._step_val, optimizer_idx ) - - def test_step(self, batch: dict, batch_idx: int, optimizer_idx:int = 0): - self._step_test += 1 - return self._step(batch, batch_idx, "test", self._step_test, optimizer_idx) - - def _epoch_end(self, outputs: list, state: str): - return - - def training_epoch_end(self, outputs): - self._epoch_end(outputs, "train") - - def validation_epoch_end(self, outputs): - self._epoch_end(outputs, "val") - - def test_epoch_end(self, outputs): - self._epoch_end(outputs, "test") - - @classmethod - def save_best_checkpoint(cls, 
path_checkpoint_dir, best_model_path): - with open(Path(path_checkpoint_dir) / 'best_checkpoint.json', 'w') as f: - json.dump({'best_model_epoch': Path(best_model_path).name}, f) - - @classmethod - def _get_best_checkpoint_path(cls, path_checkpoint_dir, version=0, **kwargs): - path_version = 'lightning_logs/version_'+str(version) - with open(Path(path_checkpoint_dir) / path_version/ 'best_checkpoint.json', 'r') as f: - path_rel_best_checkpoint = Path(json.load(f)['best_model_epoch']) - return Path(path_checkpoint_dir)/path_rel_best_checkpoint - - @classmethod - def load_best_checkpoint(cls, path_checkpoint_dir, version=0, **kwargs): - path_best_checkpoint = cls._get_best_checkpoint_path(path_checkpoint_dir, version) - return cls.load_from_checkpoint(path_best_checkpoint, **kwargs) - - def load_pretrained(self, checkpoint_path, map_location=None, **kwargs): - if checkpoint_path.is_dir(): - checkpoint_path = self._get_best_checkpoint_path(checkpoint_path, **kwargs) - - with pl_legacy_patch(): - if map_location is not None: - checkpoint = pl_load(checkpoint_path, map_location=map_location) - else: - checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage) - return self.load_weights(checkpoint["state_dict"], **kwargs) - - def load_weights(self, pretrained_weights, strict=True, **kwargs): - filter = kwargs.get('filter', lambda key:key in pretrained_weights) - init_weights = self.state_dict() - pretrained_weights = {key: value for key, value in pretrained_weights.items() if filter(key)} - init_weights.update(pretrained_weights) - self.load_state_dict(init_weights, strict=strict) - return self - - - - -class BasicModel(VeryBasicModel): - def __init__(self, - optimizer=torch.optim.AdamW, - optimizer_kwargs={'lr':1e-3, 'weight_decay':1e-2}, - lr_scheduler= None, - lr_scheduler_kwargs={}, - ): - super().__init__() - self.save_hyperparameters() - self.optimizer = optimizer - self.optimizer_kwargs = optimizer_kwargs - self.lr_scheduler = lr_scheduler - self.lr_scheduler_kwargs = lr_scheduler_kwargs - - def configure_optimizers(self): - optimizer = self.optimizer(self.parameters(), **self.optimizer_kwargs) - if self.lr_scheduler is not None: - lr_scheduler = self.lr_scheduler(optimizer, **self.lr_scheduler_kwargs) - return [optimizer], [lr_scheduler] - else: - return [optimizer] - - - - \ No newline at end of file diff --git a/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/txt2img.py b/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/txt2img.py deleted file mode 100644 index 613de5e1df6150b85d043428f39ef19d107074f4..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/txt2img.py +++ /dev/null @@ -1,165 +0,0 @@ -import argparse, os, sys, glob -import torch -import numpy as np -from omegaconf import OmegaConf -from PIL import Image -from tqdm import tqdm, trange -from einops import rearrange -from torchvision.utils import make_grid - -from ldm.util import instantiate_from_config -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.plms import PLMSSampler - - -def load_model_from_config(config, ckpt, verbose=False): - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - sd = pl_sd["state_dict"] - model = instantiate_from_config(config.model) - m, u = model.load_state_dict(sd, strict=False) - if len(m) > 0 and verbose: - print("missing keys:") - print(m) - if len(u) > 0 and verbose: - print("unexpected keys:") - print(u) - - model.cuda() - 
model.eval() - return model - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--prompt", - type=str, - nargs="?", - default="a painting of a virus monster playing guitar", - help="the prompt to render" - ) - - parser.add_argument( - "--outdir", - type=str, - nargs="?", - help="dir to write results to", - default="outputs/txt2img-samples" - ) - parser.add_argument( - "--ddim_steps", - type=int, - default=200, - help="number of ddim sampling steps", - ) - - parser.add_argument( - "--plms", - action='store_true', - help="use plms sampling", - ) - - parser.add_argument( - "--ddim_eta", - type=float, - default=0.0, - help="ddim eta (eta=0.0 corresponds to deterministic sampling", - ) - parser.add_argument( - "--n_iter", - type=int, - default=1, - help="sample this often", - ) - - parser.add_argument( - "--H", - type=int, - default=256, - help="image height, in pixel space", - ) - - parser.add_argument( - "--W", - type=int, - default=256, - help="image width, in pixel space", - ) - - parser.add_argument( - "--n_samples", - type=int, - default=4, - help="how many samples to produce for the given prompt", - ) - - parser.add_argument( - "--scale", - type=float, - default=5.0, - help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", - ) - opt = parser.parse_args() - - - config = OmegaConf.load("configs/latent-diffusion/txt2img-1p4B-eval.yaml") # TODO: Optionally download from same location as ckpt and chnage this logic - model = load_model_from_config(config, "models/ldm/text2img-large/model.ckpt") # TODO: check path - - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model = model.to(device) - - if opt.plms: - sampler = PLMSSampler(model) - else: - sampler = DDIMSampler(model) - - os.makedirs(opt.outdir, exist_ok=True) - outpath = opt.outdir - - prompt = opt.prompt - - - sample_path = os.path.join(outpath, "samples") - os.makedirs(sample_path, exist_ok=True) - base_count = len(os.listdir(sample_path)) - - all_samples=list() - with torch.no_grad(): - with model.ema_scope(): - uc = None - if opt.scale != 1.0: - uc = model.get_learned_conditioning(opt.n_samples * [""]) - for n in trange(opt.n_iter, desc="Sampling"): - c = model.get_learned_conditioning(opt.n_samples * [prompt]) - shape = [4, opt.H//8, opt.W//8] - samples_ddim, _ = sampler.sample(S=opt.ddim_steps, - conditioning=c, - batch_size=opt.n_samples, - shape=shape, - verbose=False, - unconditional_guidance_scale=opt.scale, - unconditional_conditioning=uc, - eta=opt.ddim_eta) - - x_samples_ddim = model.decode_first_stage(samples_ddim) - x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) - - for x_sample in x_samples_ddim: - x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') - Image.fromarray(x_sample.astype(np.uint8)).save(os.path.join(sample_path, f"{base_count:04}.png")) - base_count += 1 - all_samples.append(x_samples_ddim) - - - # additionally, save as grid - grid = torch.stack(all_samples, 0) - grid = rearrange(grid, 'n b c h w -> (n b) c h w') - grid = make_grid(grid, nrow=opt.n_samples) - - # to image - grid = 255. 
* rearrange(grid, 'c h w -> h w c').cpu().numpy() - Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'{prompt.replace(" ", "-")}.png')) - - print(f"Your samples are ready and waiting four you here: \n{outpath} \nEnjoy.") diff --git a/spaces/multimodalart/stable-diffusion-inpainting/clipseg/setup.py b/spaces/multimodalart/stable-diffusion-inpainting/clipseg/setup.py deleted file mode 100644 index 2bf28ffe269cba3033af263db5f98313772818f0..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/stable-diffusion-inpainting/clipseg/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -from setuptools import setup - -with open("README.md", "r", encoding="utf-8") as readme_file: - readme = readme_file.read() - -requirements = [ - "numpy", - "scipy", - "matplotlib", - "torch", - "torchvision", - "opencv-python", - "CLIP @ git+https://github.com/openai/CLIP.git" -] - -setup( - name='clipseg', - packages=['clipseg'], - package_dir={'clipseg': 'models'}, - package_data={'clipseg': [ - "../weights/*.pth", - ]}, - version='0.0.1', - url='https://github.com/timojl/clipseg', - python_requires='>=3.9', - install_requires=requirements, - description='This repository contains the code used in the paper "Image Segmentation Using Text and Image Prompts".', - long_description=readme, - long_description_content_type="text/markdown", -) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/predict_inner_features.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/predict_inner_features.py deleted file mode 100644 index 4f9f7a11a6c4757a4eaa05cf1ac648d372f7e02f..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/predict_inner_features.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 - -# Example command: -# ./bin/predict.py \ -# model.path= \ -# indir= \ -# outdir= - -import logging -import os -import sys -import traceback - -from saicinpainting.evaluation.utils import move_to_device - -os.environ['OMP_NUM_THREADS'] = '1' -os.environ['OPENBLAS_NUM_THREADS'] = '1' -os.environ['MKL_NUM_THREADS'] = '1' -os.environ['VECLIB_MAXIMUM_THREADS'] = '1' -os.environ['NUMEXPR_NUM_THREADS'] = '1' - -import cv2 -import hydra -import numpy as np -import torch -import tqdm -import yaml -from omegaconf import OmegaConf -from torch.utils.data._utils.collate import default_collate - -from saicinpainting.training.data.datasets import make_default_val_dataset -from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule -from saicinpainting.utils import register_debug_signal_handlers, get_shape - -LOGGER = logging.getLogger(__name__) - - -@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml') -def main(predict_config: OmegaConf): - try: - register_debug_signal_handlers() # kill -10 will result in traceback dumped into log - - device = torch.device(predict_config.device) - - train_config_path = os.path.join(predict_config.model.path, 'config.yaml') - with open(train_config_path, 'r') as f: - train_config = OmegaConf.create(yaml.safe_load(f)) - - checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint) - model = load_checkpoint(train_config, checkpoint_path, strict=False) - model.freeze() - model.to(device) - - assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported' - assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential) - - if not 
predict_config.indir.endswith('/'): - predict_config.indir += '/' - - dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) - - max_level = max(predict_config.levels) - - with torch.no_grad(): - for img_i in tqdm.trange(len(dataset)): - mask_fname = dataset.mask_filenames[img_i] - cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0]) - os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) - - batch = move_to_device(default_collate([dataset[img_i]]), device) - - img = batch['image'] - mask = batch['mask'] - mask[:] = 0 - mask_h, mask_w = mask.shape[-2:] - mask[:, :, - mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius, - mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1 - - masked_img = torch.cat([img * (1 - mask), mask], dim=1) - - feats = masked_img - for level_i, level in enumerate(model.generator.model): - feats = level(feats) - if level_i in predict_config.levels: - cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \ - if isinstance(feats, tuple) else feats - - if predict_config.slice_channels: - cur_feats = cur_feats[:, slice(*predict_config.slice_channels)] - - cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone() - cur_feat -= cur_feat.min() - cur_feat /= cur_feat.std() - cur_feat = cur_feat.clamp(0, 1) / 1 - cur_feat = cur_feat.cpu().numpy()[0] - cur_feat *= 255 - cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat) - - # for channel_i in predict_config.channels: - # - # cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy() - # cur_feat -= cur_feat.min() - # cur_feat /= cur_feat.max() - # cur_feat *= 255 - # cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - # cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat) - elif level_i >= max_level: - break - except KeyboardInterrupt: - LOGGER.warning('Interrupted by user') - except Exception as ex: - LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/models/simmim/simmim.py b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/models/simmim/simmim.py deleted file mode 100644 index b13cfca7e06dc5da468012e983d07b8be37ae4e4..0000000000000000000000000000000000000000 --- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/models/simmim/simmim.py +++ /dev/null @@ -1,117 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_ - -from ..swinv2_model import SwinTransformerV2 - - -class SwinTransformerV2ForSimMIM(SwinTransformerV2): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - assert self.num_classes == 0 - - self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) - trunc_normal_(self.mask_token, mean=0., std=.02) - - def forward(self, x, mask): - x = self.patch_embed(x) - - assert mask is not None - B, L, _ = x.shape - - mask_tokens = self.mask_token.expand(B, L, -1) - w = mask.flatten(1).unsqueeze(-1).type_as(mask_tokens) - x = x * (1. 
- w) + mask_tokens * w - - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - - for layer in self.layers: - x = layer(x) - x = self.norm(x) - - x = x.transpose(1, 2) - B, C, L = x.shape - H = W = int(L ** 0.5) - x = x.reshape(B, C, H, W) - return x - - @torch.jit.ignore - def no_weight_decay(self): - return super().no_weight_decay() | {'mask_token'} - - -class MiMModel(nn.Module): - def __init__(self, encoder, encoder_stride, in_chans, patch_size): - super().__init__() - self.encoder = encoder - self.encoder_stride = encoder_stride - self.in_chans = in_chans - self.patch_size = patch_size - self.decoder = nn.Sequential( - nn.Conv2d( - in_channels=self.encoder.num_features, - out_channels=self.encoder_stride ** 2 * self.in_chans, - kernel_size=1), - nn.PixelShuffle(self.encoder_stride), - ) - - # self.in_chans = self.encoder.in_chans - # self.patch_size = self.encoder.patch_size - - def forward(self, x, mask): - z = self.encoder(x, mask) - x_rec = self.decoder(z) - - mask = mask.repeat_interleave(self.patch_size, 1).repeat_interleave( - self.patch_size, 2).unsqueeze(1).contiguous() - loss_recon = F.l1_loss(x, x_rec, reduction='none') - loss = (loss_recon * mask).sum() / (mask.sum() + 1e-5) / self.in_chans - return loss - - @torch.jit.ignore - def no_weight_decay(self): - if hasattr(self.encoder, 'no_weight_decay'): - return {'encoder.' + i for i in self.encoder.no_weight_decay()} - return {} - - @torch.jit.ignore - def no_weight_decay_keywords(self): - if hasattr(self.encoder, 'no_weight_decay_keywords'): - return {'encoder.' + i for i in - self.encoder.no_weight_decay_keywords()} - return {} - - -def build_mim_model(config): - model_type = config.MODEL.TYPE - if model_type == 'swinv2': - encoder = SwinTransformerV2ForSimMIM( - img_size=config.DATA.IMG_SIZE, - patch_size=config.MODEL.SWINV2.PATCH_SIZE, - in_chans=config.MODEL.SWINV2.IN_CHANS, - num_classes=0, - embed_dim=config.MODEL.SWINV2.EMBED_DIM, - depths=config.MODEL.SWINV2.DEPTHS, - num_heads=config.MODEL.SWINV2.NUM_HEADS, - window_size=config.MODEL.SWINV2.WINDOW_SIZE, - mlp_ratio=config.MODEL.SWINV2.MLP_RATIO, - qkv_bias=config.MODEL.SWINV2.QKV_BIAS, - drop_rate=config.MODEL.DROP_RATE, - drop_path_rate=config.MODEL.DROP_PATH_RATE, - ape=config.MODEL.SWINV2.APE, - patch_norm=config.MODEL.SWINV2.PATCH_NORM, - use_checkpoint=config.TRAIN.USE_CHECKPOINT) - encoder_stride = 32 - in_chans = config.MODEL.SWINV2.IN_CHANS - patch_size = config.MODEL.SWINV2.PATCH_SIZE - else: - raise NotImplementedError(f"Unknown pre-train model: {model_type}") - - model = MiMModel(encoder=encoder, encoder_stride=encoder_stride, - in_chans=in_chans, patch_size=patch_size) - - return model diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/((NEW)) Download Showgirls Movie.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/((NEW)) Download Showgirls Movie.md deleted file mode 100644 index 87df70b333221a887ba129f5baf828185689fad4..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/((NEW)) Download Showgirls Movie.md +++ /dev/null @@ -1,31 +0,0 @@ -
    -

    How to Download Showgirls Movie for Free

    -

Showgirls is a 1995 drama film directed by Paul Verhoeven and starring Elizabeth Berkley, Kyle MacLachlan, and Gina Gershon. The film follows Nomi, a young drifter who arrives in Las Vegas to become a dancer and soon sets about clawing her way to the top of the Las Vegas showgirl scene.

    -

    download showgirls movie


    Downloadhttps://urlcod.com/2uIbMo



    -

    Showgirls is notorious for its frequent nudity, explicit sex scenes, and controversial depiction of the Las Vegas entertainment industry. The film received mostly negative reviews from critics and was a box-office flop. However, it has since gained a cult following and has been re-evaluated by some critics as a satire of American culture and values.

    -

If you are curious about this film and want to watch it for free, you might be wondering how to download the Showgirls movie online. There are several ways to do this, but not all of them are legal or safe. In this article, we will show you some of the best and most reliable methods to download the movie for free without breaking any laws or risking your computer's security.

    -

    Method 1: Use Internet Archive

    -

One of the easiest and safest ways to download the Showgirls movie for free is to use Internet Archive, a non-profit digital library that offers access to millions of free books, movies, music, and more. Internet Archive has two versions of Showgirls available for download: the original theatrical release and the censored VH1 edit.

    -

    The original theatrical release is the uncut version of the film that was rated NC-17 for its graphic content. The censored VH1 edit is a version of the film that was created for television broadcast by digitally adding bras and panties to hide the nudity and removing some scenes entirely. The censored version also has a different voice actress dubbing over Elizabeth Berkley's lines.

    -

To download the movie from Internet Archive, follow these steps:

    -
      -
1. Go to https://archive.org/details/showgirls_202108 for the original version or https://archive.org/details/showgirls-tv-version for the censored version.
2. Click on the "Download Options" menu on the right side of the page.
3. Select the format you prefer, such as MP4 or OGG.
4. Click on the download link and save the file to your computer.
    -

Note that Internet Archive is a legal and reputable site that respects the rights of creators and users. However, downloading the movie from Internet Archive may still violate some copyright laws in your country or region. Therefore, we advise you to check your local laws before downloading it.

    -

    Method 2: Use IMDb TV

    -

Another way to download the Showgirls movie for free is to use IMDb TV, a streaming service that offers free movies and TV shows with ads. IMDb TV is owned by Amazon and is available in the United States only. You need an Amazon account to access IMDb TV.

    -

    -

Showgirls is one of the movies that IMDb TV offers for free streaming with ads. You can watch it on your computer, smartphone, tablet, or smart TV. However, if you want to download the movie from IMDb TV for offline viewing, you need to use an Amazon Fire device, such as a Fire tablet or a Fire TV stick.

    -

To download the movie from IMDb TV using an Amazon Fire device, follow these steps:

    -
      -
1. Make sure your Amazon Fire device is connected to the internet and has enough storage space.
2. Go to https://www.imdb.com/title/tt0114436/ on your device's browser or use the IMDb app.
3. Click on the "Watch Now" button on the top right corner of the page.
4. Sign in with your Amazon account if prompted.
5. Select "IMDb TV" as the source of streaming.
6. Wait for the ads to finish playing and then click on the "Download" icon on the

      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contemporary Implant Dentistry Carl E Misch Pdf Free Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contemporary Implant Dentistry Carl E Misch Pdf Free Download.md deleted file mode 100644 index eb20bee088e82552f3af7f455f97a39d8438fe64..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contemporary Implant Dentistry Carl E Misch Pdf Free Download.md +++ /dev/null @@ -1,25 +0,0 @@ - -

      How to Download Contemporary Implant Dentistry by Carl E Misch for Free

      -

      If you are looking for a comprehensive and authoritative guide on dental implant surgery, you might be interested in Contemporary Implant Dentistry by Carl E Misch, one of the most renowned experts in the field. This book covers the latest techniques, materials, and procedures for implant prosthetics, as well as the science and discipline of contemporary implant dentistry.

      -

      However, this book is not cheap. The fourth edition, published in 2019 by Elsevier, costs around $200 on Amazon. If you want to save some money and get access to this valuable resource for free, you might be wondering how to download a PDF version of it.

      -

      Contemporary Implant Dentistry Carl E Misch Pdf Free Download


      Downloadhttps://urlcod.com/2uI9Ji



      -

In this article, we will show you some ways to find and download Contemporary Implant Dentistry by Carl E Misch as a free PDF online. However, we do not endorse or encourage any illegal or unethical activities. We only provide information for educational purposes. You should always respect the intellectual property rights of the authors and publishers.

      -

      Method 1: Search on Google

      -

      One of the easiest ways to find a PDF version of any book is to search on Google. You can use the following query:

      -"Contemporary Implant Dentistry Carl E Misch" filetype:pdf -

      This will return results that contain the exact phrase "Contemporary Implant Dentistry Carl E Misch" and have a PDF file extension. You can then click on the links and see if they lead to a downloadable file.

      -

      However, be careful when clicking on unknown links. Some of them might be malicious or contain viruses. You should also check the quality and authenticity of the PDF file before downloading it. Some files might be incomplete, corrupted, or have low resolution.

      -

      Method 2: Use a File Sharing Platform

      -

      Another way to find a PDF version of any book is to use a file sharing platform, such as Scribd, Z-Library, or Internet Archive. These platforms allow users to upload and download various types of documents, including books, articles, and reports.

      -

      To use these platforms, you need to create an account and sometimes pay a subscription fee. However, some of them also offer free trials or limited access for non-paying users. You can search for the book title or author name on these platforms and see if they have a PDF version available.

      -

For example, we found a PDF version of Contemporary Implant Dentistry by Carl E Misch on Scribd. However, you need to sign up and pay $9.99 per month to download it. Alternatively, you can upload your own document and get one free download per day.

      -

      -

      Method 3: Borrow from a Library

      -

      A third way to get access to a PDF version of any book is to borrow it from a library. Many libraries have digital collections that allow users to borrow e-books and audiobooks online. You can use your library card number and PIN to log in and browse the catalog.

      -

      To find out if your library has a PDF version of Contemporary Implant Dentistry by Carl E Misch, you can use a service like WorldCat or Library Genesis. These services allow you to search for books across thousands of libraries worldwide. You can then see which libraries have the book in their collection and how to access it.

      -

      For example, we found that the University of Kansas Libraries have a PDF version of Contemporary Implant Dentistry by Carl E Misch. However, you need to be a student or faculty member of the university to access it.

      -

      Conclusion

      -

In this article, we showed you some ways to find and download Contemporary Implant Dentistry by Carl E Misch as a free PDF. However, we remind you that downloading copyrighted books without permission is illegal and unethical. You should always respect the rights of the authors and publishers.

      -

      If you want to

      -
      -
      \ No newline at end of file diff --git a/spaces/ngxson/poet-cat/frontend/README.md b/spaces/ngxson/poet-cat/frontend/README.md deleted file mode 100644 index 965a1228cf6c9add1218e0adef73bb6ee230fe7f..0000000000000000000000000000000000000000 --- a/spaces/ngxson/poet-cat/frontend/README.md +++ /dev/null @@ -1,38 +0,0 @@ -This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). - -## Getting Started - -First, run the development server: - -```bash -npm run dev -# or -yarn dev -# or -pnpm dev -``` - -Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. - -You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file. - -[API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.ts`. - -The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages. - -This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. - -## Learn More - -To learn more about Next.js, take a look at the following resources: - -- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. -- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. - -You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! - -## Deploy on Vercel - -The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. - -Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. diff --git a/spaces/niks-salodkar/Fashion-Prediction-Demo/README.md b/spaces/niks-salodkar/Fashion-Prediction-Demo/README.md deleted file mode 100644 index 3caa5f9345879bc180c603fa56ea7232ca9c2380..0000000000000000000000000000000000000000 --- a/spaces/niks-salodkar/Fashion-Prediction-Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fashion Prediction Demo -emoji: 👁 -colorFrom: purple -colorTo: green -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/sparse_matmul.h b/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/sparse_matmul.h deleted file mode 100644 index dc50727861248bb8ffec0015d800987c518d762b..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/sparse_matmul.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LYRA_CODEC_SPARSE_MATMUL_SPARSE_MATMUL_H_ -#define LYRA_CODEC_SPARSE_MATMUL_SPARSE_MATMUL_H_ - -// IWYU pragma: begin_exports -#include "sparse_matmul/compute/gru_gates.h" -#include "sparse_matmul/layers/csr_blocksparse_matrix.h" -#include "sparse_matmul/layers/masked_sparse_matrix.h" -#include "sparse_matmul/layers/sparse_linear_layer.h" -#include "sparse_matmul/layers/utils.h" -#include "sparse_matmul/numerics/fast_transcendentals.h" -#include "sparse_matmul/numerics/fixed_types.h" -#include "sparse_matmul/numerics/float16_types.h" -#include "sparse_matmul/numerics/type_utils.h" -#include "sparse_matmul/os/coop_threads.h" -#include "sparse_matmul/vector/cache_aligned_vector.h" -// IWYU pragma: end_exports - -#endif // LYRA_CODEC_SPARSE_MATMUL_SPARSE_MATMUL_H_ diff --git a/spaces/olivierdehaene/chat-llm-streaming/app.py b/spaces/olivierdehaene/chat-llm-streaming/app.py deleted file mode 100644 index d27f390295f8215e5feee2c6f0ec2684663d0b38..0000000000000000000000000000000000000000 --- a/spaces/olivierdehaene/chat-llm-streaming/app.py +++ /dev/null @@ -1,319 +0,0 @@ -import os - -import gradio as gr - -from text_generation import Client, InferenceAPIClient - -openchat_preprompt = ( - "\n: Hi!\n: My name is Bot, model version is 0.15, part of an open-source kit for " - "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source " - "community. 
I am not human, not evil and not alive, and thus have no thoughts and feelings, " - "but I am programmed to be helpful, polite, honest, and friendly.\n" -) - - -def get_client(model: str): - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return Client(os.getenv("OPENCHAT_API_URL")) - return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None)) - - -def get_usernames(model: str): - """ - Returns: - (str, str, str, str): pre-prompt, username, bot name, separator - """ - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>" - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return openchat_preprompt, ": ", ": ", "\n" - return "", "User: ", "Assistant: ", "\n" - - -def predict( - model: str, - inputs: str, - typical_p: float, - top_p: float, - temperature: float, - top_k: int, - repetition_penalty: float, - watermark: bool, - chatbot, - history, -): - client = get_client(model) - preprompt, user_name, assistant_name, sep = get_usernames(model) - - history.append(inputs) - - past = [] - for data in chatbot: - user_data, model_data = data - - if not user_data.startswith(user_name): - user_data = user_name + user_data - if not model_data.startswith(sep + assistant_name): - model_data = sep + assistant_name + model_data - - past.append(user_data + model_data.rstrip() + sep) - - if not inputs.startswith(user_name): - inputs = user_name + inputs - - total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip() - - partial_words = "" - - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - iterator = client.generate_stream( - total_inputs, - typical_p=typical_p, - truncate=1000, - watermark=watermark, - max_new_tokens=500, - ) - else: - iterator = client.generate_stream( - total_inputs, - top_p=top_p if top_p < 1.0 else None, - top_k=top_k, - truncate=1000, - repetition_penalty=repetition_penalty, - watermark=watermark, - temperature=temperature, - max_new_tokens=500, - stop_sequences=[user_name.rstrip(), assistant_name.rstrip()], - ) - - for i, response in enumerate(iterator): - if response.token.special: - continue - - partial_words = partial_words + response.token.text - if partial_words.endswith(user_name.rstrip()): - partial_words = partial_words.rstrip(user_name.rstrip()) - if partial_words.endswith(assistant_name.rstrip()): - partial_words = partial_words.rstrip(assistant_name.rstrip()) - - if i == 0: - history.append(" " + partial_words) - elif response.token.text not in user_name: - history[-1] = partial_words - - chat = [ - (history[i].strip(), history[i + 1].strip()) - for i in range(0, len(history) - 1, 2) - ] - yield chat, history - - -def reset_textbox(): - return gr.update(value="") - - -def radio_on_change( - value: str, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, -): - if value in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - typical_p = typical_p.update(value=0.2, visible=True) - top_p = top_p.update(visible=False) - top_k = top_k.update(visible=False) - temperature = temperature.update(visible=False) - disclaimer = disclaimer.update(visible=False) - repetition_penalty = repetition_penalty.update(visible=False) - watermark = watermark.update(False) - elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - typical_p = typical_p.update(visible=False) - top_p = 
top_p.update(value=0.25, visible=True) - top_k = top_k.update(value=50, visible=True) - temperature = temperature.update(value=0.6, visible=True) - repetition_penalty = repetition_penalty.update(value=1.01, visible=True) - watermark = watermark.update(False) - disclaimer = disclaimer.update(visible=True) - else: - typical_p = typical_p.update(visible=False) - top_p = top_p.update(value=0.95, visible=True) - top_k = top_k.update(value=4, visible=True) - temperature = temperature.update(value=0.5, visible=True) - repetition_penalty = repetition_penalty.update(value=1.03, visible=True) - watermark = watermark.update(True) - disclaimer = disclaimer.update(visible=False) - return ( - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ) - - -title = """

      Large Language Model Chat API

      """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: - -``` -User: -Assistant: -User: -Assistant: -... -``` - -In this app, you can explore the outputs of multiple LLMs when prompted in this way. -""" - -text_generation_inference = """ - -""" - -openchat_disclaimer = """ -
      Checkout the official OpenChatKit feedback app for the full experience.
      -""" - -with gr.Blocks( - css="""#col_container {margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""" -) as demo: - gr.HTML(title) - gr.Markdown(text_generation_inference, visible=True) - with gr.Column(elem_id="col_container"): - model = gr.Radio( - value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - choices=[ - "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - "OpenAssistant/oasst-sft-1-pythia-12b", - # "togethercomputer/GPT-NeoXT-Chat-Base-20B", - "google/flan-t5-xxl", - "google/flan-ul2", - "bigscience/bloom", - "bigscience/bloomz", - "EleutherAI/gpt-neox-20b", - ], - label="Model", - interactive=True, - ) - - chatbot = gr.Chatbot(elem_id="chatbot") - inputs = gr.Textbox( - placeholder="Hi there!", label="Type an input and press Enter" - ) - disclaimer = gr.Markdown(openchat_disclaimer, visible=False) - state = gr.State([]) - b1 = gr.Button() - - with gr.Accordion("Parameters", open=False): - typical_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.2, - step=0.05, - interactive=True, - label="Typical P mass", - ) - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.25, - step=0.05, - interactive=True, - label="Top-p (nucleus sampling)", - visible=False, - ) - temperature = gr.Slider( - minimum=-0, - maximum=5.0, - value=0.6, - step=0.1, - interactive=True, - label="Temperature", - visible=False, - ) - top_k = gr.Slider( - minimum=1, - maximum=50, - value=50, - step=1, - interactive=True, - label="Top-k", - visible=False, - ) - repetition_penalty = gr.Slider( - minimum=0.1, - maximum=3.0, - value=1.03, - step=0.01, - interactive=True, - label="Repetition Penalty", - visible=False, - ) - watermark = gr.Checkbox(value=False, label="Text watermarking") - - model.change( - lambda value: radio_on_change( - value, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ), - inputs=model, - outputs=[ - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ], - ) - - inputs.submit( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - ) - b1.click( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - ) - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - gr.Markdown(description) - demo.queue(concurrency_count=16).launch(debug=True) diff --git a/spaces/osiria/classifier-zero-shot-italian/README.md b/spaces/osiria/classifier-zero-shot-italian/README.md deleted file mode 100644 index c2187ec82776f0c78abdfd7236e3cea3e9d7dc51..0000000000000000000000000000000000000000 --- a/spaces/osiria/classifier-zero-shot-italian/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Classifier Zero Shot Italian -emoji: 💧 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/p-baleine/metaanalyser/metaanalyser/paper/arxiv_categories.py b/spaces/p-baleine/metaanalyser/metaanalyser/paper/arxiv_categories.py deleted file mode 100644 index 59db4b5dd4982070140d7a209db6a2945c4be8de..0000000000000000000000000000000000000000 --- a/spaces/p-baleine/metaanalyser/metaanalyser/paper/arxiv_categories.py +++ /dev/null @@ -1,172 +0,0 @@ -# 
https://arxiv.org/category_taxonomy をスクレイピングして取得した -# TODO: 新規に追加されるものに対応する -CATEGORY_NAME_ID_MAP = { - 'cs.AI': 'Artificial Intelligence', - 'cs.AR': 'Hardware Architecture', - 'cs.CC': 'Computational Complexity', - 'cs.CE': 'Computational Engineering, Finance, and Science', - 'cs.CG': 'Computational Geometry', - 'cs.CL': 'Computation and Language', - 'cs.CR': 'Cryptography and Security', - 'cs.CV': 'Computer Vision and Pattern Recognition', - 'cs.CY': 'Computers and Society', - 'cs.DB': 'Databases', - 'cs.DC': 'Distributed, Parallel, and Cluster Computing', - 'cs.DL': 'Digital Libraries', - 'cs.DM': 'Discrete Mathematics', - 'cs.DS': 'Data Structures and Algorithms', - 'cs.ET': 'Emerging Technologies', - 'cs.FL': 'Formal Languages and Automata Theory', - 'cs.GL': 'General Literature', - 'cs.GR': 'Graphics', - 'cs.GT': 'Computer Science and Game Theory', - 'cs.HC': 'Human-Computer Interaction', - 'cs.IR': 'Information Retrieval', - 'cs.IT': 'Information Theory', - 'cs.LG': 'Machine Learning', - 'cs.LO': 'Logic in Computer Science', - 'cs.MA': 'Multiagent Systems', - 'cs.MM': 'Multimedia', - 'cs.MS': 'Mathematical Software', - 'cs.NA': 'Numerical Analysis', - 'cs.NE': 'Neural and Evolutionary Computing', - 'cs.NI': 'Networking and Internet Architecture', - 'cs.OH': 'Other Computer Science', - 'cs.OS': 'Operating Systems', - 'cs.PF': 'Performance', - 'cs.PL': 'Programming Languages', - 'cs.RO': 'Robotics', - 'cs.SC': 'Symbolic Computation', - 'cs.SD': 'Sound', - 'cs.SE': 'Software Engineering', - 'cs.SI': 'Social and Information Networks', - 'cs.SY': 'Systems and Control', - 'econ.EM': 'Econometrics', - 'econ.GN': 'General Economics', - 'econ.TH': 'Theoretical Economics', - 'eess.AS': 'Audio and Speech Processing', - 'eess.IV': 'Image and Video Processing', - 'eess.SP': 'Signal Processing', - 'eess.SY': 'Systems and Control', - 'math.AC': 'Commutative Algebra', - 'math.AG': 'Algebraic Geometry', - 'math.AP': 'Analysis of PDEs', - 'math.AT': 'Algebraic Topology', - 'math.CA': 'Classical Analysis and ODEs', - 'math.CO': 'Combinatorics', - 'math.CT': 'Category Theory', - 'math.CV': 'Complex Variables', - 'math.DG': 'Differential Geometry', - 'math.DS': 'Dynamical Systems', - 'math.FA': 'Functional Analysis', - 'math.GM': 'General Mathematics', - 'math.GN': 'General Topology', - 'math.GR': 'Group Theory', - 'math.GT': 'Geometric Topology', - 'math.HO': 'History and Overview', - 'math.IT': 'Information Theory', - 'math.KT': 'K-Theory and Homology', - 'math.LO': 'Logic', - 'math.MG': 'Metric Geometry', - 'math.MP': 'Mathematical Physics', - 'math.NA': 'Numerical Analysis', - 'math.NT': 'Number Theory', - 'math.OA': 'Operator Algebras', - 'math.OC': 'Optimization and Control', - 'math.PR': 'Probability', - 'math.QA': 'Quantum Algebra', - 'math.RA': 'Rings and Algebras', - 'math.RT': 'Representation Theory', - 'math.SG': 'Symplectic Geometry', - 'math.SP': 'Spectral Theory', - 'math.ST': 'Statistics Theory', - 'Astrophysics': 'astro-ph', - 'astro-ph.CO': 'Cosmology and Nongalactic Astrophysics', - 'astro-ph.EP': 'Earth and Planetary Astrophysics', - 'astro-ph.GA': 'Astrophysics of Galaxies', - 'astro-ph.HE': 'High Energy Astrophysical Phenomena', - 'astro-ph.IM': 'Instrumentation and Methods for Astrophysics', - 'astro-ph.SR': 'Solar and Stellar Astrophysics', - 'Condensed Matter': 'cond-mat', - 'cond-mat.dis-nn': 'Disordered Systems and Neural Networks', - 'cond-mat.mes-hall': 'Mesoscale and Nanoscale Physics', - 'cond-mat.mtrl-sci': 'Materials Science', - 'cond-mat.other': 'Other Condensed 
Matter', - 'cond-mat.quant-gas': 'Quantum Gases', - 'cond-mat.soft': 'Soft Condensed Matter', - 'cond-mat.stat-mech': 'Statistical Mechanics', - 'cond-mat.str-el': 'Strongly Correlated Electrons', - 'cond-mat.supr-con': 'Superconductivity', - 'General Relativity and Quantum Cosmology': 'gr-qc', - 'gr-qc': 'General Relativity and Quantum Cosmology', - 'High Energy Physics - Experiment': 'hep-ex', - 'hep-ex': 'High Energy Physics - Experiment', - 'High Energy Physics - Lattice': 'hep-lat', - 'hep-lat': 'High Energy Physics - Lattice', - 'High Energy Physics - Phenomenology': 'hep-ph', - 'hep-ph': 'High Energy Physics - Phenomenology', - 'High Energy Physics - Theory': 'hep-th', - 'hep-th': 'High Energy Physics - Theory', - 'Mathematical Physics': 'math-ph', - 'math-ph': 'Mathematical Physics', - 'Nonlinear Sciences': 'nlin', - 'nlin.AO': 'Adaptation and Self-Organizing Systems', - 'nlin.CD': 'Chaotic Dynamics', - 'nlin.CG': 'Cellular Automata and Lattice Gases', - 'nlin.PS': 'Pattern Formation and Solitons', - 'nlin.SI': 'Exactly Solvable and Integrable Systems', - 'Nuclear Experiment': 'nucl-ex', - 'nucl-ex': 'Nuclear Experiment', - 'Nuclear Theory': 'nucl-th', - 'nucl-th': 'Nuclear Theory', - 'Physics': 'physics', - 'physics.acc-ph': 'Accelerator Physics', - 'physics.ao-ph': 'Atmospheric and Oceanic Physics', - 'physics.app-ph': 'Applied Physics', - 'physics.atm-clus': 'Atomic and Molecular Clusters', - 'physics.atom-ph': 'Atomic Physics', - 'physics.bio-ph': 'Biological Physics', - 'physics.chem-ph': 'Chemical Physics', - 'physics.class-ph': 'Classical Physics', - 'physics.comp-ph': 'Computational Physics', - 'physics.data-an': 'Data Analysis, Statistics and Probability', - 'physics.ed-ph': 'Physics Education', - 'physics.flu-dyn': 'Fluid Dynamics', - 'physics.gen-ph': 'General Physics', - 'physics.geo-ph': 'Geophysics', - 'physics.hist-ph': 'History and Philosophy of Physics', - 'physics.ins-det': 'Instrumentation and Detectors', - 'physics.med-ph': 'Medical Physics', - 'physics.optics': 'Optics', - 'physics.plasm-ph': 'Plasma Physics', - 'physics.pop-ph': 'Popular Physics', - 'physics.soc-ph': 'Physics and Society', - 'physics.space-ph': 'Space Physics', - 'Quantum Physics': 'quant-ph', - 'quant-ph': 'Quantum Physics', - 'q-bio.BM': 'Biomolecules', - 'q-bio.CB': 'Cell Behavior', - 'q-bio.GN': 'Genomics', - 'q-bio.MN': 'Molecular Networks', - 'q-bio.NC': 'Neurons and Cognition', - 'q-bio.OT': 'Other Quantitative Biology', - 'q-bio.PE': 'Populations and Evolution', - 'q-bio.QM': 'Quantitative Methods', - 'q-bio.SC': 'Subcellular Processes', - 'q-bio.TO': 'Tissues and Organs', - 'q-fin.CP': 'Computational Finance', - 'q-fin.EC': 'Economics', - 'q-fin.GN': 'General Finance', - 'q-fin.MF': 'Mathematical Finance', - 'q-fin.PM': 'Portfolio Management', - 'q-fin.PR': 'Pricing of Securities', - 'q-fin.RM': 'Risk Management', - 'q-fin.ST': 'Statistical Finance', - 'q-fin.TR': 'Trading and Market Microstructure', - 'stat.AP': 'Applications', - 'stat.CO': 'Computation', - 'stat.ME': 'Methodology', - 'stat.ML': 'Machine Learning', - 'stat.OT': 'Other Statistics', - 'stat.TH': 'Statistics Theory' -} diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/util.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/util.py deleted file mode 100644 index 7cde937016b7a24b4081dc0565b53c16a87939d2..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/midas/util.py +++ /dev/null @@ -1,34 +0,0 @@ -import numpy as np -import cv2 - - -def 
HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - - -def resize_image(input_image, resolution): - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) - return img diff --git a/spaces/paimeng/anime-remove-background/app.py b/spaces/paimeng/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/paimeng/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/pamixsun/glaucoma_screening/utils.py b/spaces/pamixsun/glaucoma_screening/utils.py deleted file mode 100644 index 6cd8576657ca4addecb103c437b3cda1e5c65c8f..0000000000000000000000000000000000000000 --- a/spaces/pamixsun/glaucoma_screening/utils.py +++ /dev/null @@ -1,45 +0,0 @@ - -import numpy as np - - -def add_mask(image, label, vis_labels, colors, alpha=0.5): - - if len(image.shape) < 3 or image.shape[2] == 1: - image = 
image.repeat(3).reshape((image.shape[0], image.shape[1], 3)).astype(np.uint8) - ori_image = image.copy() - - for ci, vis_label in enumerate(vis_labels): - color = colors[ci] - mask = label == vis_label - for i in range(3): - image[:, :, i][mask] = (color[i] * alpha + image[:, :, i][mask] * (1 - alpha)).astype(np.uint8) - - return ori_image, image - - -def find_haight(mask): - - v_sum = (np.sum(mask, axis=1) > 0).astype(np.uint8) - - v_diff = np.diff(np.hstack((0, v_sum, 0))) - - v_min_y = np.where(v_diff > 0)[0][0] - v_max_y = np.where(v_diff < 0)[0][-1] - 1 - - return v_max_y - v_min_y - - -def simple_vcdr(mask): - - disc_mask = (mask > 0).astype(np.uint8) - disc_height = find_haight(disc_mask) - - cup_mask = (mask > 1).astype(np.uint8) - cup_height = find_haight(cup_mask) - - vcdr = cup_height / disc_height - - return vcdr - - - diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/segmodel/resnet.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/segmodel/resnet.py deleted file mode 100644 index e86da6e62ed81a2c15a1cd6cce35c6da1f437917..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/segmodel/resnet.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import sys -import torch -import torch.nn as nn -import math -try: - from lib.nn import SynchronizedBatchNorm2d -except ImportError: - from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d - -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -__all__ = ['ResNet', 'resnet18', 'resnet50', 'resnet101'] # resnet101 is coming soon! - - -model_urls = { - 'resnet18': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet18-imagenet.pth', - 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth', - 'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth' -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, bias=False) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = SynchronizedBatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual 
= x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers, num_classes=1000): - self.inplanes = 128 - super(ResNet, self).__init__() - self.conv1 = conv3x3(3, 64, stride=2) - self.bn1 = SynchronizedBatchNorm2d(64) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = conv3x3(64, 64) - self.bn2 = SynchronizedBatchNorm2d(64) - self.relu2 = nn.ReLU(inplace=True) - self.conv3 = conv3x3(64, 128) - self.bn3 = SynchronizedBatchNorm2d(128) - self.relu3 = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, SynchronizedBatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - SynchronizedBatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - -def resnet18(pretrained=False, **kwargs): - """Constructs a ResNet-18 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet18'])) - return model - -''' -def resnet34(pretrained=False, **kwargs): - """Constructs a ResNet-34 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet34'])) - return model -''' - -def resnet50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet50']), strict=False) - return model - - -def resnet101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet101']), strict=False) - return model - -# def resnet152(pretrained=False, **kwargs): -# """Constructs a ResNet-152 model. -# -# Args: -# pretrained (bool): If True, returns a model pre-trained on ImageNet -# """ -# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) -# if pretrained: -# model.load_state_dict(load_url(model_urls['resnet152'])) -# return model - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) diff --git a/spaces/pinecone/find-your-celebrity-match/app.py b/spaces/pinecone/find-your-celebrity-match/app.py deleted file mode 100644 index e7df1bca229d1a027ae84d026c0b900bb7002c12..0000000000000000000000000000000000000000 --- a/spaces/pinecone/find-your-celebrity-match/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import pinecone -from PIL import Image -import streamlit as st -from facenet import FacenetEmbedder -from jinja2 import Environment, FileSystemLoader - - -st.markdown("

      ⚡️ Find Your Celebrity Match ⚡️

      ", unsafe_allow_html=True) -st.markdown("

      Take a picture to find your match, learn how it works here.

      ", unsafe_allow_html=True) - -def local_css(file_name): - st.markdown('', unsafe_allow_html=True) - with open(file_name) as f: - st.markdown(f'', unsafe_allow_html=True) - -@st.experimental_singleton -def init_template(): - local_css("style.css") - environment = Environment(loader=FileSystemLoader("templates/")) - template = environment.get_template("card.html") - return template - -PINECONE_KEY = st.secrets["PINECONE_KEY"] - -@st.experimental_singleton -def init_pinecone(): - pinecone.init(api_key=PINECONE_KEY, environment="us-west1-gcp") # get a free api key from app.pinecone.io - return pinecone.GRPCIndex("celebrity-match") - - -@st.experimental_singleton -def init_facenet(): - facenet = FacenetEmbedder() - return facenet - - -template = init_template() -index = init_pinecone() -facenet = init_facenet() - -col1, col2, col3 = st.columns([0.2, 2, 0.2]) - -with col2: img_file_buffer = st.camera_input("") - -if img_file_buffer is not None: - img = Image.open(img_file_buffer) - emb = facenet.encode([img]) - if emb: - result = index.query(emb[0], top_k=3, include_metadata=True) - cards = template.render(results=result["matches"]) - st.markdown(f"
      {cards}
      ", unsafe_allow_html=True) - else: - with col2: st.error('Oops! No face was detected, try again.', icon="🚨") diff --git a/spaces/pknez/face-swap-docker/clip/vitseg.py b/spaces/pknez/face-swap-docker/clip/vitseg.py deleted file mode 100644 index ed621431ddf930fcfa27b5929999776b96fede63..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/clip/vitseg.py +++ /dev/null @@ -1,286 +0,0 @@ -import math -from posixpath import basename, dirname, join -# import clip -from clip.model import convert_weights -import torch -import json -from torch import nn -from torch.nn import functional as nnf -from torch.nn.modules import activation -from torch.nn.modules.activation import ReLU -from torchvision import transforms - -normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)) - -from torchvision.models import ResNet - - -def process_prompts(conditional, prompt_list, conditional_map): - # DEPRECATED - - # randomly sample a synonym - words = [conditional_map[int(i)] for i in conditional] - words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words] - words = [w.replace('_', ' ') for w in words] - - if prompt_list is not None: - prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) - prompts = [prompt_list[i] for i in prompt_indices] - else: - prompts = ['a photo of {}'] * (len(words)) - - return [promt.format(w) for promt, w in zip(prompts, words)] - - -class VITDenseBase(nn.Module): - - def rescaled_pos_emb(self, new_size): - assert len(new_size) == 2 - - a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape) - b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T - return torch.cat([self.model.positional_embedding[:1], b]) - - def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None): - - with torch.no_grad(): - - x_inp = nnf.interpolate(x_inp, (384, 384)) - - x = self.model.patch_embed(x_inp) - cls_token = self.model.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks - if self.model.dist_token is None: - x = torch.cat((cls_token, x), dim=1) - else: - x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1) - x = self.model.pos_drop(x + self.model.pos_embed) - - activations = [] - for i, block in enumerate(self.model.blocks): - x = block(x) - - if i in extract_layers: - # permute to be compatible with CLIP - activations += [x.permute(1,0,2)] - - x = self.model.norm(x) - x = self.model.head(self.model.pre_logits(x[:, 0])) - - # again for CLIP compatibility - # x = x.permute(1, 0, 2) - - return x, activations, None - - def sample_prompts(self, words, prompt_list=None): - - prompt_list = prompt_list if prompt_list is not None else self.prompt_list - - prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) - prompts = [prompt_list[i] for i in prompt_indices] - return [promt.format(w) for promt, w in zip(prompts, words)] - - def get_cond_vec(self, conditional, batch_size): - # compute conditional from a single string - if conditional is not None and type(conditional) == str: - cond = self.compute_conditional(conditional) - cond = cond.repeat(batch_size, 1) - - # compute conditional from string list/tuple - elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str: - assert len(conditional) == 
batch_size - cond = self.compute_conditional(conditional) - - # use conditional directly - elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2: - cond = conditional - - # compute conditional from image - elif conditional is not None and type(conditional) == torch.Tensor: - with torch.no_grad(): - cond, _, _ = self.visual_forward(conditional) - else: - raise ValueError('invalid conditional') - return cond - - def compute_conditional(self, conditional): - import clip - - dev = next(self.parameters()).device - - if type(conditional) in {list, tuple}: - text_tokens = clip.tokenize(conditional).to(dev) - cond = self.clip_model.encode_text(text_tokens) - else: - if conditional in self.precomputed_prompts: - cond = self.precomputed_prompts[conditional].float().to(dev) - else: - text_tokens = clip.tokenize([conditional]).to(dev) - cond = self.clip_model.encode_text(text_tokens)[0] - - return cond - - -class VITDensePredT(VITDenseBase): - - def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed', - depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False, - learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False, - add_calibration=False, process_cond=None, not_pretrained=False): - super().__init__() - # device = 'cpu' - - self.extract_layers = extract_layers - self.cond_layer = cond_layer - self.limit_to_clip_only = limit_to_clip_only - self.process_cond = None - - if add_calibration: - self.calibration_conds = 1 - - self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None - - self.add_activation1 = True - - import timm - self.model = timm.create_model('vit_base_patch16_384', pretrained=True) - self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond) - - for p in self.model.parameters(): - p.requires_grad_(False) - - import clip - self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False) - # del self.clip_model.visual - - - self.token_shape = (14, 14) - - # conditional - if reduce_cond is not None: - self.reduce_cond = nn.Linear(512, reduce_cond) - for p in self.reduce_cond.parameters(): - p.requires_grad_(False) - else: - self.reduce_cond = None - - # self.film = AVAILABLE_BLOCKS['film'](512, 128) - self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - - # DEPRECATED - # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))} - - assert len(self.extract_layers) == depth - - self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)]) - self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))]) - self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)]) - - trans_conv_ks = (16, 16) - self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) - - # refinement and trans conv - - if learn_trans_conv_only: - for p in self.parameters(): - p.requires_grad_(False) - - for p in self.trans_conv.parameters(): - p.requires_grad_(True) - - if prompt == 'fixed': - self.prompt_list = ['a photo of a {}.'] - elif prompt == 'shuffle': - self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] - elif prompt == 'shuffle+': - self.prompt_list = ['a photo of a {}.', 'a photograph of 
a {}.', 'an image of a {}.', '{}.', - 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.', - 'a bad photo of a {}.', 'a photo of the {}.'] - elif prompt == 'shuffle_clip': - from models.clip_prompts import imagenet_templates - self.prompt_list = imagenet_templates - - if process_cond is not None: - if process_cond == 'clamp' or process_cond[0] == 'clamp': - - val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2 - - def clamp_vec(x): - return torch.clamp(x, -val, val) - - self.process_cond = clamp_vec - - elif process_cond.endswith('.pth'): - - shift = torch.load(process_cond) - def add_shift(x): - return x + shift.to(x.device) - - self.process_cond = add_shift - - import pickle - precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb')) - self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()} - - - def forward(self, inp_image, conditional=None, return_features=False, mask=None): - - assert type(return_features) == bool - - # inp_image = inp_image.to(self.model.positional_embedding.device) - - if mask is not None: - raise ValueError('mask not supported') - - # x_inp = normalize(inp_image) - x_inp = inp_image - - bs, dev = inp_image.shape[0], x_inp.device - - inp_image_size = inp_image.shape[2:] - - cond = self.get_cond_vec(conditional, bs) - - visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers)) - - activation1 = activations[0] - activations = activations[1:] - - a = None - for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)): - - if a is not None: - a = reduce(activation) + a - else: - a = reduce(activation) - - if i == self.cond_layer: - if self.reduce_cond is not None: - cond = self.reduce_cond(cond) - - a = self.film_mul(cond) * a + self.film_add(cond) - - a = block(a) - - for block in self.extra_blocks: - a = a + block(a) - - a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens - - size = int(math.sqrt(a.shape[2])) - - a = a.view(bs, a.shape[1], size, size) - - if self.trans_conv is not None: - a = self.trans_conv(a) - - if self.upsample_proj is not None: - a = self.upsample_proj(a) - a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear') - - a = nnf.interpolate(a, inp_image_size) - - if return_features: - return a, visual_q, cond, [activation1] + activations - else: - return a, diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py deleted file mode 100644 index d5b238608b2af459e3db803edbe1b23a7955df7b..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py +++ /dev/null @@ -1,299 +0,0 @@ -import functools -import logging -import os -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast - -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible -from pip._vendor.resolvelib import Resolver as RLResolver -from pip._vendor.resolvelib.structs import DirectedGraph - -from pip._internal.cache import WheelCache -from pip._internal.index.package_finder import PackageFinder -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_set import 
RequirementSet -from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider -from pip._internal.resolution.resolvelib.provider import PipProvider -from pip._internal.resolution.resolvelib.reporter import ( - PipDebuggingReporter, - PipReporter, -) - -from .base import Candidate, Requirement -from .factory import Factory - -if TYPE_CHECKING: - from pip._vendor.resolvelib.resolvers import Result as RLResult - - Result = RLResult[Requirement, Candidate, str] - - -logger = logging.getLogger(__name__) - - -class Resolver(BaseResolver): - _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} - - def __init__( - self, - preparer: RequirementPreparer, - finder: PackageFinder, - wheel_cache: Optional[WheelCache], - make_install_req: InstallRequirementProvider, - use_user_site: bool, - ignore_dependencies: bool, - ignore_installed: bool, - ignore_requires_python: bool, - force_reinstall: bool, - upgrade_strategy: str, - py_version_info: Optional[Tuple[int, ...]] = None, - ): - super().__init__() - assert upgrade_strategy in self._allowed_strategies - - self.factory = Factory( - finder=finder, - preparer=preparer, - make_install_req=make_install_req, - wheel_cache=wheel_cache, - use_user_site=use_user_site, - force_reinstall=force_reinstall, - ignore_installed=ignore_installed, - ignore_requires_python=ignore_requires_python, - py_version_info=py_version_info, - ) - self.ignore_dependencies = ignore_dependencies - self.upgrade_strategy = upgrade_strategy - self._result: Optional[Result] = None - - def resolve( - self, root_reqs: List[InstallRequirement], check_supported_wheels: bool - ) -> RequirementSet: - collected = self.factory.collect_root_requirements(root_reqs) - provider = PipProvider( - factory=self.factory, - constraints=collected.constraints, - ignore_dependencies=self.ignore_dependencies, - upgrade_strategy=self.upgrade_strategy, - user_requested=collected.user_requested, - ) - if "PIP_RESOLVER_DEBUG" in os.environ: - reporter: BaseReporter = PipDebuggingReporter() - else: - reporter = PipReporter() - resolver: RLResolver[Requirement, Candidate, str] = RLResolver( - provider, - reporter, - ) - - try: - limit_how_complex_resolution_can_be = 200000 - result = self._result = resolver.resolve( - collected.requirements, max_rounds=limit_how_complex_resolution_can_be - ) - - except ResolutionImpossible as e: - error = self.factory.get_installation_error( - cast("ResolutionImpossible[Requirement, Candidate]", e), - collected.constraints, - ) - raise error from e - - req_set = RequirementSet(check_supported_wheels=check_supported_wheels) - for candidate in result.mapping.values(): - ireq = candidate.get_install_requirement() - if ireq is None: - continue - - # Check if there is already an installation under the same name, - # and set a flag for later stages to uninstall it, if needed. - installed_dist = self.factory.get_dist_to_uninstall(candidate) - if installed_dist is None: - # There is no existing installation -- nothing to uninstall. - ireq.should_reinstall = False - elif self.factory.force_reinstall: - # The --force-reinstall flag is set -- reinstall. - ireq.should_reinstall = True - elif installed_dist.version != candidate.version: - # The installation is different in version -- reinstall. - ireq.should_reinstall = True - elif candidate.is_editable or installed_dist.editable: - # The incoming distribution is editable, or different in - # editable-ness to installation -- reinstall. 
- ireq.should_reinstall = True - elif candidate.source_link and candidate.source_link.is_file: - # The incoming distribution is under file:// - if candidate.source_link.is_wheel: - # is a local wheel -- do nothing. - logger.info( - "%s is already installed with the same version as the " - "provided wheel. Use --force-reinstall to force an " - "installation of the wheel.", - ireq.name, - ) - continue - - # is a local sdist or path -- reinstall - ireq.should_reinstall = True - else: - continue - - link = candidate.source_link - if link and link.is_yanked: - # The reason can contain non-ASCII characters, Unicode - # is required for Python 2. - msg = ( - "The candidate selected for download or install is a " - "yanked version: {name!r} candidate (version {version} " - "at {link})\nReason for being yanked: {reason}" - ).format( - name=candidate.name, - version=candidate.version, - link=link, - reason=link.yanked_reason or "", - ) - logger.warning(msg) - - req_set.add_named_requirement(ireq) - - reqs = req_set.all_requirements - self.factory.preparer.prepare_linked_requirements_more(reqs) - for req in reqs: - req.prepared = True - req.needs_more_preparation = False - return req_set - - def get_installation_order( - self, req_set: RequirementSet - ) -> List[InstallRequirement]: - """Get order for installation of requirements in RequirementSet. - - The returned list contains a requirement before another that depends on - it. This helps ensure that the environment is kept consistent as they - get installed one-by-one. - - The current implementation creates a topological ordering of the - dependency graph, giving more weight to packages with less - or no dependencies, while breaking any cycles in the graph at - arbitrary points. We make no guarantees about where the cycle - would be broken, other than it *would* be broken. - """ - assert self._result is not None, "must call resolve() first" - - if not req_set.requirements: - # Nothing is left to install, so we do not need an order. - return [] - - graph = self._result.graph - weights = get_topological_weights(graph, set(req_set.requirements.keys())) - - sorted_items = sorted( - req_set.requirements.items(), - key=functools.partial(_req_set_item_sorter, weights=weights), - reverse=True, - ) - return [ireq for _, ireq in sorted_items] - - -def get_topological_weights( - graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str] -) -> Dict[Optional[str], int]: - """Assign weights to each node based on how "deep" they are. - - This implementation may change at any point in the future without prior - notice. - - We first simplify the dependency graph by pruning any leaves and giving them - the highest weight: a package without any dependencies should be installed - first. This is done again and again in the same way, giving ever less weight - to the newly found leaves. The loop stops when no leaves are left: all - remaining packages have at least one dependency left in the graph. - - Then we continue with the remaining graph, by taking the length for the - longest path to any node from root, ignoring any paths that contain a single - node twice (i.e. cycles). This is done through a depth-first search through - the graph, while keeping track of the path to the node. - - Cycles in the graph result would result in node being revisited while also - being on its own path. In this case, take no action. This helps ensure we - don't get stuck in a cycle. - - When assigning weight, the longer path (i.e. larger length) is preferred. 
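# [editor's note] Illustrative aside, not part of the deleted resolver.py.
# A minimal sketch of the depth-weighting idea described above, using a plain
# dict in place of resolvelib's DirectedGraph and ignoring the leaf-pruning
# optimisation and the requirement_keys filter; all package names are made up.
def toy_topological_weights(graph, root):
    weights, path = {}, set()

    def visit(node, depth):
        if node in path:
            # Revisiting a node already on the current path means a cycle;
            # take no action, mirroring the behaviour described above.
            return
        path.add(node)
        for child in graph.get(node, ()):
            visit(child, depth + 1)
        path.remove(node)
        # Prefer the longest path seen so far to this node.
        weights[node] = max(weights.get(node, 0), depth)

    visit(root, 0)
    return weights

# A package with no dependencies ("util") ends up with the largest weight,
# so it sorts to the front when installing in reverse-weight order.
print(toy_topological_weights({"app": ["lib", "util"], "lib": ["util"], "util": []}, "app"))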
- - We are only interested in the weights of packages that are in the - requirement_keys. - """ - path: Set[Optional[str]] = set() - weights: Dict[Optional[str], int] = {} - - def visit(node: Optional[str]) -> None: - if node in path: - # We hit a cycle, so we'll break it here. - return - - # Time to visit the children! - path.add(node) - for child in graph.iter_children(node): - visit(child) - path.remove(node) - - if node not in requirement_keys: - return - - last_known_parent_count = weights.get(node, 0) - weights[node] = max(last_known_parent_count, len(path)) - - # Simplify the graph, pruning leaves that have no dependencies. - # This is needed for large graphs (say over 200 packages) because the - # `visit` function is exponentially slower then, taking minutes. - # See https://github.com/pypa/pip/issues/10557 - # We will loop until we explicitly break the loop. - while True: - leaves = set() - for key in graph: - if key is None: - continue - for _child in graph.iter_children(key): - # This means we have at least one child - break - else: - # No child. - leaves.add(key) - if not leaves: - # We are done simplifying. - break - # Calculate the weight for the leaves. - weight = len(graph) - 1 - for leaf in leaves: - if leaf not in requirement_keys: - continue - weights[leaf] = weight - # Remove the leaves from the graph, making it simpler. - for leaf in leaves: - graph.remove(leaf) - - # Visit the remaining graph. - # `None` is guaranteed to be the root node by resolvelib. - visit(None) - - # Sanity check: all requirement keys should be in the weights, - # and no other keys should be in the weights. - difference = set(weights.keys()).difference(requirement_keys) - assert not difference, difference - - return weights - - -def _req_set_item_sorter( - item: Tuple[str, InstallRequirement], - weights: Dict[Optional[str], int], -) -> Tuple[int, str]: - """Key function used to sort install requirements for installation. - - Based on the "weight" mapping calculated in ``get_installation_order()``. - The canonical package name is returned as the second member as a tie- - breaker to ensure the result is predictable, which is useful in tests. - """ - name = canonicalize_name(item[0]) - return weights[name], name diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__init__.py deleted file mode 100644 index 10ff67ff4d2bca253a91e4e6461ad096b41da03a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__init__.py +++ /dev/null @@ -1,182 +0,0 @@ -# __ -# /__) _ _ _ _ _/ _ -# / ( (- (/ (/ (- _) / _) -# / - -""" -Requests HTTP Library -~~~~~~~~~~~~~~~~~~~~~ - -Requests is an HTTP library, written in Python, for human beings. -Basic GET usage: - - >>> import requests - >>> r = requests.get('https://www.python.org') - >>> r.status_code - 200 - >>> b'Python is a programming language' in r.content - True - -... or POST: - - >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('https://httpbin.org/post', data=payload) - >>> print(r.text) - { - ... - "form": { - "key1": "value1", - "key2": "value2" - }, - ... - } - -The other HTTP methods are supported - see `requests.api`. Full documentation -is at . - -:copyright: (c) 2017 by Kenneth Reitz. -:license: Apache 2.0, see LICENSE for more details. 
-""" - -import warnings - -from pip._vendor import urllib3 - -from .exceptions import RequestsDependencyWarning - -charset_normalizer_version = None - -try: - from pip._vendor.chardet import __version__ as chardet_version -except ImportError: - chardet_version = None - - -def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): - urllib3_version = urllib3_version.split(".") - assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. - - # Sometimes, urllib3 only reports its version as 16.1. - if len(urllib3_version) == 2: - urllib3_version.append("0") - - # Check urllib3 for compatibility. - major, minor, patch = urllib3_version # noqa: F811 - major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1 - assert major >= 1 - if major == 1: - assert minor >= 21 - - # Check charset_normalizer for compatibility. - if chardet_version: - major, minor, patch = chardet_version.split(".")[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet_version >= 3.0.2, < 6.0.0 - assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) - elif charset_normalizer_version: - major, minor, patch = charset_normalizer_version.split(".")[:3] - major, minor, patch = int(major), int(minor), int(patch) - # charset_normalizer >= 2.0.0 < 4.0.0 - assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) - else: - raise Exception("You need either charset_normalizer or chardet installed") - - -def _check_cryptography(cryptography_version): - # cryptography < 1.3.4 - try: - cryptography_version = list(map(int, cryptography_version.split("."))) - except ValueError: - return - - if cryptography_version < [1, 3, 4]: - warning = "Old version of cryptography ({}) may cause slowdown.".format( - cryptography_version - ) - warnings.warn(warning, RequestsDependencyWarning) - - -# Check imported dependencies for compatibility. -try: - check_compatibility( - urllib3.__version__, chardet_version, charset_normalizer_version - ) -except (AssertionError, ValueError): - warnings.warn( - "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " - "version!".format( - urllib3.__version__, chardet_version, charset_normalizer_version - ), - RequestsDependencyWarning, - ) - -# Attempt to enable urllib3's fallback for SNI support -# if the standard library doesn't support SNI or the -# 'ssl' library isn't available. -try: - # Note: This logic prevents upgrading cryptography on Windows, if imported - # as part of pip. - from pip._internal.utils.compat import WINDOWS - if not WINDOWS: - raise ImportError("pip internals: don't import cryptography on Windows") - try: - import ssl - except ImportError: - ssl = None - - if not getattr(ssl, "HAS_SNI", False): - from pip._vendor.urllib3.contrib import pyopenssl - - pyopenssl.inject_into_urllib3() - - # Check cryptography version - from cryptography import __version__ as cryptography_version - - _check_cryptography(cryptography_version) -except ImportError: - pass - -# urllib3's DependencyWarnings should be silenced. -from pip._vendor.urllib3.exceptions import DependencyWarning - -warnings.simplefilter("ignore", DependencyWarning) - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -from . 
import packages, utils -from .__version__ import ( - __author__, - __author_email__, - __build__, - __cake__, - __copyright__, - __description__, - __license__, - __title__, - __url__, - __version__, -) -from .api import delete, get, head, options, patch, post, put, request -from .exceptions import ( - ConnectionError, - ConnectTimeout, - FileModeWarning, - HTTPError, - JSONDecodeError, - ReadTimeout, - RequestException, - Timeout, - TooManyRedirects, - URLRequired, -) -from .models import PreparedRequest, Request, Response -from .sessions import Session, session -from .status_codes import codes - -logging.getLogger(__name__).addHandler(NullHandler()) - -# FileModeWarnings go off per the default. -warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install.py deleted file mode 100644 index 73caacb049de312e75b3ff11e885e9ffb0f87b8f..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install.py +++ /dev/null @@ -1,145 +0,0 @@ -from distutils.errors import DistutilsArgError -import inspect -import glob -import platform -import distutils.command.install as orig - -import setuptools -from ..warnings import SetuptoolsDeprecationWarning, SetuptoolsWarning - -# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for -# now. See https://github.com/pypa/setuptools/issues/199/ -_install = orig.install - - -class install(orig.install): - """Use easy_install to install the package, w/dependencies""" - - user_options = orig.install.user_options + [ - ('old-and-unmanageable', None, "Try not to use this!"), - ('single-version-externally-managed', None, - "used by system package builders to create 'flat' eggs"), - ] - boolean_options = orig.install.boolean_options + [ - 'old-and-unmanageable', 'single-version-externally-managed', - ] - new_commands = [ - ('install_egg_info', lambda self: True), - ('install_scripts', lambda self: True), - ] - _nc = dict(new_commands) - - def initialize_options(self): - SetuptoolsDeprecationWarning.emit( - "setup.py install is deprecated.", - """ - Please avoid running ``setup.py`` directly. - Instead, use pypa/build, pypa/installer or other - standards-based tools. - """, - see_url="https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html", - # TODO: Document how to bootstrap setuptools without install - # (e.g. by unziping the wheel file) - # and then add a due_date to this warning. - ) - - orig.install.initialize_options(self) - self.old_and_unmanageable = None - self.single_version_externally_managed = None - - def finalize_options(self): - orig.install.finalize_options(self) - if self.root: - self.single_version_externally_managed = True - elif self.single_version_externally_managed: - if not self.root and not self.record: - raise DistutilsArgError( - "You must specify --record or --root when building system" - " packages" - ) - - def handle_extra_path(self): - if self.root or self.single_version_externally_managed: - # explicit backward-compatibility mode, allow extra_path to work - return orig.install.handle_extra_path(self) - - # Ignore extra_path when installing an egg (or being run by another - # command without --root or --single-version-externally-managed - self.path_file = None - self.extra_dirs = '' - - def run(self): - # Explicit request for old-style install? 
Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return orig.install.run(self) - - if not self._called_from_setup(inspect.currentframe()): - # Run in backward-compatibility mode to support bdist_* commands. - orig.install.run(self) - else: - self.do_egg_install() - - @staticmethod - def _called_from_setup(run_frame): - """ - Attempt to detect whether run() was called from setup() or by another - command. If called by setup(), the parent caller will be the - 'run_command' method in 'distutils.dist', and *its* caller will be - the 'run_commands' method. If called any other way, the - immediate caller *might* be 'run_command', but it won't have been - called by 'run_commands'. Return True in that case or if a call stack - is unavailable. Return False otherwise. - """ - if run_frame is None: - msg = "Call stack not available. bdist_* commands may fail." - SetuptoolsWarning.emit(msg) - if platform.python_implementation() == 'IronPython': - msg = "For best results, pass -X:Frames to enable call stack." - SetuptoolsWarning.emit(msg) - return True - - frames = inspect.getouterframes(run_frame) - for frame in frames[2:4]: - caller, = frame[:1] - info = inspect.getframeinfo(caller) - caller_module = caller.f_globals.get('__name__', '') - - if caller_module == "setuptools.dist" and info.function == "run_command": - # Starting from v61.0.0 setuptools overwrites dist.run_command - continue - - return ( - caller_module == 'distutils.dist' - and info.function == 'run_commands' - ) - - def do_egg_install(self): - - easy_install = self.distribution.get_command_class('easy_install') - - cmd = easy_install( - self.distribution, args="x", root=self.root, record=self.record, - ) - cmd.ensure_finalized() # finalize before bdist_egg munges install cmd - cmd.always_copy_from = '.' 
# make sure local-dir eggs get installed - - # pick up setup-dir .egg files only: no .egg-info - cmd.package_index.scan(glob.glob('*.egg')) - - self.run_command('bdist_egg') - args = [self.distribution.get_command_obj('bdist_egg').egg_output] - - if setuptools.bootstrap_install_from: - # Bootstrap self-installation of setuptools - args.insert(0, setuptools.bootstrap_install_from) - - cmd.args = args - cmd.run(show_deprecation=False) - setuptools.bootstrap_install_from = None - - -# XXX Python 3.1 doesn't see _nc if this is inside the class -install.sub_commands = ( - [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + - install.new_commands -) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/package_index.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/package_index.py deleted file mode 100644 index 3130acef2c24f85afc91b1c18cbc4883d1edfb1b..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/package_index.py +++ /dev/null @@ -1,1132 +0,0 @@ -"""PyPI and direct package downloading.""" - -import sys -import os -import re -import io -import shutil -import socket -import base64 -import hashlib -import itertools -import configparser -import html -import http.client -import urllib.parse -import urllib.request -import urllib.error -from functools import wraps - -import setuptools -from pkg_resources import ( - CHECKOUT_DIST, - Distribution, - BINARY_DIST, - normalize_path, - SOURCE_DIST, - Environment, - find_distributions, - safe_name, - safe_version, - to_filename, - Requirement, - DEVELOP_DIST, - EGG_DIST, - parse_version, -) -from distutils import log -from distutils.errors import DistutilsError -from fnmatch import translate -from setuptools.wheel import Wheel -from setuptools.extern.more_itertools import unique_everseen - - -EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') -HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I) -PYPI_MD5 = re.compile( - r'([^<]+)\n\s+\(md5\)' -) -URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match -EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() - -__all__ = [ - 'PackageIndex', - 'distros_for_url', - 'parse_bdist_wininst', - 'interpret_distro_name', -] - -_SOCKET_TIMEOUT = 15 - -_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" -user_agent = _tmpl.format( - py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools -) - - -def parse_requirement_arg(spec): - try: - return Requirement.parse(spec) - except ValueError as e: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % (spec,) - ) from e - - -def parse_bdist_wininst(name): - """Return (base,pyversion) or (None,None) for possible .exe name""" - - lower = name.lower() - base, py_ver, plat = None, None, None - - if lower.endswith('.exe'): - if lower.endswith('.win32.exe'): - base = name[:-10] - plat = 'win32' - elif lower.startswith('.win32-py', -16): - py_ver = name[-7:-4] - base = name[:-16] - plat = 'win32' - elif lower.endswith('.win-amd64.exe'): - base = name[:-14] - plat = 'win-amd64' - elif lower.startswith('.win-amd64-py', -20): - py_ver = name[-7:-4] - base = name[:-20] - plat = 'win-amd64' - return base, py_ver, plat - - -def egg_info_for_url(url): - parts = urllib.parse.urlparse(url) - scheme, server, path, parameters, query, fragment = parts - base = urllib.parse.unquote(path.split('/')[-1]) - if server == 'sourceforge.net' and base == 'download': # XXX Yuck - 
base = urllib.parse.unquote(path.split('/')[-2]) - if '#' in base: - base, fragment = base.split('#', 1) - return base, fragment - - -def distros_for_url(url, metadata=None): - """Yield egg or source distribution objects that might be found at a URL""" - base, fragment = egg_info_for_url(url) - for dist in distros_for_location(url, base, metadata): - yield dist - if fragment: - match = EGG_FRAGMENT.match(fragment) - if match: - for dist in interpret_distro_name( - url, match.group(1), metadata, precedence=CHECKOUT_DIST - ): - yield dist - - -def distros_for_location(location, basename, metadata=None): - """Yield egg or source distribution objects based on basename""" - if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip - if basename.endswith('.egg') and '-' in basename: - # only one, unambiguous interpretation - return [Distribution.from_location(location, basename, metadata)] - if basename.endswith('.whl') and '-' in basename: - wheel = Wheel(basename) - if not wheel.is_compatible(): - return [] - return [ - Distribution( - location=location, - project_name=wheel.project_name, - version=wheel.version, - # Increase priority over eggs. - precedence=EGG_DIST + 1, - ) - ] - if basename.endswith('.exe'): - win_base, py_ver, platform = parse_bdist_wininst(basename) - if win_base is not None: - return interpret_distro_name( - location, win_base, metadata, py_ver, BINARY_DIST, platform - ) - # Try source distro extensions (.zip, .tgz, etc.) - # - for ext in EXTENSIONS: - if basename.endswith(ext): - basename = basename[: -len(ext)] - return interpret_distro_name(location, basename, metadata) - return [] # no extension matched - - -def distros_for_filename(filename, metadata=None): - """Yield possible egg or source distribution objects based on a filename""" - return distros_for_location( - normalize_path(filename), os.path.basename(filename), metadata - ) - - -def interpret_distro_name( - location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None -): - """Generate the interpretation of a source distro name - - Note: if `location` is a filesystem filename, you should call - ``pkg_resources.normalize_path()`` on it before passing it to this - routine! - """ - - parts = basename.split('-') - if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): - # it is a bdist_dumb, not an sdist -- bail out - return - - # find the pivot (p) that splits the name from the version. - # infer the version as the first item that has a digit. - for p in range(len(parts)): - if parts[p][:1].isdigit(): - break - else: - p = len(parts) - - yield Distribution( - location, - metadata, - '-'.join(parts[:p]), - '-'.join(parts[p:]), - py_version=py_version, - precedence=precedence, - platform=platform - ) - - -def unique_values(func): - """ - Wrap a function returning an iterable such that the resulting iterable - only ever yields unique items. - """ - - @wraps(func) - def wrapper(*args, **kwargs): - return unique_everseen(func(*args, **kwargs)) - - return wrapper - - -REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I) -""" -Regex for an HTML tag with 'rel="val"' attributes. 
-""" - - -@unique_values -def find_external_links(url, page): - """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" - - for match in REL.finditer(page): - tag, rel = match.groups() - rels = set(map(str.strip, rel.lower().split(','))) - if 'homepage' in rels or 'download' in rels: - for match in HREF.finditer(tag): - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - for tag in ("Home Page", "Download URL"): - pos = page.find(tag) - if pos != -1: - match = HREF.search(page, pos) - if match: - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - -class ContentChecker: - """ - A null content checker that defines the interface for checking content - """ - - def feed(self, block): - """ - Feed a block of data to the hash. - """ - return - - def is_valid(self): - """ - Check the hash. Return False if validation fails. - """ - return True - - def report(self, reporter, template): - """ - Call reporter with information about the checker (hash name) - substituted into the template. - """ - return - - -class HashChecker(ContentChecker): - pattern = re.compile( - r'(?Psha1|sha224|sha384|sha256|sha512|md5)=' - r'(?P[a-f0-9]+)' - ) - - def __init__(self, hash_name, expected): - self.hash_name = hash_name - self.hash = hashlib.new(hash_name) - self.expected = expected - - @classmethod - def from_url(cls, url): - "Construct a (possibly null) ContentChecker from a URL" - fragment = urllib.parse.urlparse(url)[-1] - if not fragment: - return ContentChecker() - match = cls.pattern.search(fragment) - if not match: - return ContentChecker() - return cls(**match.groupdict()) - - def feed(self, block): - self.hash.update(block) - - def is_valid(self): - return self.hash.hexdigest() == self.expected - - def report(self, reporter, template): - msg = template % self.hash_name - return reporter(msg) - - -class PackageIndex(Environment): - """A distribution index that scans web pages for download URLs""" - - def __init__( - self, - index_url="https://pypi.org/simple/", - hosts=('*',), - ca_bundle=None, - verify_ssl=True, - *args, - **kw - ): - super().__init__(*args, **kw) - self.index_url = index_url + "/"[: not index_url.endswith('/')] - self.scanned_urls = {} - self.fetched_urls = {} - self.package_pages = {} - self.allows = re.compile('|'.join(map(translate, hosts))).match - self.to_scan = [] - self.opener = urllib.request.urlopen - - def add(self, dist): - # ignore invalid versions - try: - parse_version(dist.version) - except Exception: - return - return super().add(dist) - - # FIXME: 'PackageIndex.process_url' is too complex (14) - def process_url(self, url, retrieve=False): # noqa: C901 - """Evaluate a URL as a possible download, and maybe retrieve it""" - if url in self.scanned_urls and not retrieve: - return - self.scanned_urls[url] = True - if not URL_SCHEME(url): - self.process_filename(url) - return - else: - dists = list(distros_for_url(url)) - if dists: - if not self.url_ok(url): - return - self.debug("Found link: %s", url) - - if dists or not retrieve or url in self.fetched_urls: - list(map(self.add, dists)) - return # don't need the actual page - - if not self.url_ok(url): - self.fetched_urls[url] = True - return - - self.info("Reading %s", url) - self.fetched_urls[url] = True # prevent multiple fetch attempts - tmpl = "Download error on %s: %%s -- Some packages may not be found!" 
- f = self.open_url(url, tmpl % url) - if f is None: - return - if isinstance(f, urllib.error.HTTPError) and f.code == 401: - self.info("Authentication error: %s" % f.msg) - self.fetched_urls[f.url] = True - if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it - return - - base = f.url # handle redirects - page = f.read() - if not isinstance(page, str): - # In Python 3 and got bytes but want str. - if isinstance(f, urllib.error.HTTPError): - # Errors have no charset, assume latin1: - charset = 'latin-1' - else: - charset = f.headers.get_param('charset') or 'latin-1' - page = page.decode(charset, "ignore") - f.close() - for match in HREF.finditer(page): - link = urllib.parse.urljoin(base, htmldecode(match.group(1))) - self.process_url(link) - if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: - page = self.process_index(url, page) - - def process_filename(self, fn, nested=False): - # process filenames or directories - if not os.path.exists(fn): - self.warn("Not found: %s", fn) - return - - if os.path.isdir(fn) and not nested: - path = os.path.realpath(fn) - for item in os.listdir(path): - self.process_filename(os.path.join(path, item), True) - - dists = distros_for_filename(fn) - if dists: - self.debug("Found: %s", fn) - list(map(self.add, dists)) - - def url_ok(self, url, fatal=False): - s = URL_SCHEME(url) - is_file = s and s.group(1).lower() == 'file' - if is_file or self.allows(urllib.parse.urlparse(url)[1]): - return True - msg = ( - "\nNote: Bypassing %s (disallowed host; see " - "https://setuptools.pypa.io/en/latest/deprecated/" - "easy_install.html#restricting-downloads-with-allow-hosts for details).\n" - ) - if fatal: - raise DistutilsError(msg % url) - else: - self.warn(msg, url) - - def scan_egg_links(self, search_path): - dirs = filter(os.path.isdir, search_path) - egg_links = ( - (path, entry) - for path in dirs - for entry in os.listdir(path) - if entry.endswith('.egg-link') - ) - list(itertools.starmap(self.scan_egg_link, egg_links)) - - def scan_egg_link(self, path, entry): - with open(os.path.join(path, entry)) as raw_lines: - # filter non-empty lines - lines = list(filter(None, map(str.strip, raw_lines))) - - if len(lines) != 2: - # format is not recognized; punt - return - - egg_path, setup_path = lines - - for dist in find_distributions(os.path.join(path, egg_path)): - dist.location = os.path.join(path, *lines) - dist.precedence = SOURCE_DIST - self.add(dist) - - def _scan(self, link): - # Process a URL to see if it's for a package page - NO_MATCH_SENTINEL = None, None - if not link.startswith(self.index_url): - return NO_MATCH_SENTINEL - - parts = list(map(urllib.parse.unquote, link[len(self.index_url) :].split('/'))) - if len(parts) != 2 or '#' in parts[1]: - return NO_MATCH_SENTINEL - - # it's a package page, sanitize and index it - pkg = safe_name(parts[0]) - ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(), {})[link] = True - return to_filename(pkg), to_filename(ver) - - def process_index(self, url, page): - """Process the contents of a PyPI page""" - - # process an index page into the package-page index - for match in HREF.finditer(page): - try: - self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) - except ValueError: - pass - - pkg, ver = self._scan(url) # ensure this page is in the page index - if not pkg: - return "" # no sense double-scanning non-package pages - - # process individual package page - for new_url in find_external_links(url, page): - # 
Process the found URL - base, frag = egg_info_for_url(new_url) - if base.endswith('.py') and not frag: - if ver: - new_url += '#egg=%s-%s' % (pkg, ver) - else: - self.need_version_info(url) - self.scan_url(new_url) - - return PYPI_MD5.sub( - lambda m: '%s' % m.group(1, 3, 2), page - ) - - def need_version_info(self, url): - self.scan_all( - "Page at %s links to .py file(s) without version info; an index " - "scan is required.", - url, - ) - - def scan_all(self, msg=None, *args): - if self.index_url not in self.fetched_urls: - if msg: - self.warn(msg, *args) - self.info("Scanning index of all packages (this may take a while)") - self.scan_url(self.index_url) - - def find_packages(self, requirement): - self.scan_url(self.index_url + requirement.unsafe_name + '/') - - if not self.package_pages.get(requirement.key): - # Fall back to safe version of the name - self.scan_url(self.index_url + requirement.project_name + '/') - - if not self.package_pages.get(requirement.key): - # We couldn't find the target package, so search the index page too - self.not_found_in_index(requirement) - - for url in list(self.package_pages.get(requirement.key, ())): - # scan each page that might be related to the desired package - self.scan_url(url) - - def obtain(self, requirement, installer=None): - self.prescan() - self.find_packages(requirement) - for dist in self[requirement.key]: - if dist in requirement: - return dist - self.debug("%s does not match %s", requirement, dist) - return super(PackageIndex, self).obtain(requirement, installer) - - def check_hash(self, checker, filename, tfp): - """ - checker is a ContentChecker - """ - checker.report(self.debug, "Validating %%s checksum for %s" % filename) - if not checker.is_valid(): - tfp.close() - os.unlink(filename) - raise DistutilsError( - "%s validation failed for %s; " - "possible download problem?" - % (checker.hash.name, os.path.basename(filename)) - ) - - def add_find_links(self, urls): - """Add `urls` to the list that will be prescanned for searches""" - for url in urls: - if ( - self.to_scan is None # if we have already "gone online" - or not URL_SCHEME(url) # or it's a local file/directory - or url.startswith('file:') - or list(distros_for_url(url)) # or a direct package link - ): - # then go ahead and process it now - self.scan_url(url) - else: - # otherwise, defer retrieval till later - self.to_scan.append(url) - - def prescan(self): - """Scan urls scheduled for prescanning (e.g. --find-links)""" - if self.to_scan: - list(map(self.scan_url, self.to_scan)) - self.to_scan = None # from now on, go ahead and process immediately - - def not_found_in_index(self, requirement): - if self[requirement.key]: # we've seen at least one distro - meth, msg = self.info, "Couldn't retrieve index page for %r" - else: # no distros seen for this name, might be misspelled - meth, msg = ( - self.warn, - "Couldn't find index page for %r (maybe misspelled?)", - ) - meth(msg, requirement.unsafe_name) - self.scan_all() - - def download(self, spec, tmpdir): - """Locate and/or download `spec` to `tmpdir`, returning a local path - - `spec` may be a ``Requirement`` object, or a string containing a URL, - an existing local filename, or a project/version requirement spec - (i.e. the string form of a ``Requirement`` object). If it is the URL - of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one - that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is - automatically created alongside the downloaded file. 
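# [editor's note] Illustrative aside, not part of the deleted package_index.py.
# A minimal sketch of driving download() as described above; the requirement
# spec is hypothetical and the call reaches out to the configured index.
import tempfile

from setuptools.package_index import PackageIndex  # same class as defined above

index = PackageIndex(index_url="https://pypi.org/simple/")
with tempfile.TemporaryDirectory() as tmpdir:
    # Accepts a URL, an existing local path, or a requirement spec string.
    local_file = index.download("example-package==1.0", tmpdir)
    print(local_file)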
- - If `spec` is a ``Requirement`` object or a string containing a - project/version requirement spec, this method returns the location of - a matching distribution (possibly after downloading it to `tmpdir`). - If `spec` is a locally existing file or directory name, it is simply - returned unchanged. If `spec` is a URL, it is downloaded to a subpath - of `tmpdir`, and the local filename is returned. Various errors may be - raised if a problem occurs during downloading. - """ - if not isinstance(spec, Requirement): - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir - found = self._download_url(scheme.group(1), spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found, fragment, tmpdir) - return found - elif os.path.exists(spec): - # Existing file or directory, just return it - return spec - else: - spec = parse_requirement_arg(spec) - return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) - - def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME - self, - requirement, - tmpdir, - force_scan=False, - source=False, - develop_ok=False, - local_index=None, - ): - """Obtain a distribution suitable for fulfilling `requirement` - - `requirement` must be a ``pkg_resources.Requirement`` instance. - If necessary, or if the `force_scan` flag is set, the requirement is - searched for in the (online) package index as well as the locally - installed packages. If a distribution matching `requirement` is found, - the returned distribution's ``location`` is the value you would have - gotten from calling the ``download()`` method with the matching - distribution's URL or filename. If no matching distribution is found, - ``None`` is returned. - - If the `source` flag is set, only source distributions and source - checkout links will be considered. Unless the `develop_ok` flag is - set, development and system eggs (i.e., those using the ``.egg-info`` - format) will be ignored. - """ - # process a Requirement - self.info("Searching for %s", requirement) - skipped = {} - dist = None - - def find(req, env=None): - if env is None: - env = self - # Find a matching distribution; may be called more than once - - for dist in env[req.key]: - - if dist.precedence == DEVELOP_DIST and not develop_ok: - if dist not in skipped: - self.warn( - "Skipping development or system egg: %s", - dist, - ) - skipped[dist] = 1 - continue - - test = dist in req and (dist.precedence <= SOURCE_DIST or not source) - if test: - loc = self.download(dist.location, tmpdir) - dist.download_location = loc - if os.path.exists(dist.download_location): - return dist - - if force_scan: - self.prescan() - self.find_packages(requirement) - dist = find(requirement) - - if not dist and local_index is not None: - dist = find(requirement, local_index) - - if dist is None: - if self.to_scan is not None: - self.prescan() - dist = find(requirement) - - if dist is None and not force_scan: - self.find_packages(requirement) - dist = find(requirement) - - if dist is None: - self.warn( - "No local packages or working download links found for %s%s", - (source and "a source distribution of " or ""), - requirement, - ) - else: - self.info("Best match: %s", dist) - return dist.clone(location=dist.download_location) - - def fetch(self, requirement, tmpdir, force_scan=False, source=False): - """Obtain a file suitable for fulfilling `requirement` - - DEPRECATED; use the ``fetch_distribution()`` method now instead. 
For - backward compatibility, this routine is identical but returns the - ``location`` of the downloaded distribution instead of a distribution - object. - """ - dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) - if dist is not None: - return dist.location - return None - - def gen_setup(self, filename, fragment, tmpdir): - match = EGG_FRAGMENT.match(fragment) - dists = ( - match - and [ - d - for d in interpret_distro_name(filename, match.group(1), None) - if d.version - ] - or [] - ) - - if len(dists) == 1: # unambiguous ``#egg`` fragment - basename = os.path.basename(filename) - - # Make sure the file has been downloaded to the temp dir. - if os.path.dirname(filename) != tmpdir: - dst = os.path.join(tmpdir, basename) - if not (os.path.exists(dst) and os.path.samefile(filename, dst)): - shutil.copy2(filename, dst) - filename = dst - - with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: - file.write( - "from setuptools import setup\n" - "setup(name=%r, version=%r, py_modules=[%r])\n" - % ( - dists[0].project_name, - dists[0].version, - os.path.splitext(basename)[0], - ) - ) - return filename - - elif match: - raise DistutilsError( - "Can't unambiguously interpret project/version identifier %r; " - "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment, dists) - ) - else: - raise DistutilsError( - "Can't process plain .py files without an '#egg=name-version'" - " suffix to enable automatic setup script generation." - ) - - dl_blocksize = 8192 - - def _download_to(self, url, filename): - self.info("Downloading %s", url) - # Download the file - fp = None - try: - checker = HashChecker.from_url(url) - fp = self.open_url(url) - if isinstance(fp, urllib.error.HTTPError): - raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code, fp.msg) - ) - headers = fp.info() - blocknum = 0 - bs = self.dl_blocksize - size = -1 - if "content-length" in headers: - # Some servers return multiple Content-Length headers :( - sizes = headers.get_all('Content-Length') - size = max(map(int, sizes)) - self.reporthook(url, filename, blocknum, bs, size) - with open(filename, 'wb') as tfp: - while True: - block = fp.read(bs) - if block: - checker.feed(block) - tfp.write(block) - blocknum += 1 - self.reporthook(url, filename, blocknum, bs, size) - else: - break - self.check_hash(checker, filename, tfp) - return headers - finally: - if fp: - fp.close() - - def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op - - # FIXME: - def open_url(self, url, warning=None): # noqa: C901 # is too complex (12) - if url.startswith('file:'): - return local_open(url) - try: - return open_with_auth(url, self.opener) - except (ValueError, http.client.InvalidURL) as v: - msg = ' '.join([str(arg) for arg in v.args]) - if warning: - self.warn(warning, msg) - else: - raise DistutilsError('%s %s' % (url, msg)) from v - except urllib.error.HTTPError as v: - return v - except urllib.error.URLError as v: - if warning: - self.warn(warning, v.reason) - else: - raise DistutilsError( - "Download error for %s: %s" % (url, v.reason) - ) from v - except http.client.BadStatusLine as v: - if warning: - self.warn(warning, v.line) - else: - raise DistutilsError( - '%s returned a bad status line. 
The server might be ' - 'down, %s' % (url, v.line) - ) from v - except (http.client.HTTPException, socket.error) as v: - if warning: - self.warn(warning, v) - else: - raise DistutilsError("Download error for %s: %s" % (url, v)) from v - - def _download_url(self, scheme, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) - if name: - while '..' in name: - name = name.replace('..', '.').replace('\\', '_') - else: - name = "__downloaded__" # default if URL has no path contents - - if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download - - filename = os.path.join(tmpdir, name) - - # Download the file - # - if scheme == 'svn' or scheme.startswith('svn+'): - return self._download_svn(url, filename) - elif scheme == 'git' or scheme.startswith('git+'): - return self._download_git(url, filename) - elif scheme.startswith('hg+'): - return self._download_hg(url, filename) - elif scheme == 'file': - return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) - else: - self.url_ok(url, True) # raises error if not allowed - return self._attempt_download(url, filename) - - def scan_url(self, url): - self.process_url(url, True) - - def _attempt_download(self, url, filename): - headers = self._download_to(url, filename) - if 'html' in headers.get('content-type', '').lower(): - return self._invalid_download_html(url, headers, filename) - else: - return filename - - def _invalid_download_html(self, url, headers, filename): - os.unlink(filename) - raise DistutilsError(f"Unexpected HTML page found at {url}") - - def _download_svn(self, url, _filename): - raise DistutilsError(f"Invalid config, SVN download is not supported: {url}") - - @staticmethod - def _vcs_split_rev_from_url(url, pop_prefix=False): - scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - - scheme = scheme.split('+', 1)[-1] - - # Some fragment identification fails - path = path.split('#', 1)[0] - - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - - # Also, discard fragment - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) - - return url, rev - - def _download_git(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing git clone from %s to %s", url, filename) - os.system("git clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Checking out %s", rev) - os.system( - "git -C %s checkout --quiet %s" - % ( - filename, - rev, - ) - ) - - return filename - - def _download_hg(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing hg clone from %s to %s", url, filename) - os.system("hg clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Updating to %s", rev) - os.system( - "hg --cwd %s up -C -r %s -q" - % ( - filename, - rev, - ) - ) - - return filename - - def debug(self, msg, *args): - log.debug(msg, *args) - - def info(self, msg, *args): - log.info(msg, *args) - - def warn(self, msg, *args): - log.warn(msg, *args) - - -# This pattern matches a character entity reference (a decimal numeric -# references, a hexadecimal numeric reference, or a named reference). -entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub - - -def decode_entity(match): - what = match.group(0) - return html.unescape(what) - - -def htmldecode(text): - """ - Decode HTML entities in the given text. 
- - >>> htmldecode( - ... 'https://../package_name-0.1.2.tar.gz' - ... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz') - 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz' - """ - return entity_sub(decode_entity, text) - - -def socket_timeout(timeout=15): - def _socket_timeout(func): - def _socket_timeout(*args, **kwargs): - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - return func(*args, **kwargs) - finally: - socket.setdefaulttimeout(old_timeout) - - return _socket_timeout - - return _socket_timeout - - -def _encode_auth(auth): - """ - Encode auth from a URL suitable for an HTTP header. - >>> str(_encode_auth('username%3Apassword')) - 'dXNlcm5hbWU6cGFzc3dvcmQ=' - - Long auth strings should not cause a newline to be inserted. - >>> long_auth = 'username:' + 'password'*10 - >>> chr(10) in str(_encode_auth(long_auth)) - False - """ - auth_s = urllib.parse.unquote(auth) - # convert to bytes - auth_bytes = auth_s.encode() - encoded_bytes = base64.b64encode(auth_bytes) - # convert back to a string - encoded = encoded_bytes.decode() - # strip the trailing carriage return - return encoded.replace('\n', '') - - -class Credential: - """ - A username/password pair. Use like a namedtuple. - """ - - def __init__(self, username, password): - self.username = username - self.password = password - - def __iter__(self): - yield self.username - yield self.password - - def __str__(self): - return '%(username)s:%(password)s' % vars(self) - - -class PyPIConfig(configparser.RawConfigParser): - def __init__(self): - """ - Load from ~/.pypirc - """ - defaults = dict.fromkeys(['username', 'password', 'repository'], '') - super().__init__(defaults) - - rc = os.path.join(os.path.expanduser('~'), '.pypirc') - if os.path.exists(rc): - self.read(rc) - - @property - def creds_by_repository(self): - sections_with_repositories = [ - section - for section in self.sections() - if self.get(section, 'repository').strip() - ] - - return dict(map(self._get_repo_cred, sections_with_repositories)) - - def _get_repo_cred(self, section): - repo = self.get(section, 'repository').strip() - return repo, Credential( - self.get(section, 'username').strip(), - self.get(section, 'password').strip(), - ) - - def find_credential(self, url): - """ - If the URL indicated appears to be a repository defined in this - config, return the credential for that repository. - """ - for repository, cred in self.creds_by_repository.items(): - if url.startswith(repository): - return cred - - -def open_with_auth(url, opener=urllib.request.urlopen): - """Open a urllib2 request, handling HTTP authentication""" - - parsed = urllib.parse.urlparse(url) - scheme, netloc, path, params, query, frag = parsed - - # Double scheme does not raise on macOS as revealed by a - # failing test. We would expect "nonnumeric port". Refs #20. 
- if netloc.endswith(':'): - raise http.client.InvalidURL("nonnumeric port: ''") - - if scheme in ('http', 'https'): - auth, address = _splituser(netloc) - else: - auth = None - - if not auth: - cred = PyPIConfig().find_credential(url) - if cred: - auth = str(cred) - info = cred.username, url - log.info('Authenticating as %s for %s (from .pypirc)', *info) - - if auth: - auth = "Basic " + _encode_auth(auth) - parts = scheme, address, path, params, query, frag - new_url = urllib.parse.urlunparse(parts) - request = urllib.request.Request(new_url) - request.add_header("Authorization", auth) - else: - request = urllib.request.Request(url) - - request.add_header('User-Agent', user_agent) - fp = opener(request) - - if auth: - # Put authentication info back into request URL if same host, - # so that links found on the page will work - s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) - if s2 == scheme and h2 == address: - parts = s2, netloc, path2, param2, query2, frag2 - fp.url = urllib.parse.urlunparse(parts) - - return fp - - -# copy of urllib.parse._splituser from Python 3.8 -def _splituser(host): - """splituser('user[:passwd]@host[:port]') - --> 'user[:passwd]', 'host[:port]'.""" - user, delim, host = host.rpartition('@') - return (user if delim else None), host - - -# adding a timeout to avoid freezing package_index -open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) - - -def fix_sf_url(url): - return url # backward compatibility - - -def local_open(url): - """Read a local path, with special support for directories""" - scheme, server, path, param, query, frag = urllib.parse.urlparse(url) - filename = urllib.request.url2pathname(path) - if os.path.isfile(filename): - return urllib.request.urlopen(url) - elif path.endswith('/') and os.path.isdir(filename): - files = [] - for f in os.listdir(filename): - filepath = os.path.join(filename, f) - if f == 'index.html': - with open(filepath, 'r') as fp: - body = fp.read() - break - elif os.path.isdir(filepath): - f += '/' - files.append('{name}'.format(name=f)) - else: - tmpl = ( - "{url}" "{files}" - ) - body = tmpl.format(url=url, files='\n'.join(files)) - status, message = 200, "OK" - else: - status, message, body = 404, "Path not found", "Not found" - - headers = {'content-type': 'text/html'} - body_stream = io.StringIO(body) - return urllib.error.HTTPError(url, status, message, headers, body_stream) diff --git a/spaces/power2/JoJoGan-powerhow2/e4e/criteria/__init__.py b/spaces/power2/JoJoGan-powerhow2/e4e/criteria/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/power2/sketch/README.md b/spaces/power2/sketch/README.md deleted file mode 100644 index 65609ccdb6c7d9361bcbc9b0cd0075a46f745500..0000000000000000000000000000000000000000 --- a/spaces/power2/sketch/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ✏️Image2LineDrawing GR🖼️ -emoji: 🖼️✏️ -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 2.9.3 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/prasanthntu/dog-vs-cat-classifier/README.md b/spaces/prasanthntu/dog-vs-cat-classifier/README.md deleted file mode 100644 index 04bdbbf5f9b9604c8693b8824fa37c1b9bd4bb22..0000000000000000000000000000000000000000 --- a/spaces/prasanthntu/dog-vs-cat-classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Dog Vs Cat Classifier -emoji: 📚 
-colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/cached.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/cached.py deleted file mode 100644 index 5c495e354aea8079296cc01b316995a18f7b7a9d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/cached.py +++ /dev/null @@ -1,808 +0,0 @@ -from __future__ import annotations - -import inspect -import logging -import os -import tempfile -import time -import weakref -from shutil import rmtree -from typing import TYPE_CHECKING, Any, Callable, ClassVar - -from fsspec import AbstractFileSystem, filesystem -from fsspec.callbacks import _DEFAULT_CALLBACK -from fsspec.compression import compr -from fsspec.core import BaseCache, MMapCache -from fsspec.exceptions import BlocksizeMismatchError -from fsspec.implementations.cache_mapper import create_cache_mapper -from fsspec.implementations.cache_metadata import CacheMetadata -from fsspec.spec import AbstractBufferedFile -from fsspec.utils import infer_compression - -if TYPE_CHECKING: - from fsspec.implementations.cache_mapper import AbstractCacheMapper - -logger = logging.getLogger("fsspec.cached") - - -class CachingFileSystem(AbstractFileSystem): - """Locally caching filesystem, layer over any other FS - - This class implements chunk-wise local storage of remote files, for quick - access after the initial download. The files are stored in a given - directory with hashes of URLs for the filenames. If no directory is given, - a temporary one is used, which should be cleaned up by the OS after the - process ends. The files themselves are sparse (as implemented in - :class:`~fsspec.caching.MMapCache`), so only the data which is accessed - takes up space. - - Restrictions: - - - the block-size must be the same for each access of a given file, unless - all blocks of the file have already been read - - caching can only be applied to file-systems which produce files - derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also - allowed, for testing - """ - - protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached") - - def __init__( - self, - target_protocol=None, - cache_storage="TMP", - cache_check=10, - check_files=False, - expiry_time=604800, - target_options=None, - fs=None, - same_names: bool | None = None, - compression=None, - cache_mapper: AbstractCacheMapper | None = None, - **kwargs, - ): - """ - - Parameters - ---------- - target_protocol: str (optional) - Target filesystem protocol. Provide either this or ``fs``. - cache_storage: str or list(str) - Location to store files. If "TMP", this is a temporary directory, - and will be cleaned up by the OS when this process ends (or later). - If a list, each location will be tried in the order given, but - only the last will be considered writable. - cache_check: int - Number of seconds between reload of cache metadata - check_files: bool - Whether to explicitly see if the UID of the remote file matches - the stored one before using. Warning: some file systems such as - HTTP cannot reliably give a unique hash of the contents of some - path, so be sure to set this option to False. - expiry_time: int - The time in seconds after which a local copy is considered useless. - Set to falsy to prevent expiry. The default is equivalent to one - week. 
- target_options: dict or None - Passed to the instantiation of the FS, if fs is None. - fs: filesystem instance - The target filesystem to run against. Provide this or ``protocol``. - same_names: bool (optional) - By default, target URLs are hashed using a ``HashCacheMapper`` so - that files from different backends with the same basename do not - conflict. If this argument is ``true``, a ``BasenameCacheMapper`` - is used instead. Other cache mapper options are available by using - the ``cache_mapper`` keyword argument. Only one of this and - ``cache_mapper`` should be specified. - compression: str (optional) - To decompress on download. Can be 'infer' (guess from the URL name), - one of the entries in ``fsspec.compression.compr``, or None for no - decompression. - cache_mapper: AbstractCacheMapper (optional) - The object use to map from original filenames to cached filenames. - Only one of this and ``same_names`` should be specified. - """ - super().__init__(**kwargs) - if fs is None and target_protocol is None: - raise ValueError( - "Please provide filesystem instance(fs) or target_protocol" - ) - if not (fs is None) ^ (target_protocol is None): - raise ValueError( - "Both filesystems (fs) and target_protocol may not be both given." - ) - if cache_storage == "TMP": - tempdir = tempfile.mkdtemp() - storage = [tempdir] - weakref.finalize(self, self._remove_tempdir, tempdir) - else: - if isinstance(cache_storage, str): - storage = [cache_storage] - else: - storage = cache_storage - os.makedirs(storage[-1], exist_ok=True) - self.storage = storage - self.kwargs = target_options or {} - self.cache_check = cache_check - self.check_files = check_files - self.expiry = expiry_time - self.compression = compression - - # Size of cache in bytes. If None then the size is unknown and will be - # recalculated the next time cache_size() is called. On writes to the - # cache this is reset to None. - self._cache_size = None - - if same_names is not None and cache_mapper is not None: - raise ValueError( - "Cannot specify both same_names and cache_mapper in " - "CachingFileSystem.__init__" - ) - if cache_mapper is not None: - self._mapper = cache_mapper - else: - self._mapper = create_cache_mapper( - same_names if same_names is not None else False - ) - - self.target_protocol = ( - target_protocol - if isinstance(target_protocol, str) - else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]) - ) - self._metadata = CacheMetadata(self.storage) - self.load_cache() - self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs) - - def _strip_protocol(path): - # acts as a method, since each instance has a difference target - return self.fs._strip_protocol(type(self)._strip_protocol(path)) - - self._strip_protocol: Callable = _strip_protocol - - @staticmethod - def _remove_tempdir(tempdir): - try: - rmtree(tempdir) - except Exception: - pass - - def _mkcache(self): - os.makedirs(self.storage[-1], exist_ok=True) - - def cache_size(self): - """Return size of cache in bytes. - - If more than one cache directory is in use, only the size of the last - one (the writable cache directory) is returned. 
- """ - if self._cache_size is None: - cache_dir = self.storage[-1] - self._cache_size = filesystem("file").du(cache_dir, withdirs=True) - return self._cache_size - - def load_cache(self): - """Read set of stored blocks from file""" - self._metadata.load() - self._mkcache() - self.last_cache = time.time() - - def save_cache(self): - """Save set of stored blocks from file""" - self._mkcache() - self._metadata.save() - self.last_cache = time.time() - self._cache_size = None - - def _check_cache(self): - """Reload caches if time elapsed or any disappeared""" - self._mkcache() - if not self.cache_check: - # explicitly told not to bother checking - return - timecond = time.time() - self.last_cache > self.cache_check - existcond = all(os.path.exists(storage) for storage in self.storage) - if timecond or not existcond: - self.load_cache() - - def _check_file(self, path): - """Is path in cache and still valid""" - path = self._strip_protocol(path) - self._check_cache() - return self._metadata.check_file(path, self) - - def clear_cache(self): - """Remove all files and metadata from the cache - - In the case of multiple cache locations, this clears only the last one, - which is assumed to be the read/write one. - """ - rmtree(self.storage[-1]) - self.load_cache() - self._cache_size = None - - def clear_expired_cache(self, expiry_time=None): - """Remove all expired files and metadata from the cache - - In the case of multiple cache locations, this clears only the last one, - which is assumed to be the read/write one. - - Parameters - ---------- - expiry_time: int - The time in seconds after which a local copy is considered useless. - If not defined the default is equivalent to the attribute from the - file caching instantiation. - """ - - if not expiry_time: - expiry_time = self.expiry - - self._check_cache() - - expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time) - for fn in expired_files: - if os.path.exists(fn): - os.remove(fn) - - if writable_cache_empty: - rmtree(self.storage[-1]) - self.load_cache() - - self._cache_size = None - - def pop_from_cache(self, path): - """Remove cached version of given file - - Deletes local copy of the given (remote) path. If it is found in a cache - location which is not the last, it is assumed to be read-only, and - raises PermissionError - """ - path = self._strip_protocol(path) - fn = self._metadata.pop_file(path) - if fn is not None: - os.remove(fn) - self._cache_size = None - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - **kwargs, - ): - """Wrap the target _open - - If the whole file exists in the cache, just open it locally and - return that. - - Otherwise, open the file on the target FS, and make it have a mmap - cache pointing to the location which we determine, in our cache. - The ``blocks`` instance is shared, so as the mmap cache instance - updates, so does the entry in our ``cached_files`` attribute. - We monkey-patch this file, so that when it closes, we call - ``close_and_update`` to save the state of the blocks. 
- """ - path = self._strip_protocol(path) - - path = self.fs._strip_protocol(path) - if "r" not in mode: - return self.fs._open( - path, - mode=mode, - block_size=block_size, - autocommit=autocommit, - cache_options=cache_options, - **kwargs, - ) - detail = self._check_file(path) - if detail: - # file is in cache - detail, fn = detail - hash, blocks = detail["fn"], detail["blocks"] - if blocks is True: - # stored file is complete - logger.debug("Opening local copy of %s", path) - return open(fn, mode) - # TODO: action where partial file exists in read-only cache - logger.debug("Opening partially cached copy of %s", path) - else: - hash = self._mapper(path) - fn = os.path.join(self.storage[-1], hash) - blocks = set() - detail = { - "original": path, - "fn": hash, - "blocks": blocks, - "time": time.time(), - "uid": self.fs.ukey(path), - } - self._metadata.update_file(path, detail) - logger.debug("Creating local sparse file for %s", path) - - # call target filesystems open - self._mkcache() - f = self.fs._open( - path, - mode=mode, - block_size=block_size, - autocommit=autocommit, - cache_options=cache_options, - cache_type="none", - **kwargs, - ) - if self.compression: - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - if "blocksize" in detail: - if detail["blocksize"] != f.blocksize: - raise BlocksizeMismatchError( - f"Cached file must be reopened with same block" - f" size as original (old: {detail['blocksize']}," - f" new {f.blocksize})" - ) - else: - detail["blocksize"] = f.blocksize - f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks) - close = f.close - f.close = lambda: self.close_and_update(f, close) - self.save_cache() - return f - - def hash_name(self, path: str, *args: Any) -> str: - # Kept for backward compatibility with downstream libraries. - # Ignores extra arguments, previously same_name boolean. - return self._mapper(path) - - def close_and_update(self, f, close): - """Called when a file is closing, so store the set of blocks""" - if f.closed: - return - path = self._strip_protocol(f.path) - self._metadata.on_close_cached_file(f, path) - try: - logger.debug("going to save") - self.save_cache() - logger.debug("saved") - except OSError: - logger.debug("Cache saving failed while closing file") - except NameError: - logger.debug("Cache save failed due to interpreter shutdown") - close() - f.closed = True - - def __getattribute__(self, item): - if item in [ - "load_cache", - "_open", - "save_cache", - "close_and_update", - "__init__", - "__getattribute__", - "__reduce__", - "_make_local_details", - "open", - "cat", - "cat_file", - "get", - "read_block", - "tail", - "head", - "_check_file", - "_check_cache", - "_mkcache", - "clear_cache", - "clear_expired_cache", - "pop_from_cache", - "_mkcache", - "local_file", - "_paths_from_path", - "get_mapper", - "open_many", - "commit_many", - "hash_name", - "__hash__", - "__eq__", - "to_json", - "cache_size", - ]: - # all the methods defined in this class. 
Note `open` here, since - # it calls `_open`, but is actually in superclass - return lambda *args, **kw: getattr(type(self), item).__get__(self)( - *args, **kw - ) - if item in ["__reduce_ex__"]: - raise AttributeError - if item in ["_cache"]: - # class attributes - return getattr(type(self), item) - if item == "__class__": - return type(self) - d = object.__getattribute__(self, "__dict__") - fs = d.get("fs", None) # fs is not immediately defined - if item in d: - return d[item] - elif fs is not None: - if item in fs.__dict__: - # attribute of instance - return fs.__dict__[item] - # attributed belonging to the target filesystem - cls = type(fs) - m = getattr(cls, item) - if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and ( - not hasattr(m, "__self__") or m.__self__ is None - ): - # instance method - return m.__get__(fs, cls) - return m # class method or attribute - else: - # attributes of the superclass, while target is being set up - return super().__getattribute__(item) - - def __eq__(self, other): - """Test for equality.""" - if self is other: - return True - if not isinstance(other, type(self)): - return False - return ( - self.storage == other.storage - and self.kwargs == other.kwargs - and self.cache_check == other.cache_check - and self.check_files == other.check_files - and self.expiry == other.expiry - and self.compression == other.compression - and self._mapper == other._mapper - and self.target_protocol == other.target_protocol - ) - - def __hash__(self): - """Calculate hash.""" - return ( - hash(tuple(self.storage)) - ^ hash(str(self.kwargs)) - ^ hash(self.cache_check) - ^ hash(self.check_files) - ^ hash(self.expiry) - ^ hash(self.compression) - ^ hash(self._mapper) - ^ hash(self.target_protocol) - ) - - def to_json(self): - """Calculate JSON representation. - - Not implemented yet for CachingFileSystem. - """ - raise NotImplementedError( - "CachingFileSystem JSON representation not implemented" - ) - - -class WholeFileCacheFileSystem(CachingFileSystem): - """Caches whole remote files on first access - - This class is intended as a layer over any other file system, and - will make a local copy of each file accessed, so that all subsequent - reads are local. This is similar to ``CachingFileSystem``, but without - the block-wise functionality and so can work even when sparse files - are not allowed. See its docstring for definition of the init - arguments. - - The class still needs access to the remote store for listing files, - and may refresh cached files. 
- """ - - protocol = "filecache" - local_file = True - - def open_many(self, open_files): - paths = [of.path for of in open_files] - if "r" in open_files.mode: - self._mkcache() - else: - return [ - LocalTempFile(self.fs, path, mode=open_files.mode) for path in paths - ] - - if self.compression: - raise NotImplementedError - details = [self._check_file(sp) for sp in paths] - downpath = [p for p, d in zip(paths, details) if not d] - downfn0 = [ - os.path.join(self.storage[-1], self._mapper(p)) - for p, d in zip(paths, details) - ] # keep these path names for opening later - downfn = [fn for fn, d in zip(downfn0, details) if not d] - if downpath: - # skip if all files are already cached and up to date - self.fs.get(downpath, downfn) - - # update metadata - only happens when downloads are successful - newdetail = [ - { - "original": path, - "fn": self._mapper(path), - "blocks": True, - "time": time.time(), - "uid": self.fs.ukey(path), - } - for path in downpath - ] - for path, detail in zip(downpath, newdetail): - self._metadata.update_file(path, detail) - self.save_cache() - - def firstpart(fn): - # helper to adapt both whole-file and simple-cache - return fn[1] if isinstance(fn, tuple) else fn - - return [ - open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode) - for fn0, fn1 in zip(details, downfn0) - ] - - def commit_many(self, open_files): - self.fs.put([f.fn for f in open_files], [f.path for f in open_files]) - [f.close() for f in open_files] - for f in open_files: - # in case autocommit is off, and so close did not already delete - try: - os.remove(f.name) - except FileNotFoundError: - pass - self._cache_size = None - - def _make_local_details(self, path): - hash = self._mapper(path) - fn = os.path.join(self.storage[-1], hash) - detail = { - "original": path, - "fn": hash, - "blocks": True, - "time": time.time(), - "uid": self.fs.ukey(path), - } - self._metadata.update_file(path, detail) - logger.debug("Copying %s to local cache", path) - return fn - - def cat( - self, - path, - recursive=False, - on_error="raise", - callback=_DEFAULT_CALLBACK, - **kwargs, - ): - paths = self.expand_path( - path, recursive=recursive, maxdepth=kwargs.get("maxdepth", None) - ) - getpaths = [] - storepaths = [] - fns = [] - out = {} - for p in paths.copy(): - try: - detail = self._check_file(p) - if not detail: - fn = self._make_local_details(p) - getpaths.append(p) - storepaths.append(fn) - else: - detail, fn = detail if isinstance(detail, tuple) else (None, detail) - fns.append(fn) - except Exception as e: - if on_error == "raise": - raise - if on_error == "return": - out[p] = e - paths.remove(p) - - if getpaths: - self.fs.get(getpaths, storepaths) - self.save_cache() - - callback.set_size(len(paths)) - for p, fn in zip(paths, fns): - with open(fn, "rb") as f: - out[p] = f.read() - callback.relative_update(1) - if isinstance(path, str) and len(paths) == 1 and recursive is False: - out = out[paths[0]] - return out - - def _open(self, path, mode="rb", **kwargs): - path = self._strip_protocol(path) - if "r" not in mode: - return LocalTempFile(self, path, mode=mode) - detail = self._check_file(path) - if detail: - detail, fn = detail - _, blocks = detail["fn"], detail["blocks"] - if blocks is True: - logger.debug("Opening local copy of %s", path) - - # In order to support downstream filesystems to be able to - # infer the compression from the original filename, like - # the `TarFileSystem`, let's extend the `io.BufferedReader` - # fileobject protocol by adding a dedicated attribute - # `original`. 
- f = open(fn, mode) - f.original = detail.get("original") - return f - else: - raise ValueError( - f"Attempt to open partially cached file {path}" - f" as a wholly cached file" - ) - else: - fn = self._make_local_details(path) - kwargs["mode"] = mode - - # call target filesystems open - self._mkcache() - if self.compression: - with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: - if isinstance(f, AbstractBufferedFile): - # want no type of caching if just downloading whole thing - f.cache = BaseCache(0, f.cache.fetcher, f.size) - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - data = True - while data: - block = getattr(f, "blocksize", 5 * 2**20) - data = f.read(block) - f2.write(data) - else: - self.fs.get_file(path, fn) - self.save_cache() - return self._open(path, mode) - - -class SimpleCacheFileSystem(WholeFileCacheFileSystem): - """Caches whole remote files on first access - - This class is intended as a layer over any other file system, and - will make a local copy of each file accessed, so that all subsequent - reads are local. This implementation only copies whole files, and - does not keep any metadata about the download time or file details. - It is therefore safer to use in multi-threaded/concurrent situations. - - This is the only of the caching filesystems that supports write: you will - be given a real local open file, and upon close and commit, it will be - uploaded to the target filesystem; the writability or the target URL is - not checked until that time. - - """ - - protocol = "simplecache" - local_file = True - - def __init__(self, **kwargs): - kw = kwargs.copy() - for key in ["cache_check", "expiry_time", "check_files"]: - kw[key] = False - super().__init__(**kw) - for storage in self.storage: - if not os.path.exists(storage): - os.makedirs(storage, exist_ok=True) - - def _check_file(self, path): - self._check_cache() - sha = self._mapper(path) - for storage in self.storage: - fn = os.path.join(storage, sha) - if os.path.exists(fn): - return fn - - def save_cache(self): - pass - - def load_cache(self): - pass - - def _open(self, path, mode="rb", **kwargs): - path = self._strip_protocol(path) - - if "r" not in mode: - return LocalTempFile(self, path, mode=mode) - fn = self._check_file(path) - if fn: - return open(fn, mode) - - sha = self._mapper(path) - fn = os.path.join(self.storage[-1], sha) - logger.debug("Copying %s to local cache", path) - kwargs["mode"] = mode - - self._mkcache() - self._cache_size = None - if self.compression: - with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: - if isinstance(f, AbstractBufferedFile): - # want no type of caching if just downloading whole thing - f.cache = BaseCache(0, f.cache.fetcher, f.size) - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - data = True - while data: - block = getattr(f, "blocksize", 5 * 2**20) - data = f.read(block) - f2.write(data) - else: - self.fs.get_file(path, fn) - return self._open(path, mode) - - -class LocalTempFile: - """A temporary local file, which will be uploaded on commit""" - - def __init__(self, fs, path, fn=None, mode="wb", autocommit=True, seek=0): - if fn: - self.fn = fn - self.fh = open(fn, mode) - else: - fd, self.fn = tempfile.mkstemp() - self.fh = open(fd, mode) - self.mode = mode - if seek: - self.fh.seek(seek) - self.path = path - self.fs = fs - self.closed = False - self.autocommit = autocommit - - 
def __reduce__(self): - # always open in rb+ to allow continuing writing at a location - return ( - LocalTempFile, - (self.fs, self.path, self.fn, "rb+", self.autocommit, self.tell()), - ) - - def __enter__(self): - return self.fh - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def close(self): - if self.closed: - return - self.fh.close() - self.closed = True - if self.autocommit: - self.commit() - - def discard(self): - self.fh.close() - os.remove(self.fn) - - def commit(self): - self.fs.put(self.fn, self.path) - try: - os.remove(self.fn) - except (PermissionError, FileNotFoundError): - # file path may be held by new version of the file on windows - pass - - @property - def name(self): - return self.fn - - def __getattr__(self, item): - return getattr(self.fh, item) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/gallery/shared/utils.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/gallery/shared/utils.ts deleted file mode 100644 index 221e474b582956a46a6413639f918759f71867bc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/gallery/shared/utils.ts +++ /dev/null @@ -1,18 +0,0 @@ -import { uploadToHuggingFace } from "@gradio/utils"; -import type { FileData } from "@gradio/client"; - -export async function format_gallery_for_sharing( - value: [FileData, string | null][] | null -): Promise { - if (!value) return ""; - let urls = await Promise.all( - value.map(async ([image, _]) => { - if (image === null || !image.url) return ""; - return await uploadToHuggingFace(image.url, "url"); - }) - ); - - return `
      ${urls - .map((url) => `<img src="${url}" style="height: 400px" />`) - .join("")}
      `; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/statustracker/static/index.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/statustracker/static/index.ts deleted file mode 100644 index 550211d0afefb048f45d16ea18032e1462a6088c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/statustracker/static/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -export { default as StatusTracker } from "./index.svelte"; -export { default as Toast } from "./Toast.svelte"; -export { default as Loader } from "./Loader.svelte"; -export type * from "./types"; -export { default } from "./index.svelte"; diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/packaging/tags.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/packaging/tags.py deleted file mode 100644 index 37f33b1ef849ed9e22a6dd44395c61654a9b7d7a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/packaging/tags.py +++ /dev/null @@ -1,553 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import logging -import platform -import struct -import subprocess -import sys -import sysconfig -from importlib.machinery import EXTENSION_SUFFIXES -from typing import ( - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from . import _manylinux, _musllinux - -logger = logging.getLogger(__name__) - -PythonVersion = Sequence[int] -MacVersion = Tuple[int, int] - -INTERPRETER_SHORT_NAMES: Dict[str, str] = { - "python": "py", # Generic. - "cpython": "cp", - "pypy": "pp", - "ironpython": "ip", - "jython": "jy", -} - - -_32_BIT_INTERPRETER = struct.calcsize("P") == 4 - - -class Tag: - """ - A representation of the tag triple for a wheel. - - Instances are considered immutable and thus are hashable. Equality checking - is also supported. - """ - - __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - - def __init__(self, interpreter: str, abi: str, platform: str) -> None: - self._interpreter = interpreter.lower() - self._abi = abi.lower() - self._platform = platform.lower() - # The __hash__ of every single element in a Set[Tag] will be evaluated each time - # that a set calls its `.disjoint()` method, which may be called hundreds of - # times when scanning a page of links for packages with tags matching that - # Set[Tag]. Pre-computing the value here produces significant speedups for - # downstream consumers. - self._hash = hash((self._interpreter, self._abi, self._platform)) - - @property - def interpreter(self) -> str: - return self._interpreter - - @property - def abi(self) -> str: - return self._abi - - @property - def platform(self) -> str: - return self._platform - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Tag): - return NotImplemented - - return ( - (self._hash == other._hash) # Short-circuit ASAP for perf reasons. 
- and (self._platform == other._platform) - and (self._abi == other._abi) - and (self._interpreter == other._interpreter) - ) - - def __hash__(self) -> int: - return self._hash - - def __str__(self) -> str: - return f"{self._interpreter}-{self._abi}-{self._platform}" - - def __repr__(self) -> str: - return f"<{self} @ {id(self)}>" - - -def parse_tag(tag: str) -> FrozenSet[Tag]: - """ - Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. - - Returning a set is required due to the possibility that the tag is a - compressed tag set. - """ - tags = set() - interpreters, abis, platforms = tag.split("-") - for interpreter in interpreters.split("."): - for abi in abis.split("."): - for platform_ in platforms.split("."): - tags.add(Tag(interpreter, abi, platform_)) - return frozenset(tags) - - -def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value: Union[int, str, None] = sysconfig.get_config_var(name) - if value is None and warn: - logger.debug( - "Config variable '%s' is unset, Python ABI tag may be incorrect", name - ) - return value - - -def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_").replace(" ", "_") - - -def _abi3_applies(python_version: PythonVersion) -> bool: - """ - Determine if the Python version supports abi3. - - PEP 384 was first implemented in Python 3.2. - """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) - - -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: - py_version = tuple(py_version) # To allow for version comparison. - abis = [] - version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" - with_debug = _get_config_var("Py_DEBUG", warn) - has_refcount = hasattr(sys, "gettotalrefcount") - # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled - # extension modules is the best option. - # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 - has_ext = "_d.pyd" in EXTENSION_SUFFIXES - if with_debug or (with_debug is None and (has_refcount or has_ext)): - debug = "d" - if py_version < (3, 8): - with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) - if with_pymalloc or with_pymalloc is None: - pymalloc = "m" - if py_version < (3, 3): - unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) - if unicode_size == 4 or ( - unicode_size is None and sys.maxunicode == 0x10FFFF - ): - ucs4 = "u" - elif debug: - # Debug builds can also load "normal" extension modules. - # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) - return abis - - -def cpython_tags( - python_version: Optional[PythonVersion] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a CPython interpreter. - - The tags consist of: - - cp-- - - cp-abi3- - - cp-none- - - cp-abi3- # Older Python versions down to 3.2. - - If python_version only specifies a major version then user-provided ABIs and - the 'none' ABItag will be used. - - If 'abi3' or 'none' are specified in 'abis' then they will be yielded at - their normal position and not at the beginning. 
- """ - if not python_version: - python_version = sys.version_info[:2] - - interpreter = f"cp{_version_nodot(python_version[:2])}" - - if abis is None: - if len(python_version) > 1: - abis = _cpython_abis(python_version, warn) - else: - abis = [] - abis = list(abis) - # 'abi3' and 'none' are explicitly handled later. - for explicit_abi in ("abi3", "none"): - try: - abis.remove(explicit_abi) - except ValueError: - pass - - platforms = list(platforms or platform_tags()) - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): - yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) - yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - - if _abi3_applies(python_version): - for minor_version in range(python_version[1] - 1, 1, -1): - for platform_ in platforms: - interpreter = "cp{version}".format( - version=_version_nodot((python_version[0], minor_version)) - ) - yield Tag(interpreter, "abi3", platform_) - - -def _generic_abi() -> List[str]: - """ - Return the ABI tag based on EXT_SUFFIX. - """ - # The following are examples of `EXT_SUFFIX`. - # We want to keep the parts which are related to the ABI and remove the - # parts which are related to the platform: - # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 - # - mac: '.cpython-310-darwin.so' => cp310 - # - win: '.cp310-win_amd64.pyd' => cp310 - # - win: '.pyd' => cp37 (uses _cpython_abis()) - # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 - # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' - # => graalpy_38_native - - ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) - if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": - raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") - parts = ext_suffix.split(".") - if len(parts) < 3: - # CPython3.7 and earlier uses ".pyd" on Windows. - return _cpython_abis(sys.version_info[:2]) - soabi = parts[1] - if soabi.startswith("cpython"): - # non-windows - abi = "cp" + soabi.split("-")[1] - elif soabi.startswith("cp"): - # windows - abi = soabi.split("-")[0] - elif soabi.startswith("pypy"): - abi = "-".join(soabi.split("-")[:2]) - elif soabi.startswith("graalpy"): - abi = "-".join(soabi.split("-")[:3]) - elif soabi: - # pyston, ironpython, others? - abi = soabi - else: - return [] - return [_normalize_string(abi)] - - -def generic_tags( - interpreter: Optional[str] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a generic interpreter. - - The tags consist of: - - -- - - The "none" ABI will be added if it was not explicitly provided. - """ - if not interpreter: - interp_name = interpreter_name() - interp_version = interpreter_version(warn=warn) - interpreter = "".join([interp_name, interp_version]) - if abis is None: - abis = _generic_abi() - else: - abis = list(abis) - platforms = list(platforms or platform_tags()) - if "none" not in abis: - abis.append("none") - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - - -def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: - """ - Yields Python versions in descending order. - - After the latest version, the major-only version will be yielded, and then - all previous versions of that major version. 
- """ - if len(py_version) > 1: - yield f"py{_version_nodot(py_version[:2])}" - yield f"py{py_version[0]}" - if len(py_version) > 1: - for minor in range(py_version[1] - 1, -1, -1): - yield f"py{_version_nodot((py_version[0], minor))}" - - -def compatible_tags( - python_version: Optional[PythonVersion] = None, - interpreter: Optional[str] = None, - platforms: Optional[Iterable[str]] = None, -) -> Iterator[Tag]: - """ - Yields the sequence of tags that are compatible with a specific version of Python. - - The tags consist of: - - py*-none- - - -none-any # ... if `interpreter` is provided. - - py*-none-any - """ - if not python_version: - python_version = sys.version_info[:2] - platforms = list(platforms or platform_tags()) - for version in _py_interpreter_range(python_version): - for platform_ in platforms: - yield Tag(version, "none", platform_) - if interpreter: - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(python_version): - yield Tag(version, "none", "any") - - -def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: - if not is_32bit: - return arch - - if arch.startswith("ppc"): - return "ppc" - - return "i386" - - -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: - formats = [cpu_arch] - if cpu_arch == "x86_64": - if version < (10, 4): - return [] - formats.extend(["intel", "fat64", "fat32"]) - - elif cpu_arch == "i386": - if version < (10, 4): - return [] - formats.extend(["intel", "fat32", "fat"]) - - elif cpu_arch == "ppc64": - # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? - if version > (10, 5) or version < (10, 4): - return [] - formats.append("fat64") - - elif cpu_arch == "ppc": - if version > (10, 6): - return [] - formats.extend(["fat32", "fat"]) - - if cpu_arch in {"arm64", "x86_64"}: - formats.append("universal2") - - if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: - formats.append("universal") - - return formats - - -def mac_platforms( - version: Optional[MacVersion] = None, arch: Optional[str] = None -) -> Iterator[str]: - """ - Yields the platform tags for a macOS system. - - The `version` parameter is a two-item tuple specifying the macOS version to - generate platform tags for. The `arch` parameter is the CPU architecture to - generate platform tags for. Both parameters default to the appropriate value - for the current system. - """ - version_str, _, cpu_arch = platform.mac_ver() - if version is None: - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - if version == (10, 16): - # When built against an older macOS SDK, Python will report macOS 10.16 - # instead of the real version. - version_str = subprocess.run( - [ - sys.executable, - "-sS", - "-c", - "import platform; print(platform.mac_ver()[0])", - ], - check=True, - env={"SYSTEM_VERSION_COMPAT": "0"}, - stdout=subprocess.PIPE, - text=True, - ).stdout - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - else: - version = version - if arch is None: - arch = _mac_arch(cpu_arch) - else: - arch = arch - - if (10, 0) <= version and version < (11, 0): - # Prior to Mac OS 11, each yearly release of Mac OS bumped the - # "minor" version number. The major version was always 10. 
- for minor_version in range(version[1], -1, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=10, minor=minor_version, binary_format=binary_format - ) - - if version >= (11, 0): - # Starting with Mac OS 11, each yearly release bumps the major version - # number. The minor versions are now the midyear updates. - for major_version in range(version[0], 10, -1): - compat_version = major_version, 0 - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=major_version, minor=0, binary_format=binary_format - ) - - if version >= (11, 0): - # Mac OS 11 on x86_64 is compatible with binaries from previous releases. - # Arm64 support was introduced in 11.0, so no Arm binaries from previous - # releases exist. - # - # However, the "universal2" binary format can have a - # macOS version earlier than 11.0 when the x86_64 part of the binary supports - # that version of macOS. - if arch == "x86_64": - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - else: - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_format = "universal2" - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - - -def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: - linux = _normalize_string(sysconfig.get_platform()) - if not linux.startswith("linux_"): - # we should never be here, just yield the sysconfig one and return - yield linux - return - if is_32bit: - if linux == "linux_x86_64": - linux = "linux_i686" - elif linux == "linux_aarch64": - linux = "linux_armv8l" - _, arch = linux.split("_", 1) - archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) - yield from _manylinux.platform_tags(archs) - yield from _musllinux.platform_tags(archs) - for arch in archs: - yield f"linux_{arch}" - - -def _generic_platforms() -> Iterator[str]: - yield _normalize_string(sysconfig.get_platform()) - - -def platform_tags() -> Iterator[str]: - """ - Provides the platform tags for this installation. - """ - if platform.system() == "Darwin": - return mac_platforms() - elif platform.system() == "Linux": - return _linux_platforms() - else: - return _generic_platforms() - - -def interpreter_name() -> str: - """ - Returns the name of the running interpreter. - - Some implementations have a reserved, two-letter abbreviation which will - be returned when appropriate. - """ - name = sys.implementation.name - return INTERPRETER_SHORT_NAMES.get(name) or name - - -def interpreter_version(*, warn: bool = False) -> str: - """ - Returns the version of the running interpreter. - """ - version = _get_config_var("py_version_nodot", warn=warn) - if version: - version = str(version) - else: - version = _version_nodot(sys.version_info[:2]) - return version - - -def _version_nodot(version: PythonVersion) -> str: - return "".join(map(str, version)) - - -def sys_tags(*, warn: bool = False) -> Iterator[Tag]: - """ - Returns the sequence of tag triples for the running interpreter. 
- - The order of the sequence corresponds to priority order for the - interpreter, from most to least important. - """ - - interp_name = interpreter_name() - if interp_name == "cp": - yield from cpython_tags(warn=warn) - else: - yield from generic_tags() - - if interp_name == "pp": - interp = "pp3" - elif interp_name == "cp": - interp = "cp" + interpreter_version(warn=warn) - else: - interp = None - yield from compatible_tags(interpreter=interp) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py deleted file mode 100644 index 8c0e401f991a62ec2edd9147127e5837d040b01a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py +++ /dev/null @@ -1,484 +0,0 @@ -# being a bit too dynamic -from __future__ import annotations - -from math import ceil -from typing import TYPE_CHECKING -import warnings - -from matplotlib import ticker -import matplotlib.table -import numpy as np - -from pandas.util._exceptions import find_stack_level - -from pandas.core.dtypes.common import is_list_like -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCIndex, - ABCSeries, -) - -if TYPE_CHECKING: - from collections.abc import ( - Iterable, - Sequence, - ) - - from matplotlib.axes import Axes - from matplotlib.axis import Axis - from matplotlib.figure import Figure - from matplotlib.lines import Line2D - from matplotlib.table import Table - - from pandas import ( - DataFrame, - Series, - ) - - -def do_adjust_figure(fig: Figure) -> bool: - """Whether fig has constrained_layout enabled.""" - if not hasattr(fig, "get_constrained_layout"): - return False - return not fig.get_constrained_layout() - - -def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: - """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" - if do_adjust_figure(fig): - fig.subplots_adjust(*args, **kwargs) - - -def format_date_labels(ax: Axes, rot) -> None: - # mini version of autofmt_xdate - for label in ax.get_xticklabels(): - label.set_ha("right") - label.set_rotation(rot) - fig = ax.get_figure() - maybe_adjust_figure(fig, bottom=0.2) - - -def table( - ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs -) -> Table: - if isinstance(data, ABCSeries): - data = data.to_frame() - elif isinstance(data, ABCDataFrame): - pass - else: - raise ValueError("Input data must be DataFrame or Series") - - if rowLabels is None: - rowLabels = data.index - - if colLabels is None: - colLabels = data.columns - - cellText = data.values - - return matplotlib.table.table( - ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs - ) - - -def _get_layout( - nplots: int, - layout: tuple[int, int] | None = None, - layout_type: str = "box", -) -> tuple[int, int]: - if layout is not None: - if not isinstance(layout, (tuple, list)) or len(layout) != 2: - raise ValueError("Layout must be a tuple of (rows, columns)") - - nrows, ncols = layout - - if nrows == -1 and ncols > 0: - layout = nrows, ncols = (ceil(nplots / ncols), ncols) - elif ncols == -1 and nrows > 0: - layout = nrows, ncols = (nrows, ceil(nplots / nrows)) - elif ncols <= 0 and nrows <= 0: - msg = "At least one dimension of layout must be positive" - raise ValueError(msg) - - if nrows * ncols < nplots: - raise ValueError( - f"Layout of {nrows}x{ncols} must be larger than required size {nplots}" 
- ) - - return layout - - if layout_type == "single": - return (1, 1) - elif layout_type == "horizontal": - return (1, nplots) - elif layout_type == "vertical": - return (nplots, 1) - - layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} - try: - return layouts[nplots] - except KeyError: - k = 1 - while k**2 < nplots: - k += 1 - - if (k - 1) * k >= nplots: - return k, (k - 1) - else: - return k, k - - -# copied from matplotlib/pyplot.py and modified for pandas.plotting - - -def create_subplots( - naxes: int, - sharex: bool = False, - sharey: bool = False, - squeeze: bool = True, - subplot_kw=None, - ax=None, - layout=None, - layout_type: str = "box", - **fig_kw, -): - """ - Create a figure with a set of subplots already made. - - This utility wrapper makes it convenient to create common layouts of - subplots, including the enclosing figure object, in a single call. - - Parameters - ---------- - naxes : int - Number of required axes. Exceeded axes are set invisible. Default is - nrows * ncols. - - sharex : bool - If True, the X axis will be shared amongst all subplots. - - sharey : bool - If True, the Y axis will be shared amongst all subplots. - - squeeze : bool - - If True, extra dimensions are squeezed out from the returned axis object: - - if only one subplot is constructed (nrows=ncols=1), the resulting - single Axis object is returned as a scalar. - - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object - array of Axis objects are returned as numpy 1-d arrays. - - for NxM subplots with N>1 and M>1 are returned as a 2d array. - - If False, no squeezing is done: the returned axis object is always - a 2-d array containing Axis instances, even if it ends up being 1x1. - - subplot_kw : dict - Dict with keywords passed to the add_subplot() call used to create each - subplots. - - ax : Matplotlib axis object, optional - - layout : tuple - Number of rows and columns of the subplot grid. - If not specified, calculated from naxes and layout_type - - layout_type : {'box', 'horizontal', 'vertical'}, default 'box' - Specify how to layout the subplot grid. - - fig_kw : Other keyword arguments to be passed to the figure() call. - Note that all keywords not recognized above will be - automatically included here. - - Returns - ------- - fig, ax : tuple - - fig is the Matplotlib Figure object - - ax can be either a single axis object or an array of axis objects if - more than one subplot was created. The dimensions of the resulting array - can be controlled with the squeeze keyword, see above. - - Examples - -------- - x = np.linspace(0, 2*np.pi, 400) - y = np.sin(x**2) - - # Just a figure and one subplot - f, ax = plt.subplots() - ax.plot(x, y) - ax.set_title('Simple plot') - - # Two subplots, unpack the output array immediately - f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) - ax1.plot(x, y) - ax1.set_title('Sharing Y axis') - ax2.scatter(x, y) - - # Four polar axes - plt.subplots(2, 2, subplot_kw=dict(polar=True)) - """ - import matplotlib.pyplot as plt - - if subplot_kw is None: - subplot_kw = {} - - if ax is None: - fig = plt.figure(**fig_kw) - else: - if is_list_like(ax): - if squeeze: - ax = flatten_axes(ax) - if layout is not None: - warnings.warn( - "When passing multiple axes, layout keyword is ignored.", - UserWarning, - stacklevel=find_stack_level(), - ) - if sharex or sharey: - warnings.warn( - "When passing multiple axes, sharex and sharey " - "are ignored. 
These settings must be specified when creating axes.", - UserWarning, - stacklevel=find_stack_level(), - ) - if ax.size == naxes: - fig = ax.flat[0].get_figure() - return fig, ax - else: - raise ValueError( - f"The number of passed axes must be {naxes}, the " - "same as the output plot" - ) - - fig = ax.get_figure() - # if ax is passed and a number of subplots is 1, return ax as it is - if naxes == 1: - if squeeze: - return fig, ax - else: - return fig, flatten_axes(ax) - else: - warnings.warn( - "To output multiple subplots, the figure containing " - "the passed axes is being cleared.", - UserWarning, - stacklevel=find_stack_level(), - ) - fig.clear() - - nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) - nplots = nrows * ncols - - # Create empty object array to hold all axes. It's easiest to make it 1-d - # so we can just append subplots upon creation, and then - axarr = np.empty(nplots, dtype=object) - - # Create first subplot separately, so we can share it if requested - ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) - - if sharex: - subplot_kw["sharex"] = ax0 - if sharey: - subplot_kw["sharey"] = ax0 - axarr[0] = ax0 - - # Note off-by-one counting because add_subplot uses the MATLAB 1-based - # convention. - for i in range(1, nplots): - kwds = subplot_kw.copy() - # Set sharex and sharey to None for blank/dummy axes, these can - # interfere with proper axis limits on the visible axes if - # they share axes e.g. issue #7528 - if i >= naxes: - kwds["sharex"] = None - kwds["sharey"] = None - ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) - axarr[i] = ax - - if naxes != nplots: - for ax in axarr[naxes:]: - ax.set_visible(False) - - handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) - - if squeeze: - # Reshape the array to have the final desired dimension (nrow,ncol), - # though discarding unneeded dimensions that equal 1. If we only have - # one subplot, just return it instead of a 1-element array. - if nplots == 1: - axes = axarr[0] - else: - axes = axarr.reshape(nrows, ncols).squeeze() - else: - # returned axis array will be always 2-d, even if nrows=ncols=1 - axes = axarr.reshape(nrows, ncols) - - return fig, axes - - -def _remove_labels_from_axis(axis: Axis) -> None: - for t in axis.get_majorticklabels(): - t.set_visible(False) - - # set_visible will not be effective if - # minor axis has NullLocator and NullFormatter (default) - if isinstance(axis.get_minor_locator(), ticker.NullLocator): - axis.set_minor_locator(ticker.AutoLocator()) - if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): - axis.set_minor_formatter(ticker.FormatStrFormatter("")) - for t in axis.get_minorticklabels(): - t.set_visible(False) - - axis.get_label().set_visible(False) - - -def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool: - """ - Return whether an axis is externally shared. - - Parameters - ---------- - ax1 : matplotlib.axes.Axes - Axis to query. - compare_axis : str - `"x"` or `"y"` according to whether the X-axis or Y-axis is being - compared. - - Returns - ------- - bool - `True` if the axis is externally shared. Otherwise `False`. - - Notes - ----- - If two axes with different positions are sharing an axis, they can be - referred to as *externally* sharing the common axis. - - If two axes sharing an axis also have the same position, they can be - referred to as *internally* sharing the common axis (a.k.a twinning). 
- - _handle_shared_axes() is only interested in axes externally sharing an - axis, regardless of whether either of the axes is also internally sharing - with a third axis. - """ - if compare_axis == "x": - axes = ax1.get_shared_x_axes() - elif compare_axis == "y": - axes = ax1.get_shared_y_axes() - else: - raise ValueError( - "_has_externally_shared_axis() needs 'x' or 'y' as a second parameter" - ) - - axes = axes.get_siblings(ax1) - - # Retain ax1 and any of its siblings which aren't in the same position as it - ax1_points = ax1.get_position().get_points() - - for ax2 in axes: - if not np.array_equal(ax1_points, ax2.get_position().get_points()): - return True - - return False - - -def handle_shared_axes( - axarr: Iterable[Axes], - nplots: int, - naxes: int, - nrows: int, - ncols: int, - sharex: bool, - sharey: bool, -) -> None: - if nplots > 1: - row_num = lambda x: x.get_subplotspec().rowspan.start - col_num = lambda x: x.get_subplotspec().colspan.start - - is_first_col = lambda x: x.get_subplotspec().is_first_col() - - if nrows > 1: - try: - # first find out the ax layout, - # so that we can correctly handle 'gaps" - layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_) - for ax in axarr: - layout[row_num(ax), col_num(ax)] = ax.get_visible() - - for ax in axarr: - # only the last row of subplots should get x labels -> all - # other off layout handles the case that the subplot is - # the last in the column, because below is no subplot/gap. - if not layout[row_num(ax) + 1, col_num(ax)]: - continue - if sharex or _has_externally_shared_axis(ax, "x"): - _remove_labels_from_axis(ax.xaxis) - - except IndexError: - # if gridspec is used, ax.rowNum and ax.colNum may different - # from layout shape. in this case, use last_row logic - is_last_row = lambda x: x.get_subplotspec().is_last_row() - for ax in axarr: - if is_last_row(ax): - continue - if sharex or _has_externally_shared_axis(ax, "x"): - _remove_labels_from_axis(ax.xaxis) - - if ncols > 1: - for ax in axarr: - # only the first column should get y labels -> set all other to - # off as we only have labels in the first column and we always - # have a subplot there, we can skip the layout test - if is_first_col(ax): - continue - if sharey or _has_externally_shared_axis(ax, "y"): - _remove_labels_from_axis(ax.yaxis) - - -def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: - if not is_list_like(axes): - return np.array([axes]) - elif isinstance(axes, (np.ndarray, ABCIndex)): - return np.asarray(axes).ravel() - return np.array(axes) - - -def set_ticks_props( - axes: Axes | Sequence[Axes], - xlabelsize: int | None = None, - xrot=None, - ylabelsize: int | None = None, - yrot=None, -): - import matplotlib.pyplot as plt - - for ax in flatten_axes(axes): - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) - return axes - - -def get_all_lines(ax: Axes) -> list[Line2D]: - lines = ax.get_lines() - - if hasattr(ax, "right_ax"): - lines += ax.right_ax.get_lines() - - if hasattr(ax, "left_ax"): - lines += ax.left_ax.get_lines() - - return lines - - -def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]: - left, right = np.inf, -np.inf - for line in lines: - x = line.get_xdata(orig=False) - left = min(np.nanmin(x), left) - right = max(np.nanmax(x), right) - return left, right diff 
--git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py deleted file mode 100644 index 305c0f8bba8ce210811d488f669a4953370d094b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py +++ /dev/null @@ -1,311 +0,0 @@ -from functools import partial -import re - -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm -from pandas.api.types import is_extension_array_dtype - -dtypes = [ - "int64", - "Int64", - {"A": "int64", "B": "Int64"}, -] - - -@pytest.mark.parametrize("dtype", dtypes) -def test_unary_unary(dtype): - # unary input, unary output - values = np.array([[-1, -1], [1, 1]], dtype="int64") - df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) - result = np.positive(df) - expected = pd.DataFrame( - np.positive(values), index=df.index, columns=df.columns - ).astype(dtype) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("dtype", dtypes) -def test_unary_binary(request, dtype): - # unary input, binary output - if is_extension_array_dtype(dtype) or isinstance(dtype, dict): - request.node.add_marker( - pytest.mark.xfail( - reason="Extension / mixed with multiple outputs not implemented." - ) - ) - - values = np.array([[-1, -1], [1, 1]], dtype="int64") - df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) - result_pandas = np.modf(df) - assert isinstance(result_pandas, tuple) - assert len(result_pandas) == 2 - expected_numpy = np.modf(values) - - for result, b in zip(result_pandas, expected_numpy): - expected = pd.DataFrame(b, index=df.index, columns=df.columns) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("dtype", dtypes) -def test_binary_input_dispatch_binop(dtype): - # binop ufuncs are dispatched to our dunder methods. 
- values = np.array([[-1, -1], [1, 1]], dtype="int64") - df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) - result = np.add(df, df) - expected = pd.DataFrame( - np.add(values, values), index=df.index, columns=df.columns - ).astype(dtype) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize( - "func,arg,expected", - [ - (np.add, 1, [2, 3, 4, 5]), - ( - partial(np.add, where=[[False, True], [True, False]]), - np.array([[1, 1], [1, 1]]), - [0, 3, 4, 0], - ), - (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]), - (np.subtract, 2, [-1, 0, 1, 2]), - ( - partial(np.negative, where=np.array([[False, True], [True, False]])), - None, - [0, -2, -3, 0], - ), - ], -) -def test_ufunc_passes_args(func, arg, expected): - # GH#40662 - arr = np.array([[1, 2], [3, 4]]) - df = pd.DataFrame(arr) - result_inplace = np.zeros_like(arr) - # 1-argument ufunc - if arg is None: - result = func(df, out=result_inplace) - else: - result = func(df, arg, out=result_inplace) - - expected = np.array(expected).reshape(2, 2) - tm.assert_numpy_array_equal(result_inplace, expected) - - expected = pd.DataFrame(expected) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("dtype_a", dtypes) -@pytest.mark.parametrize("dtype_b", dtypes) -def test_binary_input_aligns_columns(request, dtype_a, dtype_b): - if ( - is_extension_array_dtype(dtype_a) - or isinstance(dtype_a, dict) - or is_extension_array_dtype(dtype_b) - or isinstance(dtype_b, dict) - ): - request.node.add_marker( - pytest.mark.xfail( - reason="Extension / mixed with multiple inputs not implemented." - ) - ) - - df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a) - - if isinstance(dtype_a, dict) and isinstance(dtype_b, dict): - dtype_b = dtype_b.copy() - dtype_b["C"] = dtype_b.pop("B") - df2 = pd.DataFrame({"A": [1, 2], "C": [3, 4]}).astype(dtype_b) - # As of 2.0, align first before applying the ufunc - result = np.heaviside(df1, df2) - expected = np.heaviside( - np.array([[1, 3, np.nan], [2, 4, np.nan]]), - np.array([[1, np.nan, 3], [2, np.nan, 4]]), - ) - expected = pd.DataFrame(expected, index=[0, 1], columns=["A", "B", "C"]) - tm.assert_frame_equal(result, expected) - - result = np.heaviside(df1, df2.values) - expected = pd.DataFrame([[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"]) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("dtype", dtypes) -def test_binary_input_aligns_index(request, dtype): - if is_extension_array_dtype(dtype) or isinstance(dtype, dict): - request.node.add_marker( - pytest.mark.xfail( - reason="Extension / mixed with multiple inputs not implemented." - ) - ) - df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype) - df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype) - result = np.heaviside(df1, df2) - expected = np.heaviside( - np.array([[1, 3], [3, 4], [np.nan, np.nan]]), - np.array([[1, 3], [np.nan, np.nan], [3, 4]]), - ) - # TODO(FloatArray): this will be Float64Dtype. 
- expected = pd.DataFrame(expected, index=["a", "b", "c"], columns=["A", "B"]) - tm.assert_frame_equal(result, expected) - - result = np.heaviside(df1, df2.values) - expected = pd.DataFrame( - [[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"], index=["a", "b"] - ) - tm.assert_frame_equal(result, expected) - - -def test_binary_frame_series_raises(): - # We don't currently implement - df = pd.DataFrame({"A": [1, 2]}) - with pytest.raises(NotImplementedError, match="logaddexp"): - np.logaddexp(df, df["A"]) - - with pytest.raises(NotImplementedError, match="logaddexp"): - np.logaddexp(df["A"], df) - - -def test_unary_accumulate_axis(): - # https://github.com/pandas-dev/pandas/issues/39259 - df = pd.DataFrame({"a": [1, 3, 2, 4]}) - result = np.maximum.accumulate(df) - expected = pd.DataFrame({"a": [1, 3, 3, 4]}) - tm.assert_frame_equal(result, expected) - - df = pd.DataFrame({"a": [1, 3, 2, 4], "b": [0.1, 4.0, 3.0, 2.0]}) - result = np.maximum.accumulate(df) - # in theory could preserve int dtype for default axis=0 - expected = pd.DataFrame({"a": [1.0, 3.0, 3.0, 4.0], "b": [0.1, 4.0, 4.0, 4.0]}) - tm.assert_frame_equal(result, expected) - - result = np.maximum.accumulate(df, axis=0) - tm.assert_frame_equal(result, expected) - - result = np.maximum.accumulate(df, axis=1) - expected = pd.DataFrame({"a": [1.0, 3.0, 2.0, 4.0], "b": [1.0, 4.0, 3.0, 4.0]}) - tm.assert_frame_equal(result, expected) - - -def test_frame_outer_disallowed(): - df = pd.DataFrame({"A": [1, 2]}) - with pytest.raises(NotImplementedError, match=""): - # deprecation enforced in 2.0 - np.subtract.outer(df, df) - - -def test_alignment_deprecation_enforced(): - # Enforced in 2.0 - # https://github.com/pandas-dev/pandas/issues/39184 - df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) - df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) - s1 = pd.Series([1, 2], index=["a", "b"]) - s2 = pd.Series([1, 2], index=["b", "c"]) - - # binary dataframe / dataframe - expected = pd.DataFrame({"a": [2, 4, 6], "b": [8, 10, 12]}) - - with tm.assert_produces_warning(None): - # aligned -> no warning! - result = np.add(df1, df1) - tm.assert_frame_equal(result, expected) - - result = np.add(df1, df2.values) - tm.assert_frame_equal(result, expected) - - result = np.add(df1, df2) - expected = pd.DataFrame({"a": [np.nan] * 3, "b": [5, 7, 9], "c": [np.nan] * 3}) - tm.assert_frame_equal(result, expected) - - result = np.add(df1.values, df2) - expected = pd.DataFrame({"b": [2, 4, 6], "c": [8, 10, 12]}) - tm.assert_frame_equal(result, expected) - - # binary dataframe / series - expected = pd.DataFrame({"a": [2, 3, 4], "b": [6, 7, 8]}) - - with tm.assert_produces_warning(None): - # aligned -> no warning! - result = np.add(df1, s1) - tm.assert_frame_equal(result, expected) - - result = np.add(df1, s2.values) - tm.assert_frame_equal(result, expected) - - expected = pd.DataFrame( - {"a": [np.nan] * 3, "b": [5.0, 6.0, 7.0], "c": [np.nan] * 3} - ) - result = np.add(df1, s2) - tm.assert_frame_equal(result, expected) - - msg = "Cannot apply ufunc to mixed DataFrame and Series inputs." 
- with pytest.raises(NotImplementedError, match=msg): - np.add(s2, df1) - - -def test_alignment_deprecation_many_inputs_enforced(): - # Enforced in 2.0 - # https://github.com/pandas-dev/pandas/issues/39184 - # test that the deprecation also works with > 2 inputs -> using a numba - # written ufunc for this because numpy itself doesn't have such ufuncs - numba = pytest.importorskip("numba") - - @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)]) - def my_ufunc(x, y, z): - return x + y + z - - df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) - df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) - df3 = pd.DataFrame({"a": [1, 2, 3], "c": [4, 5, 6]}) - - result = my_ufunc(df1, df2, df3) - expected = pd.DataFrame(np.full((3, 3), np.nan), columns=["a", "b", "c"]) - tm.assert_frame_equal(result, expected) - - # all aligned -> no warning - with tm.assert_produces_warning(None): - result = my_ufunc(df1, df1, df1) - expected = pd.DataFrame([[3.0, 12.0], [6.0, 15.0], [9.0, 18.0]], columns=["a", "b"]) - tm.assert_frame_equal(result, expected) - - # mixed frame / arrays - msg = ( - r"operands could not be broadcast together with shapes \(3,3\) \(3,3\) \(3,2\)" - ) - with pytest.raises(ValueError, match=msg): - my_ufunc(df1, df2, df3.values) - - # single frame -> no warning - with tm.assert_produces_warning(None): - result = my_ufunc(df1, df2.values, df3.values) - tm.assert_frame_equal(result, expected) - - # takes indices of first frame - msg = ( - r"operands could not be broadcast together with shapes \(3,2\) \(3,3\) \(3,3\)" - ) - with pytest.raises(ValueError, match=msg): - my_ufunc(df1.values, df2, df3) - - -def test_array_ufuncs_for_many_arguments(): - # GH39853 - def add3(x, y, z): - return x + y + z - - ufunc = np.frompyfunc(add3, 3, 1) - df = pd.DataFrame([[1, 2], [3, 4]]) - - result = ufunc(df, df, 1) - expected = pd.DataFrame([[3, 5], [7, 9]], dtype=object) - tm.assert_frame_equal(result, expected) - - ser = pd.Series([1, 2]) - msg = ( - "Cannot apply ufunc " - "to mixed DataFrame and Series inputs." 
- ) - with pytest.raises(NotImplementedError, match=re.escape(msg)): - ufunc(df, df, ser) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py deleted file mode 100644 index b8d41164792e082e59d58023cb095db1f3051ffc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py +++ /dev/null @@ -1,398 +0,0 @@ -import contextlib -from pathlib import Path -import re - -import numpy as np -import pytest - -import pandas as pd -from pandas import DataFrame -import pandas._testing as tm - -from pandas.io.excel import ( - ExcelWriter, - _OpenpyxlWriter, -) - -openpyxl = pytest.importorskip("openpyxl") - -pytestmark = pytest.mark.parametrize("ext", [".xlsx"]) - - -def test_to_excel_styleconverter(ext): - from openpyxl import styles - - hstyle = { - "font": {"color": "00FF0000", "bold": True}, - "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, - "alignment": {"horizontal": "center", "vertical": "top"}, - "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}}, - "number_format": {"format_code": "0.00"}, - "protection": {"locked": True, "hidden": False}, - } - - font_color = styles.Color("00FF0000") - font = styles.Font(bold=True, color=font_color) - side = styles.Side(style=styles.borders.BORDER_THIN) - border = styles.Border(top=side, right=side, bottom=side, left=side) - alignment = styles.Alignment(horizontal="center", vertical="top") - fill_color = styles.Color(rgb="006666FF", tint=0.3) - fill = styles.PatternFill(patternType="solid", fgColor=fill_color) - - number_format = "0.00" - - protection = styles.Protection(locked=True, hidden=False) - - kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle) - assert kw["font"] == font - assert kw["border"] == border - assert kw["alignment"] == alignment - assert kw["fill"] == fill - assert kw["number_format"] == number_format - assert kw["protection"] == protection - - -def test_write_cells_merge_styled(ext): - from pandas.io.formats.excel import ExcelCell - - sheet_name = "merge_styled" - - sty_b1 = {"font": {"color": "00FF0000"}} - sty_a2 = {"font": {"color": "0000FF00"}} - - initial_cells = [ - ExcelCell(col=1, row=0, val=42, style=sty_b1), - ExcelCell(col=0, row=1, val=99, style=sty_a2), - ] - - sty_merged = {"font": {"color": "000000FF", "bold": True}} - sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged) - openpyxl_sty_merged = sty_kwargs["font"] - merge_cells = [ - ExcelCell( - col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged - ) - ] - - with tm.ensure_clean(ext) as path: - with _OpenpyxlWriter(path) as writer: - writer._write_cells(initial_cells, sheet_name=sheet_name) - writer._write_cells(merge_cells, sheet_name=sheet_name) - - wks = writer.sheets[sheet_name] - xcell_b1 = wks["B1"] - xcell_a2 = wks["A2"] - assert xcell_b1.font == openpyxl_sty_merged - assert xcell_a2.font == openpyxl_sty_merged - - -@pytest.mark.parametrize("iso_dates", [True, False]) -def test_engine_kwargs_write(ext, iso_dates): - # GH 42286 GH 43445 - engine_kwargs = {"iso_dates": iso_dates} - with tm.ensure_clean(ext) as f: - with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer: - assert writer.book.iso_dates == iso_dates - # ExcelWriter won't allow us to close without writing something - 
DataFrame().to_excel(writer) - - -def test_engine_kwargs_append_invalid(ext): - # GH 43445 - # test whether an invalid engine kwargs actually raises - with tm.ensure_clean(ext) as f: - DataFrame(["hello", "world"]).to_excel(f) - with pytest.raises( - TypeError, - match=re.escape( - "load_workbook() got an unexpected keyword argument 'apple_banana'" - ), - ): - with ExcelWriter( - f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"} - ) as writer: - # ExcelWriter needs us to write something to close properly - DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2") - - -@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")]) -def test_engine_kwargs_append_data_only(ext, data_only, expected): - # GH 43445 - # tests whether the data_only engine_kwarg actually works well for - # openpyxl's load_workbook - with tm.ensure_clean(ext) as f: - DataFrame(["=1+1"]).to_excel(f) - with ExcelWriter( - f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only} - ) as writer: - assert writer.sheets["Sheet1"]["B2"].value == expected - # ExcelWriter needs us to writer something to close properly? - DataFrame().to_excel(writer, sheet_name="Sheet2") - - -@pytest.mark.parametrize( - "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])] -) -def test_write_append_mode(ext, mode, expected): - df = DataFrame([1], columns=["baz"]) - - with tm.ensure_clean(ext) as f: - wb = openpyxl.Workbook() - wb.worksheets[0].title = "foo" - wb.worksheets[0]["A1"].value = "foo" - wb.create_sheet("bar") - wb.worksheets[1]["A1"].value = "bar" - wb.save(f) - - with ExcelWriter(f, engine="openpyxl", mode=mode) as writer: - df.to_excel(writer, sheet_name="baz", index=False) - - with contextlib.closing(openpyxl.load_workbook(f)) as wb2: - result = [sheet.title for sheet in wb2.worksheets] - assert result == expected - - for index, cell_value in enumerate(expected): - assert wb2.worksheets[index]["A1"].value == cell_value - - -@pytest.mark.parametrize( - "if_sheet_exists,num_sheets,expected", - [ - ("new", 2, ["apple", "banana"]), - ("replace", 1, ["pear"]), - ("overlay", 1, ["pear", "banana"]), - ], -) -def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected): - # GH 40230 - df1 = DataFrame({"fruit": ["apple", "banana"]}) - df2 = DataFrame({"fruit": ["pear"]}) - - with tm.ensure_clean(ext) as f: - df1.to_excel(f, engine="openpyxl", sheet_name="foo", index=False) - with ExcelWriter( - f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists - ) as writer: - df2.to_excel(writer, sheet_name="foo", index=False) - - with contextlib.closing(openpyxl.load_workbook(f)) as wb: - assert len(wb.sheetnames) == num_sheets - assert wb.sheetnames[0] == "foo" - result = pd.read_excel(wb, "foo", engine="openpyxl") - assert list(result["fruit"]) == expected - if len(wb.sheetnames) == 2: - result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl") - tm.assert_frame_equal(result, df2) - - -@pytest.mark.parametrize( - "startrow, startcol, greeting, goodbye", - [ - (0, 0, ["poop", "world"], ["goodbye", "people"]), - (0, 1, ["hello", "world"], ["poop", "people"]), - (1, 0, ["hello", "poop"], ["goodbye", "people"]), - (1, 1, ["hello", "world"], ["goodbye", "poop"]), - ], -) -def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye): - df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]}) - df2 = DataFrame(["poop"]) - - with tm.ensure_clean(ext) as f: - df1.to_excel(f, engine="openpyxl", 
sheet_name="poo", index=False) - with ExcelWriter( - f, engine="openpyxl", mode="a", if_sheet_exists="overlay" - ) as writer: - # use startrow+1 because we don't have a header - df2.to_excel( - writer, - index=False, - header=False, - startrow=startrow + 1, - startcol=startcol, - sheet_name="poo", - ) - - result = pd.read_excel(f, sheet_name="poo", engine="openpyxl") - expected = DataFrame({"greeting": greeting, "goodbye": goodbye}) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize( - "if_sheet_exists,msg", - [ - ( - "invalid", - "'invalid' is not valid for if_sheet_exists. Valid options " - "are 'error', 'new', 'replace' and 'overlay'.", - ), - ( - "error", - "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", - ), - ( - None, - "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", - ), - ], -) -def test_if_sheet_exists_raises(ext, if_sheet_exists, msg): - # GH 40230 - df = DataFrame({"fruit": ["pear"]}) - with tm.ensure_clean(ext) as f: - with pytest.raises(ValueError, match=re.escape(msg)): - df.to_excel(f, "foo", engine="openpyxl") - with ExcelWriter( - f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists - ) as writer: - df.to_excel(writer, sheet_name="foo") - - -def test_to_excel_with_openpyxl_engine(ext): - # GH 29854 - with tm.ensure_clean(ext) as filename: - df1 = DataFrame({"A": np.linspace(1, 10, 10)}) - df2 = DataFrame({"B": np.linspace(1, 20, 10)}) - df = pd.concat([df1, df2], axis=1) - styled = df.style.map( - lambda val: f"color: {'red' if val < 0 else 'black'}" - ).highlight_max() - - styled.to_excel(filename, engine="openpyxl") - - -@pytest.mark.parametrize("read_only", [True, False]) -def test_read_workbook(datapath, ext, read_only): - # GH 39528 - filename = datapath("io", "data", "excel", "test1" + ext) - with contextlib.closing( - openpyxl.load_workbook(filename, read_only=read_only) - ) as wb: - result = pd.read_excel(wb, engine="openpyxl") - expected = pd.read_excel(filename) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize( - "header, expected_data", - [ - ( - 0, - { - "Title": [np.nan, "A", 1, 2, 3], - "Unnamed: 1": [np.nan, "B", 4, 5, 6], - "Unnamed: 2": [np.nan, "C", 7, 8, 9], - }, - ), - (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}), - ], -) -@pytest.mark.parametrize( - "filename", ["dimension_missing", "dimension_small", "dimension_large"] -) -# When read_only is None, use read_excel instead of a workbook -@pytest.mark.parametrize("read_only", [True, False, None]) -def test_read_with_bad_dimension( - datapath, ext, header, expected_data, filename, read_only -): - # GH 38956, 39001 - no/incorrect dimension information - path = datapath("io", "data", "excel", f"{filename}{ext}") - if read_only is None: - result = pd.read_excel(path, header=header) - else: - with contextlib.closing( - openpyxl.load_workbook(path, read_only=read_only) - ) as wb: - result = pd.read_excel(wb, engine="openpyxl", header=header) - expected = DataFrame(expected_data) - tm.assert_frame_equal(result, expected) - - -def test_append_mode_file(ext): - # GH 39576 - df = DataFrame() - - with tm.ensure_clean(ext) as f: - df.to_excel(f, engine="openpyxl") - - with ExcelWriter( - f, mode="a", engine="openpyxl", if_sheet_exists="new" - ) as writer: - df.to_excel(writer) - - # make sure that zip files are not concatenated by making sure that - # "docProps/app.xml" only occurs twice in the file - data = Path(f).read_bytes() - first = data.find(b"docProps/app.xml") - second = data.find(b"docProps/app.xml", 
first + 1) - third = data.find(b"docProps/app.xml", second + 1) - assert second != -1 and third == -1 - - -# When read_only is None, use read_excel instead of a workbook -@pytest.mark.parametrize("read_only", [True, False, None]) -def test_read_with_empty_trailing_rows(datapath, ext, read_only): - # GH 39181 - path = datapath("io", "data", "excel", f"empty_trailing_rows{ext}") - if read_only is None: - result = pd.read_excel(path) - else: - with contextlib.closing( - openpyxl.load_workbook(path, read_only=read_only) - ) as wb: - result = pd.read_excel(wb, engine="openpyxl") - expected = DataFrame( - { - "Title": [np.nan, "A", 1, 2, 3], - "Unnamed: 1": [np.nan, "B", 4, 5, 6], - "Unnamed: 2": [np.nan, "C", 7, 8, 9], - } - ) - tm.assert_frame_equal(result, expected) - - -# When read_only is None, use read_excel instead of a workbook -@pytest.mark.parametrize("read_only", [True, False, None]) -def test_read_empty_with_blank_row(datapath, ext, read_only): - # GH 39547 - empty excel file with a row that has no data - path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}") - if read_only is None: - result = pd.read_excel(path) - else: - with contextlib.closing( - openpyxl.load_workbook(path, read_only=read_only) - ) as wb: - result = pd.read_excel(wb, engine="openpyxl") - expected = DataFrame() - tm.assert_frame_equal(result, expected) - - -def test_book_and_sheets_consistent(ext): - # GH#45687 - Ensure sheets is updated if user modifies book - with tm.ensure_clean(ext) as f: - with ExcelWriter(f, engine="openpyxl") as writer: - assert writer.sheets == {} - sheet = writer.book.create_sheet("test_name", 0) - assert writer.sheets == {"test_name": sheet} - - -def test_ints_spelled_with_decimals(datapath, ext): - # GH 46988 - openpyxl returns this sheet with floats - path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}") - result = pd.read_excel(path) - expected = DataFrame(range(2, 12), columns=[1]) - tm.assert_frame_equal(result, expected) - - -def test_read_multiindex_header_no_index_names(datapath, ext): - # GH#47487 - path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}") - result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2]) - expected = DataFrame( - [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]], - columns=pd.MultiIndex.from_tuples( - [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")] - ), - index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]), - ) - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_format.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_format.py deleted file mode 100644 index 7dfd35d5424b47e056caa48d2635285fd5bd1a06..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_format.py +++ /dev/null @@ -1,3648 +0,0 @@ -""" -Test output formatting for Series/DataFrame, including to_string & reprs -""" -from contextlib import nullcontext -from datetime import ( - datetime, - time, - timedelta, -) -from io import StringIO -import itertools -import locale -from operator import methodcaller -from pathlib import Path -import re -from shutil import get_terminal_size -import sys -import textwrap - -import dateutil -import numpy as np -import pytest -import pytz - -from pandas._config import config - -import pandas as pd -from pandas import ( - 
DataFrame, - Index, - MultiIndex, - NaT, - Series, - Timestamp, - date_range, - get_option, - option_context, - read_csv, - reset_option, -) -import pandas._testing as tm - -from pandas.io.formats import printing -import pandas.io.formats.format as fmt - - -def get_local_am_pm(): - """Return the AM and PM strings returned by strftime in current locale.""" - am_local = time(1).strftime("%p") - pm_local = time(13).strftime("%p") - return am_local, pm_local - - -@pytest.fixture(autouse=True) -def clean_config(): - curr_deprecated_options = config._deprecated_options.copy() - curr_registered_options = config._registered_options.copy() - curr_global_config = config._global_config.copy() - yield - config._deprecated_options = curr_deprecated_options - config._registered_options = curr_registered_options - config._global_config = curr_global_config - - -@pytest.fixture(params=["string", "pathlike", "buffer"]) -def filepath_or_buffer_id(request): - """ - A fixture yielding test ids for filepath_or_buffer testing. - """ - return request.param - - -@pytest.fixture -def filepath_or_buffer(filepath_or_buffer_id, tmp_path): - """ - A fixture yielding a string representing a filepath, a path-like object - and a StringIO buffer. Also checks that buffer is not closed. - """ - if filepath_or_buffer_id == "buffer": - buf = StringIO() - yield buf - assert not buf.closed - else: - assert isinstance(tmp_path, Path) - if filepath_or_buffer_id == "pathlike": - yield tmp_path / "foo" - else: - yield str(tmp_path / "foo") - - -@pytest.fixture -def assert_filepath_or_buffer_equals( - filepath_or_buffer, filepath_or_buffer_id, encoding -): - """ - Assertion helper for checking filepath_or_buffer. - """ - if encoding is None: - encoding = "utf-8" - - def _assert_filepath_or_buffer_equals(expected): - if filepath_or_buffer_id == "string": - with open(filepath_or_buffer, encoding=encoding) as f: - result = f.read() - elif filepath_or_buffer_id == "pathlike": - result = filepath_or_buffer.read_text(encoding=encoding) - elif filepath_or_buffer_id == "buffer": - result = filepath_or_buffer.getvalue() - assert result == expected - - return _assert_filepath_or_buffer_equals - - -def has_info_repr(df): - r = repr(df) - c1 = r.split("\n")[0].startswith(" - # 2. Index - # 3. Columns - # 4. dtype - # 5. memory usage - # 6. trailing newline - nv = len(r.split("\n")) == 6 - return has_info and nv - - -def has_horizontally_truncated_repr(df): - try: # Check header row - fst_line = np.array(repr(df).splitlines()[0].split()) - cand_col = np.where(fst_line == "...")[0][0] - except IndexError: - return False - # Make sure each row has this ... 
in the same place - r = repr(df) - for ix, _ in enumerate(r.splitlines()): - if not r.split()[cand_col] == "...": - return False - return True - - -def has_vertically_truncated_repr(df): - r = repr(df) - only_dot_row = False - for row in r.splitlines(): - if re.match(r"^[\.\ ]+$", row): - only_dot_row = True - return only_dot_row - - -def has_truncated_repr(df): - return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df) - - -def has_doubly_truncated_repr(df): - return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df) - - -def has_expanded_repr(df): - r = repr(df) - for line in r.split("\n"): - if line.endswith("\\"): - return True - return False - - -class TestDataFrameFormatting: - def test_eng_float_formatter(self, float_frame): - df = float_frame - df.loc[5] = 0 - - fmt.set_eng_float_format() - repr(df) - - fmt.set_eng_float_format(use_eng_prefix=True) - repr(df) - - fmt.set_eng_float_format(accuracy=0) - repr(df) - tm.reset_display_options() - - @pytest.mark.parametrize( - "row, columns, show_counts, result", - [ - [20, 20, None, True], - [20, 20, True, True], - [20, 20, False, False], - [5, 5, None, False], - [5, 5, True, False], - [5, 5, False, False], - ], - ) - def test_show_counts(self, row, columns, show_counts, result): - # Explicit cast to float to avoid implicit cast when setting nan - df = DataFrame(1, columns=range(10), index=range(10)).astype({1: "float"}) - df.iloc[1, 1] = np.nan - - with option_context( - "display.max_info_rows", row, "display.max_info_columns", columns - ): - with StringIO() as buf: - df.info(buf=buf, show_counts=show_counts) - assert ("non-null" in buf.getvalue()) is result - - def test_repr_truncation(self): - max_len = 20 - with option_context("display.max_colwidth", max_len): - df = DataFrame( - { - "A": np.random.default_rng(2).standard_normal(10), - "B": [ - "a" - * np.random.default_rng(2).integers(max_len - 1, max_len + 1) - for _ in range(10) - ], - } - ) - r = repr(df) - r = r[r.find("\n") + 1 :] - - adj = fmt.get_adjustment() - - for line, value in zip(r.split("\n"), df["B"]): - if adj.len(value) + 1 > max_len: - assert "..." in line - else: - assert "..." not in line - - with option_context("display.max_colwidth", 999999): - assert "..." not in repr(df) - - with option_context("display.max_colwidth", max_len + 2): - assert "..." 
not in repr(df) - - def test_max_colwidth_negative_int_raises(self): - # Deprecation enforced from: - # https://github.com/pandas-dev/pandas/issues/31532 - with pytest.raises( - ValueError, match="Value must be a nonnegative integer or None" - ): - with option_context("display.max_colwidth", -1): - pass - - def test_repr_chop_threshold(self): - df = DataFrame([[0.1, 0.5], [0.5, -0.1]]) - reset_option("display.chop_threshold") # default None - assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1" - - with option_context("display.chop_threshold", 0.2): - assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0" - - with option_context("display.chop_threshold", 0.6): - assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0" - - with option_context("display.chop_threshold", None): - assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1" - - def test_repr_chop_threshold_column_below(self): - # GH 6839: validation case - - df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T - - with option_context("display.chop_threshold", 0): - assert repr(df) == ( - " 0 1\n" - "0 10.0 8.000000e-10\n" - "1 20.0 -1.000000e-11\n" - "2 30.0 2.000000e-09\n" - "3 40.0 -2.000000e-11" - ) - - with option_context("display.chop_threshold", 1e-8): - assert repr(df) == ( - " 0 1\n" - "0 10.0 0.000000e+00\n" - "1 20.0 0.000000e+00\n" - "2 30.0 0.000000e+00\n" - "3 40.0 0.000000e+00" - ) - - with option_context("display.chop_threshold", 5e-11): - assert repr(df) == ( - " 0 1\n" - "0 10.0 8.000000e-10\n" - "1 20.0 0.000000e+00\n" - "2 30.0 2.000000e-09\n" - "3 40.0 0.000000e+00" - ) - - def test_repr_obeys_max_seq_limit(self): - with option_context("display.max_seq_items", 2000): - assert len(printing.pprint_thing(list(range(1000)))) > 1000 - - with option_context("display.max_seq_items", 5): - assert len(printing.pprint_thing(list(range(1000)))) < 100 - - with option_context("display.max_seq_items", 1): - assert len(printing.pprint_thing(list(range(1000)))) < 9 - - def test_repr_set(self): - assert printing.pprint_thing({1}) == "{1}" - - def test_repr_is_valid_construction_code(self): - # for the case of Index, where the repr is traditional rather than - # stylized - idx = Index(["a", "b"]) - res = eval("pd." + repr(idx)) - tm.assert_series_equal(Series(res), Series(idx)) - - def test_repr_should_return_str(self): - # https://docs.python.org/3/reference/datamodel.html#object.__repr__ - # "...The return value must be a string object." 
- - # (str on py2.x, str (unicode) on py3) - - data = [8, 5, 3, 5] - index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"] - cols = ["\u03c8"] - df = DataFrame(data, columns=cols, index=index1) - assert type(df.__repr__()) == str # both py2 / 3 - - def test_repr_no_backslash(self): - with option_context("mode.sim_interactive", True): - df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) - assert "\\" not in repr(df) - - def test_expand_frame_repr(self): - df_small = DataFrame("hello", index=[0], columns=[0]) - df_wide = DataFrame("hello", index=[0], columns=range(10)) - df_tall = DataFrame("hello", index=range(30), columns=range(5)) - - with option_context("mode.sim_interactive", True): - with option_context( - "display.max_columns", - 10, - "display.width", - 20, - "display.max_rows", - 20, - "display.show_dimensions", - True, - ): - with option_context("display.expand_frame_repr", True): - assert not has_truncated_repr(df_small) - assert not has_expanded_repr(df_small) - assert not has_truncated_repr(df_wide) - assert has_expanded_repr(df_wide) - assert has_vertically_truncated_repr(df_tall) - assert has_expanded_repr(df_tall) - - with option_context("display.expand_frame_repr", False): - assert not has_truncated_repr(df_small) - assert not has_expanded_repr(df_small) - assert not has_horizontally_truncated_repr(df_wide) - assert not has_expanded_repr(df_wide) - assert has_vertically_truncated_repr(df_tall) - assert not has_expanded_repr(df_tall) - - def test_repr_non_interactive(self): - # in non interactive mode, there can be no dependency on the - # result of terminal auto size detection - df = DataFrame("hello", index=range(1000), columns=range(5)) - - with option_context( - "mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000 - ): - assert not has_truncated_repr(df) - assert not has_expanded_repr(df) - - def test_repr_truncates_terminal_size(self, monkeypatch): - # see gh-21180 - - terminal_size = (118, 96) - monkeypatch.setattr( - "pandas.io.formats.format.get_terminal_size", lambda: terminal_size - ) - - index = range(5) - columns = MultiIndex.from_tuples( - [ - ("This is a long title with > 37 chars.", "cat"), - ("This is a loooooonger title with > 43 chars.", "dog"), - ] - ) - df = DataFrame(1, index=index, columns=columns) - - result = repr(df) - - h1, h2 = result.split("\n")[:2] - assert "long" in h1 - assert "loooooonger" in h1 - assert "cat" in h2 - assert "dog" in h2 - - # regular columns - df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]}) - result = repr(df2) - - assert df2.columns[0] in result.split("\n")[0] - - def test_repr_truncates_terminal_size_full(self, monkeypatch): - # GH 22984 ensure entire window is filled - terminal_size = (80, 24) - df = DataFrame(np.random.default_rng(2).random((1, 7))) - - monkeypatch.setattr( - "pandas.io.formats.format.get_terminal_size", lambda: terminal_size - ) - assert "..." not in str(df) - - def test_repr_truncation_column_size(self): - # dataframe with last column very wide -> check it is not used to - # determine size of truncation (...) column - df = DataFrame( - { - "a": [108480, 30830], - "b": [12345, 12345], - "c": [12345, 12345], - "d": [12345, 12345], - "e": ["a" * 50] * 2, - } - ) - assert "..." in str(df) - assert " ... 
" not in str(df) - - def test_repr_max_columns_max_rows(self): - term_width, term_height = get_terminal_size() - if term_width < 10 or term_height < 10: - pytest.skip(f"terminal size too small, {term_width} x {term_height}") - - def mkframe(n): - index = [f"{i:05d}" for i in range(n)] - return DataFrame(0, index, index) - - df6 = mkframe(6) - df10 = mkframe(10) - with option_context("mode.sim_interactive", True): - with option_context("display.width", term_width * 2): - with option_context("display.max_rows", 5, "display.max_columns", 5): - assert not has_expanded_repr(mkframe(4)) - assert not has_expanded_repr(mkframe(5)) - assert not has_expanded_repr(df6) - assert has_doubly_truncated_repr(df6) - - with option_context("display.max_rows", 20, "display.max_columns", 10): - # Out off max_columns boundary, but no extending - # since not exceeding width - assert not has_expanded_repr(df6) - assert not has_truncated_repr(df6) - - with option_context("display.max_rows", 9, "display.max_columns", 10): - # out vertical bounds can not result in expanded repr - assert not has_expanded_repr(df10) - assert has_vertically_truncated_repr(df10) - - # width=None in terminal, auto detection - with option_context( - "display.max_columns", - 100, - "display.max_rows", - term_width * 20, - "display.width", - None, - ): - df = mkframe((term_width // 7) - 2) - assert not has_expanded_repr(df) - df = mkframe((term_width // 7) + 2) - printing.pprint_thing(df._repr_fits_horizontal_()) - assert has_expanded_repr(df) - - def test_repr_min_rows(self): - df = DataFrame({"a": range(20)}) - - # default setting no truncation even if above min_rows - assert ".." not in repr(df) - assert ".." not in df._repr_html_() - - df = DataFrame({"a": range(61)}) - - # default of max_rows 60 triggers truncation if above - assert ".." in repr(df) - assert ".." in df._repr_html_() - - with option_context("display.max_rows", 10, "display.min_rows", 4): - # truncated after first two rows - assert ".." in repr(df) - assert "2 " not in repr(df) - assert "..." in df._repr_html_() - assert "2" not in df._repr_html_() - - with option_context("display.max_rows", 12, "display.min_rows", None): - # when set to None, follow value of max_rows - assert "5 5" in repr(df) - assert "5" in df._repr_html_() - - with option_context("display.max_rows", 10, "display.min_rows", 12): - # when set value higher as max_rows, use the minimum - assert "5 5" not in repr(df) - assert "5" not in df._repr_html_() - - with option_context("display.max_rows", None, "display.min_rows", 12): - # max_rows of None -> never truncate - assert ".." not in repr(df) - assert ".." not in df._repr_html_() - - def test_str_max_colwidth(self): - # GH 7856 - df = DataFrame( - [ - { - "a": "foo", - "b": "bar", - "c": "uncomfortably long line with lots of stuff", - "d": 1, - }, - {"a": "foo", "b": "bar", "c": "stuff", "d": 1}, - ] - ) - df.set_index(["a", "b", "c"]) - assert str(df) == ( - " a b c d\n" - "0 foo bar uncomfortably long line with lots of stuff 1\n" - "1 foo bar stuff 1" - ) - with option_context("max_colwidth", 20): - assert str(df) == ( - " a b c d\n" - "0 foo bar uncomfortably lo... 
1\n" - "1 foo bar stuff 1" - ) - - def test_auto_detect(self): - term_width, term_height = get_terminal_size() - fac = 1.05 # Arbitrary large factor to exceed term width - cols = range(int(term_width * fac)) - index = range(10) - df = DataFrame(index=index, columns=cols) - with option_context("mode.sim_interactive", True): - with option_context("display.max_rows", None): - with option_context("display.max_columns", None): - # Wrap around with None - assert has_expanded_repr(df) - with option_context("display.max_rows", 0): - with option_context("display.max_columns", 0): - # Truncate with auto detection. - assert has_horizontally_truncated_repr(df) - - index = range(int(term_height * fac)) - df = DataFrame(index=index, columns=cols) - with option_context("display.max_rows", 0): - with option_context("display.max_columns", None): - # Wrap around with None - assert has_expanded_repr(df) - # Truncate vertically - assert has_vertically_truncated_repr(df) - - with option_context("display.max_rows", None): - with option_context("display.max_columns", 0): - assert has_horizontally_truncated_repr(df) - - def test_to_string_repr_unicode(self): - buf = StringIO() - - unicode_values = ["\u03c3"] * 10 - unicode_values = np.array(unicode_values, dtype=object) - df = DataFrame({"unicode": unicode_values}) - df.to_string(col_space=10, buf=buf) - - # it works! - repr(df) - - idx = Index(["abc", "\u03c3a", "aegdvg"]) - ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) - rs = repr(ser).split("\n") - line_len = len(rs[0]) - for line in rs[1:]: - try: - line = line.decode(get_option("display.encoding")) - except AttributeError: - pass - if not line.startswith("dtype:"): - assert len(line) == line_len - - # it works even if sys.stdin in None - _stdin = sys.stdin - try: - sys.stdin = None - repr(df) - finally: - sys.stdin = _stdin - - def test_east_asian_unicode_false(self): - # not aligned properly because of east asian width - - # mid col - df = DataFrame( - {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]}, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\na あ 1\n" - "bb いいい 222\nc う 33333\n" - "ddd ええええええ 4" - ) - assert repr(df) == expected - - # last col - df = DataFrame( - {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]}, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\na 1 あ\n" - "bb 222 いいい\nc 33333 う\n" - "ddd 4 ええええええ" - ) - assert repr(df) == expected - - # all col - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\na あああああ あ\n" - "bb い いいい\nc う う\n" - "ddd えええ ええええええ" - ) - assert repr(df) == expected - - # column name - df = DataFrame( - { - "b": ["あ", "いいい", "う", "ええええええ"], - "あああああ": [1, 222, 33333, 4], - }, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " b あああああ\na あ 1\n" - "bb いいい 222\nc う 33333\n" - "ddd ええええええ 4" - ) - assert repr(df) == expected - - # index - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=["あああ", "いいいいいい", "うう", "え"], - ) - expected = ( - " a b\nあああ あああああ あ\n" - "いいいいいい い いいい\nうう う う\n" - "え えええ ええええええ" - ) - assert repr(df) == expected - - # index name - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=Index(["あ", "い", "うう", "え"], name="おおおお"), - ) - expected = ( - " a b\n" - "おおおお \n" - "あ あああああ あ\n" - "い い いいい\n" - "うう う う\n" - "え えええ ええええええ" - ) - assert repr(df) == expected - - # all 
- df = DataFrame( - { - "あああ": ["あああ", "い", "う", "えええええ"], - "いいいいい": ["あ", "いいい", "う", "ええ"], - }, - index=Index(["あ", "いいい", "うう", "え"], name="お"), - ) - expected = ( - " あああ いいいいい\n" - "お \n" - "あ あああ あ\n" - "いいい い いいい\n" - "うう う う\n" - "え えええええ ええ" - ) - assert repr(df) == expected - - # MultiIndex - idx = MultiIndex.from_tuples( - [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")] - ) - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=idx, - ) - expected = ( - " a b\n" - "あ いい あああああ あ\n" - "う え い いいい\n" - "おおお かかかか う う\n" - "き くく えええ ええええええ" - ) - assert repr(df) == expected - - # truncate - with option_context("display.max_rows", 3, "display.max_columns", 3): - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - "c": ["お", "か", "ききき", "くくくくくく"], - "ああああ": ["さ", "し", "す", "せ"], - }, - columns=["a", "b", "c", "ああああ"], - ) - - expected = ( - " a ... ああああ\n0 あああああ ... さ\n" - ".. ... ... ...\n3 えええ ... せ\n" - "\n[4 rows x 4 columns]" - ) - assert repr(df) == expected - - df.index = ["あああ", "いいいい", "う", "aaa"] - expected = ( - " a ... ああああ\nあああ あああああ ... さ\n" - ".. ... ... ...\naaa えええ ... せ\n" - "\n[4 rows x 4 columns]" - ) - assert repr(df) == expected - - def test_east_asian_unicode_true(self): - # Enable Unicode option ----------------------------------------- - with option_context("display.unicode.east_asian_width", True): - # mid col - df = DataFrame( - {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]}, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\na あ 1\n" - "bb いいい 222\nc う 33333\n" - "ddd ええええええ 4" - ) - assert repr(df) == expected - - # last col - df = DataFrame( - {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]}, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\na 1 あ\n" - "bb 222 いいい\nc 33333 う\n" - "ddd 4 ええええええ" - ) - assert repr(df) == expected - - # all col - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " a b\n" - "a あああああ あ\n" - "bb い いいい\n" - "c う う\n" - "ddd えええ ええええええ" - ) - assert repr(df) == expected - - # column name - df = DataFrame( - { - "b": ["あ", "いいい", "う", "ええええええ"], - "あああああ": [1, 222, 33333, 4], - }, - index=["a", "bb", "c", "ddd"], - ) - expected = ( - " b あああああ\n" - "a あ 1\n" - "bb いいい 222\n" - "c う 33333\n" - "ddd ええええええ 4" - ) - assert repr(df) == expected - - # index - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=["あああ", "いいいいいい", "うう", "え"], - ) - expected = ( - " a b\n" - "あああ あああああ あ\n" - "いいいいいい い いいい\n" - "うう う う\n" - "え えええ ええええええ" - ) - assert repr(df) == expected - - # index name - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - }, - index=Index(["あ", "い", "うう", "え"], name="おおおお"), - ) - expected = ( - " a b\n" - "おおおお \n" - "あ あああああ あ\n" - "い い いいい\n" - "うう う う\n" - "え えええ ええええええ" - ) - assert repr(df) == expected - - # all - df = DataFrame( - { - "あああ": ["あああ", "い", "う", "えええええ"], - "いいいいい": ["あ", "いいい", "う", "ええ"], - }, - index=Index(["あ", "いいい", "うう", "え"], name="お"), - ) - expected = ( - " あああ いいいいい\n" - "お \n" - "あ あああ あ\n" - "いいい い いいい\n" - "うう う う\n" - "え えええええ ええ" - ) - assert repr(df) == expected - - # MultiIndex - idx = MultiIndex.from_tuples( - [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")] - ) - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", 
"ええええええ"], - }, - index=idx, - ) - expected = ( - " a b\n" - "あ いい あああああ あ\n" - "う え い いいい\n" - "おおお かかかか う う\n" - "き くく えええ ええええええ" - ) - assert repr(df) == expected - - # truncate - with option_context("display.max_rows", 3, "display.max_columns", 3): - df = DataFrame( - { - "a": ["あああああ", "い", "う", "えええ"], - "b": ["あ", "いいい", "う", "ええええええ"], - "c": ["お", "か", "ききき", "くくくくくく"], - "ああああ": ["さ", "し", "す", "せ"], - }, - columns=["a", "b", "c", "ああああ"], - ) - - expected = ( - " a ... ああああ\n" - "0 あああああ ... さ\n" - ".. ... ... ...\n" - "3 えええ ... せ\n" - "\n[4 rows x 4 columns]" - ) - assert repr(df) == expected - - df.index = ["あああ", "いいいい", "う", "aaa"] - expected = ( - " a ... ああああ\n" - "あああ あああああ ... さ\n" - "... ... ... ...\n" - "aaa えええ ... せ\n" - "\n[4 rows x 4 columns]" - ) - assert repr(df) == expected - - # ambiguous unicode - df = DataFrame( - { - "b": ["あ", "いいい", "¡¡", "ええええええ"], - "あああああ": [1, 222, 33333, 4], - }, - index=["a", "bb", "c", "¡¡¡"], - ) - expected = ( - " b あああああ\n" - "a あ 1\n" - "bb いいい 222\n" - "c ¡¡ 33333\n" - "¡¡¡ ええええええ 4" - ) - assert repr(df) == expected - - def test_to_string_buffer_all_unicode(self): - buf = StringIO() - - empty = DataFrame({"c/\u03c3": Series(dtype=object)}) - nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])}) - - print(empty, file=buf) - print(nonempty, file=buf) - - # this should work - buf.getvalue() - - def test_to_string_with_col_space(self): - df = DataFrame(np.random.default_rng(2).random(size=(1, 3))) - c10 = len(df.to_string(col_space=10).split("\n")[1]) - c20 = len(df.to_string(col_space=20).split("\n")[1]) - c30 = len(df.to_string(col_space=30).split("\n")[1]) - assert c10 < c20 < c30 - - # GH 8230 - # col_space wasn't being applied with header=False - with_header = df.to_string(col_space=20) - with_header_row1 = with_header.splitlines()[1] - no_header = df.to_string(col_space=20, header=False) - assert len(with_header_row1) == len(no_header) - - def test_to_string_with_column_specific_col_space_raises(self): - df = DataFrame( - np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"] - ) - - msg = ( - "Col_space length\\(\\d+\\) should match " - "DataFrame number of columns\\(\\d+\\)" - ) - with pytest.raises(ValueError, match=msg): - df.to_string(col_space=[30, 40]) - - with pytest.raises(ValueError, match=msg): - df.to_string(col_space=[30, 40, 50, 60]) - - msg = "unknown column" - with pytest.raises(ValueError, match=msg): - df.to_string(col_space={"a": "foo", "b": 23, "d": 34}) - - def test_to_string_with_column_specific_col_space(self): - df = DataFrame( - np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"] - ) - - result = df.to_string(col_space={"a": 10, "b": 11, "c": 12}) - # 3 separating space + each col_space for (id, a, b, c) - assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12) - - result = df.to_string(col_space=[10, 11, 12]) - assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12) - - @pytest.mark.parametrize( - "index", - [ - tm.makeStringIndex, - tm.makeIntIndex, - tm.makeDateIndex, - tm.makePeriodIndex, - ], - ) - @pytest.mark.parametrize("h", [10, 20]) - @pytest.mark.parametrize("w", [10, 20]) - def test_to_string_truncate_indices(self, index, h, w): - with option_context("display.expand_frame_repr", False): - df = DataFrame(index=index(h), columns=tm.makeStringIndex(w)) - with option_context("display.max_rows", 15): - if h == 20: - assert has_vertically_truncated_repr(df) - else: - assert not has_vertically_truncated_repr(df) - with 
option_context("display.max_columns", 15): - if w == 20: - assert has_horizontally_truncated_repr(df) - else: - assert not has_horizontally_truncated_repr(df) - with option_context("display.max_rows", 15, "display.max_columns", 15): - if h == 20 and w == 20: - assert has_doubly_truncated_repr(df) - else: - assert not has_doubly_truncated_repr(df) - - def test_to_string_truncate_multilevel(self): - arrays = [ - ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], - ["one", "two", "one", "two", "one", "two", "one", "two"], - ] - df = DataFrame(index=arrays, columns=arrays) - with option_context("display.max_rows", 7, "display.max_columns", 7): - assert has_doubly_truncated_repr(df) - - def test_truncate_with_different_dtypes(self): - # 11594, 12045 - # when truncated the dtypes of the splits can differ - - # 11594 - s = Series( - [datetime(2012, 1, 1)] * 10 - + [datetime(1012, 1, 2)] - + [datetime(2012, 1, 3)] * 10 - ) - - with option_context("display.max_rows", 8): - result = str(s) - assert "object" in result - - # 12045 - df = DataFrame({"text": ["some words"] + [None] * 9}) - - with option_context("display.max_rows", 8, "display.max_columns", 3): - result = str(df) - assert "None" in result - assert "NaN" not in result - - def test_truncate_with_different_dtypes_multiindex(self): - # GH#13000 - df = DataFrame({"Vals": range(100)}) - frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"]) - result = repr(frame) - - result2 = repr(frame.iloc[:5]) - assert result.startswith(result2) - - def test_datetimelike_frame(self): - # GH 12211 - df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5}) - - with option_context("display.max_rows", 5): - result = str(df) - assert "2013-01-01 00:00:00+00:00" in result - assert "NaT" in result - assert "..." in result - assert "[6 rows x 1 columns]" in result - - dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5 - df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) - with option_context("display.max_rows", 5): - expected = ( - " dt x\n" - "0 2011-01-01 00:00:00-05:00 1\n" - "1 2011-01-01 00:00:00-05:00 2\n" - ".. ... ..\n" - "8 NaT 9\n" - "9 NaT 10\n\n" - "[10 rows x 2 columns]" - ) - assert repr(df) == expected - - dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5 - df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) - with option_context("display.max_rows", 5): - expected = ( - " dt x\n" - "0 NaT 1\n" - "1 NaT 2\n" - ".. ... ..\n" - "8 2011-01-01 00:00:00-05:00 9\n" - "9 2011-01-01 00:00:00-05:00 10\n\n" - "[10 rows x 2 columns]" - ) - assert repr(df) == expected - - dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [ - Timestamp("2011-01-01", tz="US/Eastern") - ] * 5 - df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}) - with option_context("display.max_rows", 5): - expected = ( - " dt x\n" - "0 2011-01-01 00:00:00+09:00 1\n" - "1 2011-01-01 00:00:00+09:00 2\n" - ".. ... 
..\n" - "8 2011-01-01 00:00:00-05:00 9\n" - "9 2011-01-01 00:00:00-05:00 10\n\n" - "[10 rows x 2 columns]" - ) - assert repr(df) == expected - - @pytest.mark.parametrize( - "start_date", - [ - "2017-01-01 23:59:59.999999999", - "2017-01-01 23:59:59.99999999", - "2017-01-01 23:59:59.9999999", - "2017-01-01 23:59:59.999999", - "2017-01-01 23:59:59.99999", - "2017-01-01 23:59:59.9999", - ], - ) - def test_datetimeindex_highprecision(self, start_date): - # GH19030 - # Check that high-precision time values for the end of day are - # included in repr for DatetimeIndex - df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)}) - result = str(df) - assert start_date in result - - dti = date_range(start=start_date, freq="D", periods=5) - df = DataFrame({"A": range(5)}, index=dti) - result = str(df.index) - assert start_date in result - - def test_nonunicode_nonascii_alignment(self): - df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]]) - rep_str = df.to_string() - lines = rep_str.split("\n") - assert len(lines[1]) == len(lines[2]) - - def test_unicode_problem_decoding_as_ascii(self): - dm = DataFrame({"c/\u03c3": Series({"test": np.nan})}) - str(dm.to_string()) - - def test_string_repr_encoding(self, datapath): - filepath = datapath("io", "parser", "data", "unicode_series.csv") - df = read_csv(filepath, header=None, encoding="latin1") - repr(df) - repr(df[1]) - - def test_repr_corner(self): - # representing infs poses no problems - df = DataFrame({"foo": [-np.inf, np.inf]}) - repr(df) - - def test_frame_info_encoding(self): - index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"] - with option_context("display.max_rows", 1): - df = DataFrame(columns=["a", "b", "c"], index=index) - repr(df) - repr(df.T) - - def test_wide_repr(self): - with option_context( - "mode.sim_interactive", - True, - "display.show_dimensions", - True, - "display.max_columns", - 20, - ): - max_cols = get_option("display.max_columns") - df = DataFrame([["a" * 25] * (max_cols - 1)] * 10) - with option_context("display.expand_frame_repr", False): - rep_str = repr(df) - - assert f"10 rows x {max_cols - 1} columns" in rep_str - with option_context("display.expand_frame_repr", True): - wide_repr = repr(df) - assert rep_str != wide_repr - - with option_context("display.width", 120): - wider_repr = repr(df) - assert len(wider_repr) < len(wide_repr) - - def test_wide_repr_wide_columns(self): - with option_context("mode.sim_interactive", True, "display.max_columns", 20): - df = DataFrame( - np.random.default_rng(2).standard_normal((5, 3)), - columns=["a" * 90, "b" * 90, "c" * 90], - ) - rep_str = repr(df) - - assert len(rep_str.splitlines()) == 20 - - def test_wide_repr_named(self): - with option_context("mode.sim_interactive", True, "display.max_columns", 20): - max_cols = get_option("display.max_columns") - df = DataFrame([["a" * 25] * (max_cols - 1)] * 10) - df.index.name = "DataFrame Index" - with option_context("display.expand_frame_repr", False): - rep_str = repr(df) - with option_context("display.expand_frame_repr", True): - wide_repr = repr(df) - assert rep_str != wide_repr - - with option_context("display.width", 150): - wider_repr = repr(df) - assert len(wider_repr) < len(wide_repr) - - for line in wide_repr.splitlines()[1::13]: - assert "DataFrame Index" in line - - def test_wide_repr_multiindex(self): - with option_context("mode.sim_interactive", True, "display.max_columns", 20): - midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2) - max_cols = get_option("display.max_columns") - df = 
DataFrame([["a" * 25] * (max_cols - 1)] * 10, index=midx) - df.index.names = ["Level 0", "Level 1"] - with option_context("display.expand_frame_repr", False): - rep_str = repr(df) - with option_context("display.expand_frame_repr", True): - wide_repr = repr(df) - assert rep_str != wide_repr - - with option_context("display.width", 150): - wider_repr = repr(df) - assert len(wider_repr) < len(wide_repr) - - for line in wide_repr.splitlines()[1::13]: - assert "Level 0 Level 1" in line - - def test_wide_repr_multiindex_cols(self): - with option_context("mode.sim_interactive", True, "display.max_columns", 20): - max_cols = get_option("display.max_columns") - midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2) - mcols = MultiIndex.from_arrays([["b" * 3] * (max_cols - 1)] * 2) - df = DataFrame( - [["c" * 25] * (max_cols - 1)] * 10, index=midx, columns=mcols - ) - df.index.names = ["Level 0", "Level 1"] - with option_context("display.expand_frame_repr", False): - rep_str = repr(df) - with option_context("display.expand_frame_repr", True): - wide_repr = repr(df) - assert rep_str != wide_repr - - with option_context("display.width", 150, "display.max_columns", 20): - wider_repr = repr(df) - assert len(wider_repr) < len(wide_repr) - - def test_wide_repr_unicode(self): - with option_context("mode.sim_interactive", True, "display.max_columns", 20): - max_cols = 20 - df = DataFrame([["a" * 25] * 10] * (max_cols - 1)) - with option_context("display.expand_frame_repr", False): - rep_str = repr(df) - with option_context("display.expand_frame_repr", True): - wide_repr = repr(df) - assert rep_str != wide_repr - - with option_context("display.width", 150): - wider_repr = repr(df) - assert len(wider_repr) < len(wide_repr) - - def test_wide_repr_wide_long_columns(self): - with option_context("mode.sim_interactive", True): - df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]}) - - result = repr(df) - assert "ccccc" in result - assert "ddddd" in result - - def test_long_series(self): - n = 1000 - s = Series( - np.random.default_rng(2).integers(-50, 50, n), - index=[f"s{x:04d}" for x in range(n)], - dtype="int64", - ) - - str_rep = str(s) - nmatches = len(re.findall("dtype", str_rep)) - assert nmatches == 1 - - def test_index_with_nan(self): - # GH 2850 - df = DataFrame( - { - "id1": {0: "1a3", 1: "9h4"}, - "id2": {0: np.nan, 1: "d67"}, - "id3": {0: "78d", 1: "79d"}, - "value": {0: 123, 1: 64}, - } - ) - - # multi-index - y = df.set_index(["id1", "id2", "id3"]) - result = y.to_string() - expected = ( - " value\nid1 id2 id3 \n" - "1a3 NaN 78d 123\n9h4 d67 79d 64" - ) - assert result == expected - - # index - y = df.set_index("id2") - result = y.to_string() - expected = ( - " id1 id3 value\nid2 \n" - "NaN 1a3 78d 123\nd67 9h4 79d 64" - ) - assert result == expected - - # with append (this failed in 0.12) - y = df.set_index(["id1", "id2"]).set_index("id3", append=True) - result = y.to_string() - expected = ( - " value\nid1 id2 id3 \n" - "1a3 NaN 78d 123\n9h4 d67 79d 64" - ) - assert result == expected - - # all-nan in mi - df2 = df.copy() - df2.loc[:, "id2"] = np.nan - y = df2.set_index("id2") - result = y.to_string() - expected = ( - " id1 id3 value\nid2 \n" - "NaN 1a3 78d 123\nNaN 9h4 79d 64" - ) - assert result == expected - - # partial nan in mi - df2 = df.copy() - df2.loc[:, "id2"] = np.nan - y = df2.set_index(["id2", "id3"]) - result = y.to_string() - expected = ( - " id1 value\nid2 id3 \n" - "NaN 78d 1a3 123\n 79d 9h4 64" - ) - assert result == expected - - df = DataFrame( - { - "id1": {0: 
np.nan, 1: "9h4"}, - "id2": {0: np.nan, 1: "d67"}, - "id3": {0: np.nan, 1: "79d"}, - "value": {0: 123, 1: 64}, - } - ) - - y = df.set_index(["id1", "id2", "id3"]) - result = y.to_string() - expected = ( - " value\nid1 id2 id3 \n" - "NaN NaN NaN 123\n9h4 d67 79d 64" - ) - assert result == expected - - def test_to_string(self): - # big mixed - biggie = DataFrame( - { - "A": np.random.default_rng(2).standard_normal(200), - "B": tm.makeStringIndex(200), - }, - ) - - biggie.loc[:20, "A"] = np.nan - biggie.loc[:20, "B"] = np.nan - s = biggie.to_string() - - buf = StringIO() - retval = biggie.to_string(buf=buf) - assert retval is None - assert buf.getvalue() == s - - assert isinstance(s, str) - - # print in right order - result = biggie.to_string( - columns=["B", "A"], col_space=17, float_format="%.5f".__mod__ - ) - lines = result.split("\n") - header = lines[0].strip().split() - joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]]) - recons = read_csv(StringIO(joined), names=header, header=None, sep=" ") - tm.assert_series_equal(recons["B"], biggie["B"]) - assert recons["A"].count() == biggie["A"].count() - assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all() - - # expected = ['B', 'A'] - # assert header == expected - - result = biggie.to_string(columns=["A"], col_space=17) - header = result.split("\n")[0].strip().split() - expected = ["A"] - assert header == expected - - biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"}) - - biggie.to_string(columns=["B", "A"], float_format=str) - biggie.to_string(columns=["B", "A"], col_space=12, float_format=str) - - frame = DataFrame(index=np.arange(200)) - frame.to_string() - - def test_to_string_no_header(self): - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(header=False) - expected = "0 1 4\n1 2 5\n2 3 6" - - assert df_s == expected - - def test_to_string_specified_header(self): - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(header=["X", "Y"]) - expected = " X Y\n0 1 4\n1 2 5\n2 3 6" - - assert df_s == expected - - msg = "Writing 2 cols but got 1 aliases" - with pytest.raises(ValueError, match=msg): - df.to_string(header=["X"]) - - def test_to_string_no_index(self): - # GH 16839, GH 13032 - df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]}) - - df_s = df.to_string(index=False) - # Leading space is expected for positive numbers. 
- expected = " x y z\n11 33 AAA\n22 -44 " - assert df_s == expected - - df_s = df[["y", "x", "z"]].to_string(index=False) - expected = " y x z\n 33 11 AAA\n-44 22 " - assert df_s == expected - - def test_to_string_line_width_no_index(self): - # GH 13998, GH 22505 - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, index=False) - expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, index=False) - expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) - - df_s = df.to_string(line_width=1, index=False) - expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 " - - assert df_s == expected - - def test_to_string_line_width_no_header(self): - # GH 53054 - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, header=False) - expected = "0 1 \\\n1 2 \n2 3 \n\n0 4 \n1 5 \n2 6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, header=False) - expected = "0 11 \\\n1 22 \n2 33 \n\n0 4 \n1 5 \n2 6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) - - df_s = df.to_string(line_width=1, header=False) - expected = "0 11 \\\n1 22 \n2 -33 \n\n0 4 \n1 5 \n2 -6 " - - assert df_s == expected - - def test_to_string_line_width_no_index_no_header(self): - # GH 53054 - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, index=False, header=False) - expected = "1 \\\n2 \n3 \n\n4 \n5 \n6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1, index=False, header=False) - expected = "11 \\\n22 \n33 \n\n4 \n5 \n6 " - - assert df_s == expected - - df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) - - df_s = df.to_string(line_width=1, index=False, header=False) - expected = " 11 \\\n 22 \n-33 \n\n 4 \n 5 \n-6 " - - assert df_s == expected - - def test_to_string_line_width_with_both_index_and_header(self): - # GH 53054 - df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1) - expected = ( - " x \\\n0 1 \n1 2 \n2 3 \n\n y \n0 4 \n1 5 \n2 6 " - ) - - assert df_s == expected - - df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) - - df_s = df.to_string(line_width=1) - expected = ( - " x \\\n0 11 \n1 22 \n2 33 \n\n y \n0 4 \n1 5 \n2 6 " - ) - - assert df_s == expected - - df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]}) - - df_s = df.to_string(line_width=1) - expected = ( - " x \\\n0 11 \n1 22 \n2 -33 \n\n y \n0 4 \n1 5 \n2 -6 " - ) - - assert df_s == expected - - def test_to_string_float_formatting(self): - tm.reset_display_options() - with option_context( - "display.precision", - 5, - "display.notebook_repr_html", - False, - ): - df = DataFrame( - {"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]} - ) - - df_s = df.to_string() - - if _three_digit_exp(): - expected = ( - " x\n0 0.00000e+000\n1 2.50000e-001\n" - "2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n" - "5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n" - "8 -1.00000e+006" - ) - else: - expected = ( - " x\n0 0.00000e+00\n1 2.50000e-01\n" - "2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n" - "5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n" - "8 -1.00000e+06" - ) - assert df_s == expected - - df = DataFrame({"x": 
[3234, 0.253]}) - df_s = df.to_string() - - expected = " x\n0 3234.000\n1 0.253" - assert df_s == expected - - tm.reset_display_options() - assert get_option("display.precision") == 6 - - df = DataFrame({"x": [1e9, 0.2512]}) - df_s = df.to_string() - - if _three_digit_exp(): - expected = " x\n0 1.000000e+009\n1 2.512000e-001" - else: - expected = " x\n0 1.000000e+09\n1 2.512000e-01" - assert df_s == expected - - def test_to_string_float_format_no_fixed_width(self): - # GH 21625 - df = DataFrame({"x": [0.19999]}) - expected = " x\n0 0.200" - assert df.to_string(float_format="%.3f") == expected - - # GH 22270 - df = DataFrame({"x": [100.0]}) - expected = " x\n0 100" - assert df.to_string(float_format="%.0f") == expected - - def test_to_string_small_float_values(self): - df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]}) - - result = df.to_string() - # sadness per above - if _three_digit_exp(): - expected = ( - " a\n" - "0 1.500000e+000\n" - "1 1.000000e-017\n" - "2 -5.500000e-007" - ) - else: - expected = ( - " a\n" - "0 1.500000e+00\n" - "1 1.000000e-17\n" - "2 -5.500000e-07" - ) - assert result == expected - - # but not all exactly zero - df = df * 0 - result = df.to_string() - expected = " 0\n0 0\n1 0\n2 -0" - - def test_to_string_float_index(self): - index = Index([1.5, 2, 3, 4, 5]) - df = DataFrame(np.arange(5), index=index) - - result = df.to_string() - expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4" - assert result == expected - - def test_to_string_complex_float_formatting(self): - # GH #25514, 25745 - with option_context("display.precision", 5): - df = DataFrame( - { - "x": [ - (0.4467846931321966 + 0.0715185102060818j), - (0.2739442392974528 + 0.23515228785438969j), - (0.26974928742135185 + 0.3250604054898979j), - (-1j), - ] - } - ) - result = df.to_string() - expected = ( - " x\n0 0.44678+0.07152j\n" - "1 0.27394+0.23515j\n" - "2 0.26975+0.32506j\n" - "3 -0.00000-1.00000j" - ) - assert result == expected - - def test_to_string_ascii_error(self): - data = [ - ( - "0 ", - " .gitignore ", - " 5 ", - " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2", - ) - ] - df = DataFrame(data) - - # it works! 
- repr(df) - - def test_to_string_int_formatting(self): - df = DataFrame({"x": [-15, 20, 25, -35]}) - assert issubclass(df["x"].dtype.type, np.integer) - - output = df.to_string() - expected = " x\n0 -15\n1 20\n2 25\n3 -35" - assert output == expected - - def test_to_string_index_formatter(self): - df = DataFrame([range(5), range(5, 10), range(10, 15)]) - - rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]}) - - xp = """\ - 0 1 2 3 4 -a 0 1 2 3 4 -b 5 6 7 8 9 -c 10 11 12 13 14\ -""" - - assert rs == xp - - def test_to_string_left_justify_cols(self): - tm.reset_display_options() - df = DataFrame({"x": [3234, 0.253]}) - df_s = df.to_string(justify="left") - expected = " x \n0 3234.000\n1 0.253" - assert df_s == expected - - def test_to_string_format_na(self): - tm.reset_display_options() - df = DataFrame( - { - "A": [np.nan, -1, -2.1234, 3, 4], - "B": [np.nan, "foo", "foooo", "fooooo", "bar"], - } - ) - result = df.to_string() - - expected = ( - " A B\n" - "0 NaN NaN\n" - "1 -1.0000 foo\n" - "2 -2.1234 foooo\n" - "3 3.0000 fooooo\n" - "4 4.0000 bar" - ) - assert result == expected - - df = DataFrame( - { - "A": [np.nan, -1.0, -2.0, 3.0, 4.0], - "B": [np.nan, "foo", "foooo", "fooooo", "bar"], - } - ) - result = df.to_string() - - expected = ( - " A B\n" - "0 NaN NaN\n" - "1 -1.0 foo\n" - "2 -2.0 foooo\n" - "3 3.0 fooooo\n" - "4 4.0 bar" - ) - assert result == expected - - def test_to_string_format_inf(self): - # Issue #24861 - tm.reset_display_options() - df = DataFrame( - { - "A": [-np.inf, np.inf, -1, -2.1234, 3, 4], - "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"], - } - ) - result = df.to_string() - - expected = ( - " A B\n" - "0 -inf -inf\n" - "1 inf inf\n" - "2 -1.0000 foo\n" - "3 -2.1234 foooo\n" - "4 3.0000 fooooo\n" - "5 4.0000 bar" - ) - assert result == expected - - df = DataFrame( - { - "A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0], - "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"], - } - ) - result = df.to_string() - - expected = ( - " A B\n" - "0 -inf -inf\n" - "1 inf inf\n" - "2 -1.0 foo\n" - "3 -2.0 foooo\n" - "4 3.0 fooooo\n" - "5 4.0 bar" - ) - assert result == expected - - def test_to_string_decimal(self): - # Issue #23614 - df = DataFrame({"A": [6.0, 3.1, 2.2]}) - expected = " A\n0 6,0\n1 3,1\n2 2,2" - assert df.to_string(decimal=",") == expected - - def test_to_string_line_width(self): - df = DataFrame(123, index=range(10, 15), columns=range(30)) - s = df.to_string(line_width=80) - assert max(len(line) for line in s.split("\n")) == 80 - - def test_to_string_header_false(self): - # GH 49230 - df = DataFrame([1, 2]) - df.index.name = "a" - s = df.to_string(header=False) - expected = "a \n0 1\n1 2" - assert s == expected - - df = DataFrame([[1, 2], [3, 4]]) - df.index.name = "a" - s = df.to_string(header=False) - expected = "a \n0 1 2\n1 3 4" - assert s == expected - - def test_show_dimensions(self): - df = DataFrame(123, index=range(10, 15), columns=range(30)) - - with option_context( - "display.max_rows", - 10, - "display.max_columns", - 40, - "display.width", - 500, - "display.expand_frame_repr", - "info", - "display.show_dimensions", - True, - ): - assert "5 rows" in str(df) - assert "5 rows" in df._repr_html_() - with option_context( - "display.max_rows", - 10, - "display.max_columns", - 40, - "display.width", - 500, - "display.expand_frame_repr", - "info", - "display.show_dimensions", - False, - ): - assert "5 rows" not in str(df) - assert "5 rows" not in df._repr_html_() - with option_context( - "display.max_rows", - 2, - 
"display.max_columns", - 2, - "display.width", - 500, - "display.expand_frame_repr", - "info", - "display.show_dimensions", - "truncate", - ): - assert "5 rows" in str(df) - assert "5 rows" in df._repr_html_() - with option_context( - "display.max_rows", - 10, - "display.max_columns", - 40, - "display.width", - 500, - "display.expand_frame_repr", - "info", - "display.show_dimensions", - "truncate", - ): - assert "5 rows" not in str(df) - assert "5 rows" not in df._repr_html_() - - def test_repr_html(self, float_frame): - df = float_frame - df._repr_html_() - - with option_context("display.max_rows", 1, "display.max_columns", 1): - df._repr_html_() - - with option_context("display.notebook_repr_html", False): - df._repr_html_() - - tm.reset_display_options() - - df = DataFrame([[1, 2], [3, 4]]) - with option_context("display.show_dimensions", True): - assert "2 rows" in df._repr_html_() - with option_context("display.show_dimensions", False): - assert "2 rows" not in df._repr_html_() - - tm.reset_display_options() - - def test_repr_html_mathjax(self): - df = DataFrame([[1, 2], [3, 4]]) - assert "tex2jax_ignore" not in df._repr_html_() - - with option_context("display.html.use_mathjax", False): - assert "tex2jax_ignore" in df._repr_html_() - - def test_repr_html_wide(self): - max_cols = 20 - df = DataFrame([["a" * 25] * (max_cols - 1)] * 10) - with option_context("display.max_rows", 60, "display.max_columns", 20): - assert "..." not in df._repr_html_() - - wide_df = DataFrame([["a" * 25] * (max_cols + 1)] * 10) - with option_context("display.max_rows", 60, "display.max_columns", 20): - assert "..." in wide_df._repr_html_() - - def test_repr_html_wide_multiindex_cols(self): - max_cols = 20 - - mcols = MultiIndex.from_product( - [np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"] - ) - df = DataFrame([["a" * 25] * len(mcols)] * 10, columns=mcols) - reg_repr = df._repr_html_() - assert "..." not in reg_repr - - mcols = MultiIndex.from_product( - (np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"] - ) - df = DataFrame([["a" * 25] * len(mcols)] * 10, columns=mcols) - with option_context("display.max_rows", 60, "display.max_columns", 20): - assert "..." in df._repr_html_() - - def test_repr_html_long(self): - with option_context("display.max_rows", 60): - max_rows = get_option("display.max_rows") - h = max_rows - 1 - df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)}) - reg_repr = df._repr_html_() - assert ".." not in reg_repr - assert str(41 + max_rows // 2) in reg_repr - - h = max_rows + 1 - df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)}) - long_repr = df._repr_html_() - assert ".." in long_repr - assert str(41 + max_rows // 2) not in long_repr - assert f"{h} rows " in long_repr - assert "2 columns" in long_repr - - def test_repr_html_float(self): - with option_context("display.max_rows", 60): - max_rows = get_option("display.max_rows") - h = max_rows - 1 - df = DataFrame( - { - "idx": np.linspace(-10, 10, h), - "A": np.arange(1, 1 + h), - "B": np.arange(41, 41 + h), - } - ).set_index("idx") - reg_repr = df._repr_html_() - assert ".." not in reg_repr - assert f"{40 + h}" in reg_repr - - h = max_rows + 1 - df = DataFrame( - { - "idx": np.linspace(-10, 10, h), - "A": np.arange(1, 1 + h), - "B": np.arange(41, 41 + h), - } - ).set_index("idx") - long_repr = df._repr_html_() - assert ".." 
in long_repr - assert "31" not in long_repr - assert f"{h} rows " in long_repr - assert "2 columns" in long_repr - - def test_repr_html_long_multiindex(self): - max_rows = 60 - max_L1 = max_rows // 2 - - tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"])) - idx = MultiIndex.from_tuples(tuples, names=["first", "second"]) - df = DataFrame( - np.random.default_rng(2).standard_normal((max_L1 * 2, 2)), - index=idx, - columns=["A", "B"], - ) - with option_context("display.max_rows", 60, "display.max_columns", 20): - reg_repr = df._repr_html_() - assert "..." not in reg_repr - - tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"])) - idx = MultiIndex.from_tuples(tuples, names=["first", "second"]) - df = DataFrame( - np.random.default_rng(2).standard_normal(((max_L1 + 1) * 2, 2)), - index=idx, - columns=["A", "B"], - ) - long_repr = df._repr_html_() - assert "..." in long_repr - - def test_repr_html_long_and_wide(self): - max_cols = 20 - max_rows = 60 - - h, w = max_rows - 1, max_cols - 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - with option_context("display.max_rows", 60, "display.max_columns", 20): - assert "..." not in df._repr_html_() - - h, w = max_rows + 1, max_cols + 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - with option_context("display.max_rows", 60, "display.max_columns", 20): - assert "..." in df._repr_html_() - - def test_info_repr(self): - # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect - # the terminal size to ensure that we try to print something "too big" - term_width, term_height = get_terminal_size() - - max_rows = 60 - max_cols = 20 + (max(term_width, 80) - 80) // 4 - # Long - h, w = max_rows + 1, max_cols - 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert has_vertically_truncated_repr(df) - with option_context("display.large_repr", "info"): - assert has_info_repr(df) - - # Wide - h, w = max_rows - 1, max_cols + 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert has_horizontally_truncated_repr(df) - with option_context( - "display.large_repr", "info", "display.max_columns", max_cols - ): - assert has_info_repr(df) - - def test_info_repr_max_cols(self): - # GH #6939 - df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) - with option_context( - "display.large_repr", - "info", - "display.max_columns", - 1, - "display.max_info_columns", - 4, - ): - assert has_non_verbose_info_repr(df) - - with option_context( - "display.large_repr", - "info", - "display.max_columns", - 1, - "display.max_info_columns", - 5, - ): - assert not has_non_verbose_info_repr(df) - - # test verbose overrides - # set_option('display.max_info_columns', 4) # exceeded - - def test_info_repr_html(self): - max_rows = 60 - max_cols = 20 - # Long - h, w = max_rows + 1, max_cols - 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert r"<class" not in df._repr_html_() - with option_context("display.large_repr", "info"): - assert r"<class" in df._repr_html_() - - # Wide - h, w = max_rows - 1, max_cols + 1 - df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert " len(frame), hence max_rows - (50, 30, 10, 10), # max_rows < len(frame), hence min_rows - (100, 60, 10, 10), # same - (60, 60, 10, 60), # edge case - (61, 60, 10, 10), # edge case - ], - ) - def test_max_rows_fitted(self, length, min_rows, max_rows, expected): - """Check that display logic is correct. 
- - GH #37359 - - See description here: - https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options - """ - formatter = fmt.DataFrameFormatter( - DataFrame(np.random.default_rng(2).random((length, 3))), - max_rows=max_rows, - min_rows=min_rows, - ) - result = formatter.max_rows_fitted - assert result == expected - - def test_no_extra_space(self): - # GH 52690: Check that no extra space is given - col1 = "TEST" - col2 = "PANDAS" - col3 = "to_string" - expected = f"{col1:<6s} {col2:<7s} {col3:<10s}" - df = DataFrame([{"col1": "TEST", "col2": "PANDAS", "col3": "to_string"}]) - d = {"col1": "{:<6s}".format, "col2": "{:<7s}".format, "col3": "{:<10s}".format} - result = df.to_string(index=False, header=False, formatters=d) - assert result == expected - - -def gen_series_formatting(): - s1 = Series(["a"] * 100) - s2 = Series(["ab"] * 100) - s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"]) - s4 = s3[::-1] - test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4} - return test_sers - - -class TestSeriesFormatting: - def test_repr_unicode(self): - s = Series(["\u03c3"] * 10) - repr(s) - - a = Series(["\u05d0"] * 1000) - a.name = "title1" - repr(a) - - def test_to_string(self): - ts = tm.makeTimeSeries() - buf = StringIO() - - s = ts.to_string() - - retval = ts.to_string(buf=buf) - assert retval is None - assert buf.getvalue().strip() == s - - # pass float_format - format = "%.4f".__mod__ - result = ts.to_string(float_format=format) - result = [x.split()[1] for x in result.split("\n")[:-1]] - expected = [format(x) for x in ts] - assert result == expected - - # empty string - result = ts[:0].to_string() - assert result == "Series([], Freq: B)" - - result = ts[:0].to_string(length=0) - assert result == "Series([], Freq: B)" - - # name and length - cp = ts.copy() - cp.name = "foo" - result = cp.to_string(length=True, name=True, dtype=True) - last_line = result.split("\n")[-1].strip() - assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64") - - def test_freq_name_separation(self): - s = Series( - np.random.default_rng(2).standard_normal(10), - index=date_range("1/1/2000", periods=10), - name=0, - ) - - result = repr(s) - assert "Freq: D, Name: 0" in result - - def test_to_string_mixed(self): - s = Series(["foo", np.nan, -1.23, 4.56]) - result = s.to_string() - expected = "".join(["0 foo\n", "1 NaN\n", "2 -1.23\n", "3 4.56"]) - assert result == expected - - # but don't count NAs as floats - s = Series(["foo", np.nan, "bar", "baz"]) - result = s.to_string() - expected = "".join(["0 foo\n", "1 NaN\n", "2 bar\n", "3 baz"]) - assert result == expected - - s = Series(["foo", 5, "bar", "baz"]) - result = s.to_string() - expected = "".join(["0 foo\n", "1 5\n", "2 bar\n", "3 baz"]) - assert result == expected - - def test_to_string_float_na_spacing(self): - s = Series([0.0, 1.5678, 2.0, -3.0, 4.0]) - s[::2] = np.nan - - result = s.to_string() - expected = ( - "0 NaN\n" - "1 1.5678\n" - "2 NaN\n" - "3 -3.0000\n" - "4 NaN" - ) - assert result == expected - - def test_to_string_without_index(self): - # GH 11729 Test index=False option - s = Series([1, 2, 3, 4]) - result = s.to_string(index=False) - expected = "\n".join(["1", "2", "3", "4"]) - assert result == expected - - def test_unicode_name_in_footer(self): - s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea") - sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea") - sf._get_footer() # should not raise exception - - def test_east_asian_unicode_series(self): - # not aligned properly 
because of east asian width - - # unicode index - s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"]) - expected = "".join( - [ - "あ a\n", - "いい bb\n", - "ううう CCC\n", - "ええええ D\ndtype: object", - ] - ) - assert repr(s) == expected - - # unicode values - s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"]) - expected = "".join( - [ - "a あ\n", - "bb いい\n", - "c ううう\n", - "ddd ええええ\n", - "dtype: object", - ] - ) - - assert repr(s) == expected - - # both - s = Series( - ["あ", "いい", "ううう", "ええええ"], - index=["ああ", "いいいい", "う", "えええ"], - ) - expected = "".join( - [ - "ああ あ\n", - "いいいい いい\n", - "う ううう\n", - "えええ ええええ\n", - "dtype: object", - ] - ) - - assert repr(s) == expected - - # unicode footer - s = Series( - ["あ", "いい", "ううう", "ええええ"], - index=["ああ", "いいいい", "う", "えええ"], - name="おおおおおおお", - ) - expected = ( - "ああ あ\nいいいい いい\nう ううう\n" - "えええ ええええ\nName: おおおおおおお, dtype: object" - ) - assert repr(s) == expected - - # MultiIndex - idx = MultiIndex.from_tuples( - [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")] - ) - s = Series([1, 22, 3333, 44444], index=idx) - expected = ( - "あ いい 1\n" - "う え 22\n" - "おおお かかかか 3333\n" - "き くく 44444\ndtype: int64" - ) - assert repr(s) == expected - - # object dtype, shorter than unicode repr - s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"]) - expected = ( - "1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64" - ) - assert repr(s) == expected - - # object dtype, longer than unicode repr - s = Series( - [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"] - ) - expected = ( - "1 1\n" - "AB 22\n" - "2011-01-01 00:00:00 3333\n" - "あああ 44444\ndtype: int64" - ) - assert repr(s) == expected - - # truncate - with option_context("display.max_rows", 3): - s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお") - - expected = ( - "0 あ\n ... \n" - "3 ええええ\n" - "Name: おおおおおおお, Length: 4, dtype: object" - ) - assert repr(s) == expected - - s.index = ["ああ", "いいいい", "う", "えええ"] - expected = ( - "ああ あ\n ... 
\n" - "えええ ええええ\n" - "Name: おおおおおおお, Length: 4, dtype: object" - ) - assert repr(s) == expected - - # Enable Unicode option ----------------------------------------- - with option_context("display.unicode.east_asian_width", True): - # unicode index - s = Series( - ["a", "bb", "CCC", "D"], - index=["あ", "いい", "ううう", "ええええ"], - ) - expected = ( - "あ a\nいい bb\nううう CCC\n" - "ええええ D\ndtype: object" - ) - assert repr(s) == expected - - # unicode values - s = Series( - ["あ", "いい", "ううう", "ええええ"], - index=["a", "bb", "c", "ddd"], - ) - expected = ( - "a あ\nbb いい\nc ううう\n" - "ddd ええええ\ndtype: object" - ) - assert repr(s) == expected - # both - s = Series( - ["あ", "いい", "ううう", "ええええ"], - index=["ああ", "いいいい", "う", "えええ"], - ) - expected = ( - "ああ あ\n" - "いいいい いい\n" - "う ううう\n" - "えええ ええええ\ndtype: object" - ) - assert repr(s) == expected - - # unicode footer - s = Series( - ["あ", "いい", "ううう", "ええええ"], - index=["ああ", "いいいい", "う", "えええ"], - name="おおおおおおお", - ) - expected = ( - "ああ あ\n" - "いいいい いい\n" - "う ううう\n" - "えええ ええええ\n" - "Name: おおおおおおお, dtype: object" - ) - assert repr(s) == expected - - # MultiIndex - idx = MultiIndex.from_tuples( - [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")] - ) - s = Series([1, 22, 3333, 44444], index=idx) - expected = ( - "あ いい 1\n" - "う え 22\n" - "おおお かかかか 3333\n" - "き くく 44444\n" - "dtype: int64" - ) - assert repr(s) == expected - - # object dtype, shorter than unicode repr - s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"]) - expected = ( - "1 1\nAB 22\nNaN 3333\n" - "あああ 44444\ndtype: int64" - ) - assert repr(s) == expected - - # object dtype, longer than unicode repr - s = Series( - [1, 22, 3333, 44444], - index=[1, "AB", Timestamp("2011-01-01"), "あああ"], - ) - expected = ( - "1 1\n" - "AB 22\n" - "2011-01-01 00:00:00 3333\n" - "あああ 44444\ndtype: int64" - ) - assert repr(s) == expected - - # truncate - with option_context("display.max_rows", 3): - s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお") - expected = ( - "0 あ\n ... \n" - "3 ええええ\n" - "Name: おおおおおおお, Length: 4, dtype: object" - ) - assert repr(s) == expected - - s.index = ["ああ", "いいいい", "う", "えええ"] - expected = ( - "ああ あ\n" - " ... 
\n" - "えええ ええええ\n" - "Name: おおおおおおお, Length: 4, dtype: object" - ) - assert repr(s) == expected - - # ambiguous unicode - s = Series( - ["¡¡", "い¡¡", "ううう", "ええええ"], - index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"], - ) - expected = ( - "ああ ¡¡\n" - "¡¡¡¡いい い¡¡\n" - "¡¡ ううう\n" - "えええ ええええ\ndtype: object" - ) - assert repr(s) == expected - - def test_float_trim_zeros(self): - vals = [ - 2.08430917305e10, - 3.52205017305e10, - 2.30674817305e10, - 2.03954217305e10, - 5.59897817305e10, - ] - for line in repr(Series(vals)).split("\n"): - if line.startswith("dtype:"): - continue - if _three_digit_exp(): - assert "+010" in line - else: - assert "+10" in line - - def test_datetimeindex(self): - index = date_range("20130102", periods=6) - s = Series(1, index=index) - result = s.to_string() - assert "2013-01-02" in result - - # nat in index - s2 = Series(2, index=[Timestamp("20130111"), NaT]) - s = pd.concat([s2, s]) - result = s.to_string() - assert "NaT" in result - - # nat in summary - result = str(s2.index) - assert "NaT" in result - - @pytest.mark.parametrize( - "start_date", - [ - "2017-01-01 23:59:59.999999999", - "2017-01-01 23:59:59.99999999", - "2017-01-01 23:59:59.9999999", - "2017-01-01 23:59:59.999999", - "2017-01-01 23:59:59.99999", - "2017-01-01 23:59:59.9999", - ], - ) - def test_datetimeindex_highprecision(self, start_date): - # GH19030 - # Check that high-precision time values for the end of day are - # included in repr for DatetimeIndex - s1 = Series(date_range(start=start_date, freq="D", periods=5)) - result = str(s1) - assert start_date in result - - dti = date_range(start=start_date, freq="D", periods=5) - s2 = Series(3, index=dti) - result = str(s2.index) - assert start_date in result - - def test_timedelta64(self): - Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string() - - s = Series(date_range("2012-1-1", periods=3, freq="D")) - - # GH2146 - - # adding NaTs - y = s - s.shift(1) - result = y.to_string() - assert "1 days" in result - assert "00:00:00" not in result - assert "NaT" in result - - # with frac seconds - o = Series([datetime(2012, 1, 1, microsecond=150)] * 3) - y = s - o - result = y.to_string() - assert "-1 days +23:59:59.999850" in result - - # rounding? 
- o = Series([datetime(2012, 1, 1, 1)] * 3) - y = s - o - result = y.to_string() - assert "-1 days +23:00:00" in result - assert "1 days 23:00:00" in result - - o = Series([datetime(2012, 1, 1, 1, 1)] * 3) - y = s - o - result = y.to_string() - assert "-1 days +22:59:00" in result - assert "1 days 22:59:00" in result - - o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3) - y = s - o - result = y.to_string() - assert "-1 days +22:58:59.999850" in result - assert "0 days 22:58:59.999850" in result - - # neg time - td = timedelta(minutes=5, seconds=3) - s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td - y = s - s2 - result = y.to_string() - assert "-1 days +23:54:57" in result - - td = timedelta(microseconds=550) - s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td - y = s - td - result = y.to_string() - assert "2012-01-01 23:59:59.999450" in result - - # no boxing of the actual elements - td = Series(pd.timedelta_range("1 days", periods=3)) - result = td.to_string() - assert result == "0 1 days\n1 2 days\n2 3 days" - - def test_mixed_datetime64(self): - df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]}) - df["B"] = pd.to_datetime(df.B) - - result = repr(df.loc[0]) - assert "2012-01-01" in result - - def test_period(self): - # GH 12615 - index = pd.period_range("2013-01", periods=6, freq="M") - s = Series(np.arange(6, dtype="int64"), index=index) - exp = ( - "2013-01 0\n" - "2013-02 1\n" - "2013-03 2\n" - "2013-04 3\n" - "2013-05 4\n" - "2013-06 5\n" - "Freq: M, dtype: int64" - ) - assert str(s) == exp - - s = Series(index) - exp = ( - "0 2013-01\n" - "1 2013-02\n" - "2 2013-03\n" - "3 2013-04\n" - "4 2013-05\n" - "5 2013-06\n" - "dtype: period[M]" - ) - assert str(s) == exp - - # periods with mixed freq - s = Series( - [ - pd.Period("2011-01", freq="M"), - pd.Period("2011-02-01", freq="D"), - pd.Period("2011-03-01 09:00", freq="H"), - ] - ) - exp = ( - "0 2011-01\n1 2011-02-01\n" - "2 2011-03-01 09:00\ndtype: object" - ) - assert str(s) == exp - - def test_max_multi_index_display(self): - # GH 7101 - - # doc example (indexing.rst) - - # multi-index - arrays = [ - ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], - ["one", "two", "one", "two", "one", "two", "one", "two"], - ] - tuples = list(zip(*arrays)) - index = MultiIndex.from_tuples(tuples, names=["first", "second"]) - s = Series(np.random.default_rng(2).standard_normal(8), index=index) - - with option_context("display.max_rows", 10): - assert len(str(s).split("\n")) == 10 - with option_context("display.max_rows", 3): - assert len(str(s).split("\n")) == 5 - with option_context("display.max_rows", 2): - assert len(str(s).split("\n")) == 5 - with option_context("display.max_rows", 1): - assert len(str(s).split("\n")) == 4 - with option_context("display.max_rows", 0): - assert len(str(s).split("\n")) == 10 - - # index - s = Series(np.random.default_rng(2).standard_normal(8), None) - - with option_context("display.max_rows", 10): - assert len(str(s).split("\n")) == 9 - with option_context("display.max_rows", 3): - assert len(str(s).split("\n")) == 4 - with option_context("display.max_rows", 2): - assert len(str(s).split("\n")) == 4 - with option_context("display.max_rows", 1): - assert len(str(s).split("\n")) == 3 - with option_context("display.max_rows", 0): - assert len(str(s).split("\n")) == 9 - - # Make sure #8532 is fixed - def test_consistent_format(self): - s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10) - with option_context("display.max_rows", 10, 
"display.show_dimensions", False): - res = repr(s) - exp = ( - "0 1.0000\n1 1.0000\n2 1.0000\n3 " - "1.0000\n4 1.0000\n ... \n125 " - "1.0000\n126 1.0000\n127 0.9999\n128 " - "1.0000\n129 1.0000\ndtype: float64" - ) - assert res == exp - - def chck_ncols(self, s): - with option_context("display.max_rows", 10): - res = repr(s) - lines = res.split("\n") - lines = [ - line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line) - ][:-1] - ncolsizes = len({len(line.strip()) for line in lines}) - assert ncolsizes == 1 - - def test_format_explicit(self): - test_sers = gen_series_formatting() - with option_context("display.max_rows", 4, "display.show_dimensions", False): - res = repr(test_sers["onel"]) - exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object" - assert exp == res - res = repr(test_sers["twol"]) - exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object" - assert exp == res - res = repr(test_sers["asc"]) - exp = ( - "0 a\n1 ab\n ... \n4 abcde\n5 " - "abcdef\ndtype: object" - ) - assert exp == res - res = repr(test_sers["desc"]) - exp = ( - "5 abcdef\n4 abcde\n ... \n1 ab\n0 " - "a\ndtype: object" - ) - assert exp == res - - def test_ncols(self): - test_sers = gen_series_formatting() - for s in test_sers.values(): - self.chck_ncols(s) - - def test_max_rows_eq_one(self): - s = Series(range(10), dtype="int64") - with option_context("display.max_rows", 1): - strrepr = repr(s).split("\n") - exp1 = ["0", "0"] - res1 = strrepr[0].split() - assert exp1 == res1 - exp2 = [".."] - res2 = strrepr[1].split() - assert exp2 == res2 - - def test_truncate_ndots(self): - def getndots(s): - return len(re.match(r"[^\.]*(\.*)", s).groups()[0]) - - s = Series([0, 2, 3, 6]) - with option_context("display.max_rows", 2): - strrepr = repr(s).replace("\n", "") - assert getndots(strrepr) == 2 - - s = Series([0, 100, 200, 400]) - with option_context("display.max_rows", 2): - strrepr = repr(s).replace("\n", "") - assert getndots(strrepr) == 3 - - def test_show_dimensions(self): - # gh-7117 - s = Series(range(5)) - - assert "Length" not in repr(s) - - with option_context("display.max_rows", 4): - assert "Length" in repr(s) - - with option_context("display.show_dimensions", True): - assert "Length" in repr(s) - - with option_context("display.max_rows", 4, "display.show_dimensions", False): - assert "Length" not in repr(s) - - def test_repr_min_rows(self): - s = Series(range(20)) - - # default setting no truncation even if above min_rows - assert ".." not in repr(s) - - s = Series(range(61)) - - # default of max_rows 60 triggers truncation if above - assert ".." in repr(s) - - with option_context("display.max_rows", 10, "display.min_rows", 4): - # truncated after first two rows - assert ".." in repr(s) - assert "2 " not in repr(s) - - with option_context("display.max_rows", 12, "display.min_rows", None): - # when set to None, follow value of max_rows - assert "5 5" in repr(s) - - with option_context("display.max_rows", 10, "display.min_rows", 12): - # when set value higher as max_rows, use the minimum - assert "5 5" not in repr(s) - - with option_context("display.max_rows", None, "display.min_rows", 12): - # max_rows of None -> never truncate - assert ".." 
not in repr(s) - - def test_to_string_name(self): - s = Series(range(100), dtype="int64") - s.name = "myser" - res = s.to_string(max_rows=2, name=True) - exp = "0 0\n ..\n99 99\nName: myser" - assert res == exp - res = s.to_string(max_rows=2, name=False) - exp = "0 0\n ..\n99 99" - assert res == exp - - def test_to_string_dtype(self): - s = Series(range(100), dtype="int64") - res = s.to_string(max_rows=2, dtype=True) - exp = "0 0\n ..\n99 99\ndtype: int64" - assert res == exp - res = s.to_string(max_rows=2, dtype=False) - exp = "0 0\n ..\n99 99" - assert res == exp - - def test_to_string_length(self): - s = Series(range(100), dtype="int64") - res = s.to_string(max_rows=2, length=True) - exp = "0 0\n ..\n99 99\nLength: 100" - assert res == exp - - def test_to_string_na_rep(self): - s = Series(index=range(100), dtype=np.float64) - res = s.to_string(na_rep="foo", max_rows=2) - exp = "0 foo\n ..\n99 foo" - assert res == exp - - def test_to_string_float_format(self): - s = Series(range(10), dtype="float64") - res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2) - exp = "0 0.0\n ..\n9 9.0" - assert res == exp - - def test_to_string_header(self): - s = Series(range(10), dtype="int64") - s.index.name = "foo" - res = s.to_string(header=True, max_rows=2) - exp = "foo\n0 0\n ..\n9 9" - assert res == exp - res = s.to_string(header=False, max_rows=2) - exp = "0 0\n ..\n9 9" - assert res == exp - - def test_to_string_multindex_header(self): - # GH 16718 - df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"]) - res = df.to_string(header=["r1", "r2"]) - exp = " r1 r2\na b \n0 1 2 3" - assert res == exp - - def test_to_string_empty_col(self): - # GH 13653 - s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""]) - res = s.to_string(index=False) - exp = " \n Hello\n World\n \n \nMooooo\n \n " - assert re.match(exp, res) - - -class TestGenericArrayFormatter: - def test_1d_array(self): - # GenericArrayFormatter is used on types for which there isn't a dedicated - # formatter. np.bool_ is one of those types. - obj = fmt.GenericArrayFormatter(np.array([True, False])) - res = obj.get_result() - assert len(res) == 2 - # Results should be right-justified. - assert res[0] == " True" - assert res[1] == " False" - - def test_2d_array(self): - obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]])) - res = obj.get_result() - assert len(res) == 2 - assert res[0] == " [True, False]" - assert res[1] == " [False, True]" - - def test_3d_array(self): - obj = fmt.GenericArrayFormatter( - np.array([[[True, True], [False, False]], [[False, True], [True, False]]]) - ) - res = obj.get_result() - assert len(res) == 2 - assert res[0] == " [[True, True], [False, False]]" - assert res[1] == " [[False, True], [True, False]]" - - def test_2d_extension_type(self): - # GH 33770 - - # Define a stub extension type with just enough code to run Series.__repr__() - class DtypeStub(pd.api.extensions.ExtensionDtype): - @property - def type(self): - return np.ndarray - - @property - def name(self): - return "DtypeStub" - - class ExtTypeStub(pd.api.extensions.ExtensionArray): - def __len__(self) -> int: - return 2 - - def __getitem__(self, ix): - return [ix == 1, ix == 0] - - @property - def dtype(self): - return DtypeStub() - - series = Series(ExtTypeStub(), copy=False) - res = repr(series) # This line crashed before #33770 was fixed. 
- expected = "\n".join( - ["0 [False True]", "1 [ True False]", "dtype: DtypeStub"] - ) - assert res == expected - - -def _three_digit_exp(): - return f"{1.7e8:.4g}" == "1.7e+008" - - -class TestFloatArrayFormatter: - def test_misc(self): - obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64)) - result = obj.get_result() - assert len(result) == 0 - - def test_format(self): - obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64)) - result = obj.get_result() - assert result[0] == " 12.0" - assert result[1] == " 0.0" - - def test_output_display_precision_trailing_zeroes(self): - # Issue #20359: trimming zeros while there is no decimal point - - # Happens when display precision is set to zero - with option_context("display.precision", 0): - s = Series([840.0, 4200.0]) - expected_output = "0 840\n1 4200\ndtype: float64" - assert str(s) == expected_output - - @pytest.mark.parametrize( - "value,expected", - [ - ([9.4444], " 0\n0 9"), - ([0.49], " 0\n0 5e-01"), - ([10.9999], " 0\n0 11"), - ([9.5444, 9.6], " 0\n0 10\n1 10"), - ([0.46, 0.78, -9.9999], " 0\n0 5e-01\n1 8e-01\n2 -1e+01"), - ], - ) - def test_set_option_precision(self, value, expected): - # Issue #30122 - # Precision was incorrectly shown - - with option_context("display.precision", 0): - df_value = DataFrame(value) - assert str(df_value) == expected - - def test_output_significant_digits(self): - # Issue #9764 - - # In case default display precision changes: - with option_context("display.precision", 6): - # DataFrame example from issue #9764 - d = DataFrame( - { - "col1": [ - 9.999e-8, - 1e-7, - 1.0001e-7, - 2e-7, - 4.999e-7, - 5e-7, - 5.0001e-7, - 6e-7, - 9.999e-7, - 1e-6, - 1.0001e-6, - 2e-6, - 4.999e-6, - 5e-6, - 5.0001e-6, - 6e-6, - ] - } - ) - - expected_output = { - (0, 6): " col1\n" - "0 9.999000e-08\n" - "1 1.000000e-07\n" - "2 1.000100e-07\n" - "3 2.000000e-07\n" - "4 4.999000e-07\n" - "5 5.000000e-07", - (1, 6): " col1\n" - "1 1.000000e-07\n" - "2 1.000100e-07\n" - "3 2.000000e-07\n" - "4 4.999000e-07\n" - "5 5.000000e-07", - (1, 8): " col1\n" - "1 1.000000e-07\n" - "2 1.000100e-07\n" - "3 2.000000e-07\n" - "4 4.999000e-07\n" - "5 5.000000e-07\n" - "6 5.000100e-07\n" - "7 6.000000e-07", - (8, 16): " col1\n" - "8 9.999000e-07\n" - "9 1.000000e-06\n" - "10 1.000100e-06\n" - "11 2.000000e-06\n" - "12 4.999000e-06\n" - "13 5.000000e-06\n" - "14 5.000100e-06\n" - "15 6.000000e-06", - (9, 16): " col1\n" - "9 0.000001\n" - "10 0.000001\n" - "11 0.000002\n" - "12 0.000005\n" - "13 0.000005\n" - "14 0.000005\n" - "15 0.000006", - } - - for (start, stop), v in expected_output.items(): - assert str(d[start:stop]) == v - - def test_too_long(self): - # GH 10451 - with option_context("display.precision", 4): - # need both a number > 1e6 and something that normally formats to - # having length > display.precision + 6 - df = DataFrame({"x": [12345.6789]}) - assert str(df) == " x\n0 12345.6789" - df = DataFrame({"x": [2e6]}) - assert str(df) == " x\n0 2000000.0" - df = DataFrame({"x": [12345.6789, 2e6]}) - assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06" - - -class TestRepr_timedelta64: - def test_none(self): - delta_1d = pd.to_timedelta(1, unit="D") - delta_0d = pd.to_timedelta(0, unit="D") - delta_1s = pd.to_timedelta(1, unit="s") - delta_500ms = pd.to_timedelta(500, unit="ms") - - drepr = lambda x: x._repr_base() - assert drepr(delta_1d) == "1 days" - assert drepr(-delta_1d) == "-1 days" - assert drepr(delta_0d) == "0 days" - assert drepr(delta_1s) == "0 days 00:00:01" - assert drepr(delta_500ms) == "0 days 
00:00:00.500000" - assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" - assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" - assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" - assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" - - def test_sub_day(self): - delta_1d = pd.to_timedelta(1, unit="D") - delta_0d = pd.to_timedelta(0, unit="D") - delta_1s = pd.to_timedelta(1, unit="s") - delta_500ms = pd.to_timedelta(500, unit="ms") - - drepr = lambda x: x._repr_base(format="sub_day") - assert drepr(delta_1d) == "1 days" - assert drepr(-delta_1d) == "-1 days" - assert drepr(delta_0d) == "00:00:00" - assert drepr(delta_1s) == "00:00:01" - assert drepr(delta_500ms) == "00:00:00.500000" - assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" - assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" - assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" - assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" - - def test_long(self): - delta_1d = pd.to_timedelta(1, unit="D") - delta_0d = pd.to_timedelta(0, unit="D") - delta_1s = pd.to_timedelta(1, unit="s") - delta_500ms = pd.to_timedelta(500, unit="ms") - - drepr = lambda x: x._repr_base(format="long") - assert drepr(delta_1d) == "1 days 00:00:00" - assert drepr(-delta_1d) == "-1 days +00:00:00" - assert drepr(delta_0d) == "0 days 00:00:00" - assert drepr(delta_1s) == "0 days 00:00:01" - assert drepr(delta_500ms) == "0 days 00:00:00.500000" - assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" - assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" - assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" - assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" - - def test_all(self): - delta_1d = pd.to_timedelta(1, unit="D") - delta_0d = pd.to_timedelta(0, unit="D") - delta_1ns = pd.to_timedelta(1, unit="ns") - - drepr = lambda x: x._repr_base(format="all") - assert drepr(delta_1d) == "1 days 00:00:00.000000000" - assert drepr(-delta_1d) == "-1 days +00:00:00.000000000" - assert drepr(delta_0d) == "0 days 00:00:00.000000000" - assert drepr(delta_1ns) == "0 days 00:00:00.000000001" - assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001" - - -class TestTimedelta64Formatter: - def test_days(self): - x = pd.to_timedelta(list(range(5)) + [NaT], unit="D") - result = fmt.Timedelta64Formatter(x, box=True).get_result() - assert result[0].strip() == "'0 days'" - assert result[1].strip() == "'1 days'" - - result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result() - assert result[0].strip() == "'1 days'" - - result = fmt.Timedelta64Formatter(x, box=False).get_result() - assert result[0].strip() == "0 days" - assert result[1].strip() == "1 days" - - result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result() - assert result[0].strip() == "1 days" - - def test_days_neg(self): - x = pd.to_timedelta(list(range(5)) + [NaT], unit="D") - result = fmt.Timedelta64Formatter(-x, box=True).get_result() - assert result[0].strip() == "'0 days'" - assert result[1].strip() == "'-1 days'" - - def test_subdays(self): - y = pd.to_timedelta(list(range(5)) + [NaT], unit="s") - result = fmt.Timedelta64Formatter(y, box=True).get_result() - assert result[0].strip() == "'0 days 00:00:00'" - assert result[1].strip() == "'0 days 00:00:01'" - - def test_subdays_neg(self): - y = pd.to_timedelta(list(range(5)) + [NaT], unit="s") - result = fmt.Timedelta64Formatter(-y, box=True).get_result() - assert result[0].strip() == "'0 days 00:00:00'" - assert 
result[1].strip() == "'-1 days +23:59:59'" - - def test_zero(self): - x = pd.to_timedelta(list(range(1)) + [NaT], unit="D") - result = fmt.Timedelta64Formatter(x, box=True).get_result() - assert result[0].strip() == "'0 days'" - - x = pd.to_timedelta(list(range(1)), unit="D") - result = fmt.Timedelta64Formatter(x, box=True).get_result() - assert result[0].strip() == "'0 days'" - - -class TestDatetime64Formatter: - def test_mixed(self): - x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT]) - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 00:00:00" - assert result[1].strip() == "2013-01-01 12:00:00" - - def test_dates(self): - x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), NaT]) - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01" - assert result[1].strip() == "2013-01-02" - - def test_date_nanos(self): - x = Series([Timestamp(200)]) - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "1970-01-01 00:00:00.000000200" - - def test_dates_display(self): - # 10170 - # make sure that we are consistently display date formatting - x = Series(date_range("20130101 09:00:00", periods=5, freq="D")) - x.iloc[1] = np.nan - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 09:00:00" - assert result[1].strip() == "NaT" - assert result[4].strip() == "2013-01-05 09:00:00" - - x = Series(date_range("20130101 09:00:00", periods=5, freq="s")) - x.iloc[1] = np.nan - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 09:00:00" - assert result[1].strip() == "NaT" - assert result[4].strip() == "2013-01-01 09:00:04" - - x = Series(date_range("20130101 09:00:00", periods=5, freq="ms")) - x.iloc[1] = np.nan - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 09:00:00.000" - assert result[1].strip() == "NaT" - assert result[4].strip() == "2013-01-01 09:00:00.004" - - x = Series(date_range("20130101 09:00:00", periods=5, freq="us")) - x.iloc[1] = np.nan - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 09:00:00.000000" - assert result[1].strip() == "NaT" - assert result[4].strip() == "2013-01-01 09:00:00.000004" - - x = Series(date_range("20130101 09:00:00", periods=5, freq="N")) - x.iloc[1] = np.nan - result = fmt.Datetime64Formatter(x).get_result() - assert result[0].strip() == "2013-01-01 09:00:00.000000000" - assert result[1].strip() == "NaT" - assert result[4].strip() == "2013-01-01 09:00:00.000000004" - - def test_datetime64formatter_yearmonth(self): - x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)]) - - def format_func(x): - return x.strftime("%Y-%m") - - formatter = fmt.Datetime64Formatter(x, formatter=format_func) - result = formatter.get_result() - assert result == ["2016-01", "2016-02"] - - def test_datetime64formatter_hoursecond(self): - x = Series( - pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f") - ) - - def format_func(x): - return x.strftime("%H:%M") - - formatter = fmt.Datetime64Formatter(x, formatter=format_func) - result = formatter.get_result() - assert result == ["10:10", "12:12"] - - def test_datetime64formatter_tz_ms(self): - x = Series( - np.array(["2999-01-01", "2999-01-02", "NaT"], dtype="datetime64[ms]") - ).dt.tz_localize("US/Pacific") - result = fmt.Datetime64TZFormatter(x).get_result() - assert result[0].strip() == "2999-01-01 00:00:00-08:00" - assert 
result[1].strip() == "2999-01-02 00:00:00-08:00" - - -class TestNaTFormatting: - def test_repr(self): - assert repr(NaT) == "NaT" - - def test_str(self): - assert str(NaT) == "NaT" - - -class TestPeriodIndexFormat: - def test_period_format_and_strftime_default(self): - per = pd.PeriodIndex([datetime(2003, 1, 1, 12), None], freq="H") - - # Default formatting - formatted = per.format() - assert formatted[0] == "2003-01-01 12:00" # default: minutes not shown - assert formatted[1] == "NaT" - # format is equivalent to strftime(None)... - assert formatted[0] == per.strftime(None)[0] - assert per.strftime(None)[1] is np.nan # ...except for NaTs - - # Same test with nanoseconds freq - per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="n") - formatted = per.format() - assert (formatted == per.strftime(None)).all() - assert formatted[0] == "2003-01-01 12:01:01.123456789" - assert formatted[1] == "2003-01-01 12:01:01.123456790" - - def test_period_custom(self): - # GH#46252 custom formatting directives %l (ms) and %u (us) - - # 3 digits - per = pd.period_range("2003-01-01 12:01:01.123", periods=2, freq="l") - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123000 ns=123000000)" - assert formatted[1] == "03 12:01:01 (ms=124 us=124000 ns=124000000)" - - # 6 digits - per = pd.period_range("2003-01-01 12:01:01.123456", periods=2, freq="u") - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456000)" - assert formatted[1] == "03 12:01:01 (ms=123 us=123457 ns=123457000)" - - # 9 digits - per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="n") - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456789)" - assert formatted[1] == "03 12:01:01 (ms=123 us=123456 ns=123456790)" - - def test_period_tz(self): - # Formatting periods created from a datetime with timezone. - - # This timestamp is in 2013 in Europe/Paris but is 2012 in UTC - dt = pd.to_datetime(["2013-01-01 00:00:00+01:00"], utc=True) - - # Converting to a period looses the timezone information - # Since tz is currently set as utc, we'll see 2012 - with tm.assert_produces_warning(UserWarning, match="will drop timezone"): - per = dt.to_period(freq="H") - assert per.format()[0] == "2012-12-31 23:00" - - # If tz is currently set as paris before conversion, we'll see 2013 - dt = dt.tz_convert("Europe/Paris") - with tm.assert_produces_warning(UserWarning, match="will drop timezone"): - per = dt.to_period(freq="H") - assert per.format()[0] == "2013-01-01 00:00" - - @pytest.mark.parametrize( - "locale_str", - [ - pytest.param(None, id=str(locale.getlocale())), - "it_IT.utf8", - "it_IT", # Note: encoding will be 'ISO8859-1' - "zh_CN.utf8", - "zh_CN", # Note: encoding will be 'gb2312' - ], - ) - def test_period_non_ascii_fmt(self, locale_str): - # GH#46468 non-ascii char in input format string leads to wrong output - - # Skip if locale cannot be set - if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): - pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") - - # Change locale temporarily for this test. 
- with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): - # Scalar - per = pd.Period("2018-03-11 13:00", freq="H") - assert per.strftime("%y é") == "18 é" - - # Index - per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") - formatted = per.format(date_format="%y é") - assert formatted[0] == "03 é" - assert formatted[1] == "03 é" - - @pytest.mark.parametrize( - "locale_str", - [ - pytest.param(None, id=str(locale.getlocale())), - "it_IT.utf8", - "it_IT", # Note: encoding will be 'ISO8859-1' - "zh_CN.utf8", - "zh_CN", # Note: encoding will be 'gb2312' - ], - ) - def test_period_custom_locale_directive(self, locale_str): - # GH#46319 locale-specific directive leads to non-utf8 c strftime char* result - - # Skip if locale cannot be set - if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): - pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") - - # Change locale temporarily for this test. - with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): - # Get locale-specific reference - am_local, pm_local = get_local_am_pm() - - # Scalar - per = pd.Period("2018-03-11 13:00", freq="H") - assert per.strftime("%p") == pm_local - - # Index - per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") - formatted = per.format(date_format="%y %I:%M:%S%p") - assert formatted[0] == f"03 01:00:00{am_local}" - assert formatted[1] == f"03 01:00:00{pm_local}" - - -class TestDatetimeIndexFormat: - def test_datetime(self): - formatted = pd.to_datetime([datetime(2003, 1, 1, 12), NaT]).format() - assert formatted[0] == "2003-01-01 12:00:00" - assert formatted[1] == "NaT" - - def test_date(self): - formatted = pd.to_datetime([datetime(2003, 1, 1), NaT]).format() - assert formatted[0] == "2003-01-01" - assert formatted[1] == "NaT" - - def test_date_tz(self): - formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format() - assert formatted[0] == "2013-01-01 00:00:00+00:00" - - formatted = pd.to_datetime([datetime(2013, 1, 1), NaT], utc=True).format() - assert formatted[0] == "2013-01-01 00:00:00+00:00" - - def test_date_explicit_date_format(self): - formatted = pd.to_datetime([datetime(2003, 2, 1), NaT]).format( - date_format="%m-%d-%Y", na_rep="UT" - ) - assert formatted[0] == "02-01-2003" - assert formatted[1] == "UT" - - -class TestDatetimeIndexUnicode: - def test_dates(self): - text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)])) - assert "['2013-01-01'," in text - assert ", '2014-01-01']" in text - - def test_mixed(self): - text = str( - pd.to_datetime( - [datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)] - ) - ) - assert "'2013-01-01 00:00:00'," in text - assert "'2014-01-01 00:00:00']" in text - - -class TestStringRepTimestamp: - def test_no_tz(self): - dt_date = datetime(2013, 1, 2) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3) - assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - ts_nanos_only = Timestamp(200) - assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200" - - ts_nanos_micros = Timestamp(1200) - assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200" - - def test_tz_pytz(self): - dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc) - 
assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - def test_tz_dateutil(self): - utc = dateutil.tz.tzutc() - - dt_date = datetime(2013, 1, 2, tzinfo=utc) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc) - assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - def test_nat_representations(self): - for f in (str, repr, methodcaller("isoformat")): - assert f(NaT) == "NaT" - - -@pytest.mark.parametrize( - "percentiles, expected", - [ - ( - [0.01999, 0.02001, 0.5, 0.666666, 0.9999], - ["1.999%", "2.001%", "50%", "66.667%", "99.99%"], - ), - ( - [0, 0.5, 0.02001, 0.5, 0.666666, 0.9999], - ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"], - ), - ([0.281, 0.29, 0.57, 0.58], ["28.1%", "29%", "57%", "58%"]), - ([0.28, 0.29, 0.57, 0.58], ["28%", "29%", "57%", "58%"]), - ], -) -def test_format_percentiles(percentiles, expected): - result = fmt.format_percentiles(percentiles) - assert result == expected - - -@pytest.mark.parametrize( - "percentiles", - [([0.1, np.nan, 0.5]), ([-0.001, 0.1, 0.5]), ([2, 0.1, 0.5]), ([0.1, 0.5, "a"])], -) -def test_error_format_percentiles(percentiles): - msg = r"percentiles should all be in the interval \[0,1\]" - with pytest.raises(ValueError, match=msg): - fmt.format_percentiles(percentiles) - - -def test_format_percentiles_integer_idx(): - # Issue #26660 - result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1)) - expected = [ - "0%", - "10%", - "20%", - "30%", - "40%", - "50%", - "60%", - "70%", - "80%", - "90%", - "100%", - ] - assert result == expected - - -def test_repr_html_ipython_config(ip): - code = textwrap.dedent( - """\ - from pandas import DataFrame - df = DataFrame({"A": [1, 2]}) - df._repr_html_() - - cfg = get_ipython().config - cfg['IPKernelApp']['parent_appname'] - df._repr_html_() - """ - ) - result = ip.run_cell(code) - assert not result.error_in_exec - - -@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"]) -@pytest.mark.parametrize( - "encoding, data", - [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")], -) -def test_filepath_or_buffer_arg( - method, - filepath_or_buffer, - assert_filepath_or_buffer_equals, - encoding, - data, - filepath_or_buffer_id, -): - df = DataFrame([data]) - if method in ["to_latex"]: # uses styler implementation - pytest.importorskip("jinja2") - - if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None: - with pytest.raises( - ValueError, match="buf is not a file name and encoding is specified." 
- ): - getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) - elif encoding == "foo": - with pytest.raises(LookupError, match="unknown encoding"): - getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) - else: - expected = getattr(df, method)() - getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) - assert_filepath_or_buffer_equals(expected) - - -@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"]) -def test_filepath_or_buffer_bad_arg_raises(float_frame, method): - if method in ["to_latex"]: # uses styler implementation - pytest.importorskip("jinja2") - msg = "buf is not a file name and it has no write method" - with pytest.raises(TypeError, match=msg): - getattr(float_frame, method)(buf=object()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_values.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_values.py deleted file mode 100644 index cb1595e68264fbe5f07b014be4975657fa2fa8cf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_values.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - IntervalIndex, - Series, - period_range, -) -import pandas._testing as tm - - -class TestValues: - @pytest.mark.parametrize( - "data", - [ - period_range("2000", periods=4), - IntervalIndex.from_breaks([1, 2, 3, 4]), - ], - ) - def test_values_object_extension_dtypes(self, data): - # https://github.com/pandas-dev/pandas/issues/23995 - result = Series(data).values - expected = np.array(data.astype(object)) - tm.assert_numpy_array_equal(result, expected) - - def test_values(self, datetime_series): - tm.assert_almost_equal( - datetime_series.values, list(datetime_series), check_dtype=False - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_constructors.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_constructors.py deleted file mode 100644 index b74ee5cf8f2bccb7d9d4caf4347d88547ecf5dfa..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_constructors.py +++ /dev/null @@ -1,2242 +0,0 @@ -from collections import OrderedDict -from collections.abc import Iterator -from datetime import ( - datetime, - timedelta, -) - -from dateutil.tz import tzoffset -import numpy as np -from numpy import ma -import pytest - -from pandas._libs import ( - iNaT, - lib, -) -from pandas.errors import IntCastingNaNError -import pandas.util._test_decorators as td - -from pandas.core.dtypes.common import is_categorical_dtype -from pandas.core.dtypes.dtypes import CategoricalDtype - -import pandas as pd -from pandas import ( - Categorical, - DataFrame, - DatetimeIndex, - DatetimeTZDtype, - Index, - Interval, - IntervalIndex, - MultiIndex, - NaT, - Period, - RangeIndex, - Series, - Timestamp, - date_range, - isna, - period_range, - timedelta_range, -) -import pandas._testing as tm -from pandas.core.arrays import ( - IntegerArray, - IntervalArray, - period_array, -) -from pandas.core.internals.blocks import NumpyBlock - - -class TestSeriesConstructors: - def test_from_ints_with_non_nano_dt64_dtype(self, index_or_series): - values = np.arange(10) - - res = index_or_series(values, dtype="M8[s]") - expected = index_or_series(values.astype("M8[s]")) - tm.assert_equal(res, 
expected) - - res = index_or_series(list(values), dtype="M8[s]") - tm.assert_equal(res, expected) - - def test_from_na_value_and_interval_of_datetime_dtype(self): - # GH#41805 - ser = Series([None], dtype="interval[datetime64[ns]]") - assert ser.isna().all() - assert ser.dtype == "interval[datetime64[ns], right]" - - def test_infer_with_date_and_datetime(self): - # GH#49341 pre-2.0 we inferred datetime-and-date to datetime64, which - # was inconsistent with Index behavior - ts = Timestamp(2016, 1, 1) - vals = [ts.to_pydatetime(), ts.date()] - - ser = Series(vals) - expected = Series(vals, dtype=object) - tm.assert_series_equal(ser, expected) - - idx = Index(vals) - expected = Index(vals, dtype=object) - tm.assert_index_equal(idx, expected) - - def test_unparsable_strings_with_dt64_dtype(self): - # pre-2.0 these would be silently ignored and come back with object dtype - vals = ["aa"] - msg = "^Unknown datetime string format, unable to parse: aa, at position 0$" - with pytest.raises(ValueError, match=msg): - Series(vals, dtype="datetime64[ns]") - - with pytest.raises(ValueError, match=msg): - Series(np.array(vals, dtype=object), dtype="datetime64[ns]") - - @pytest.mark.parametrize( - "constructor", - [ - # NOTE: some overlap with test_constructor_empty but that test does not - # test for None or an empty generator. - # test_constructor_pass_none tests None but only with the index also - # passed. - (lambda idx: Series(index=idx)), - (lambda idx: Series(None, index=idx)), - (lambda idx: Series({}, index=idx)), - (lambda idx: Series((), index=idx)), - (lambda idx: Series([], index=idx)), - (lambda idx: Series((_ for _ in []), index=idx)), - (lambda idx: Series(data=None, index=idx)), - (lambda idx: Series(data={}, index=idx)), - (lambda idx: Series(data=(), index=idx)), - (lambda idx: Series(data=[], index=idx)), - (lambda idx: Series(data=(_ for _ in []), index=idx)), - ], - ) - @pytest.mark.parametrize("empty_index", [None, []]) - def test_empty_constructor(self, constructor, empty_index): - # GH 49573 (addition of empty_index parameter) - expected = Series(index=empty_index) - result = constructor(empty_index) - - assert result.dtype == object - assert len(result.index) == 0 - tm.assert_series_equal(result, expected, check_index_type=True) - - def test_invalid_dtype(self): - # GH15520 - msg = "not understood" - invalid_list = [Timestamp, "Timestamp", list] - for dtype in invalid_list: - with pytest.raises(TypeError, match=msg): - Series([], name="time", dtype=dtype) - - def test_invalid_compound_dtype(self): - # GH#13296 - c_dtype = np.dtype([("a", "i8"), ("b", "f4")]) - cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype) - - with pytest.raises(ValueError, match="Use DataFrame instead"): - Series(cdt_arr, index=["A", "B"]) - - def test_scalar_conversion(self): - # Pass in scalar is disabled - scalar = Series(0.5) - assert not isinstance(scalar, float) - - def test_scalar_extension_dtype(self, ea_scalar_and_dtype): - # GH 28401 - - ea_scalar, ea_dtype = ea_scalar_and_dtype - - ser = Series(ea_scalar, index=range(3)) - expected = Series([ea_scalar] * 3, dtype=ea_dtype) - - assert ser.dtype == ea_dtype - tm.assert_series_equal(ser, expected) - - def test_constructor(self, datetime_series): - empty_series = Series() - assert datetime_series.index._is_all_dates - - # Pass in Series - derived = Series(datetime_series) - assert derived.index._is_all_dates - - assert tm.equalContents(derived.index, datetime_series.index) - # Ensure new index is not created - assert id(datetime_series.index) 
== id(derived.index) - - # Mixed type Series - mixed = Series(["hello", np.nan], index=[0, 1]) - assert mixed.dtype == np.object_ - assert np.isnan(mixed[1]) - - assert not empty_series.index._is_all_dates - assert not Series().index._is_all_dates - - # exception raised is of type ValueError GH35744 - with pytest.raises( - ValueError, - match=r"Data must be 1-dimensional, got ndarray of shape \(3, 3\) instead", - ): - Series(np.random.default_rng(2).standard_normal((3, 3)), index=np.arange(3)) - - mixed.name = "Series" - rs = Series(mixed).name - xp = "Series" - assert rs == xp - - # raise on MultiIndex GH4187 - m = MultiIndex.from_arrays([[1, 2], [3, 4]]) - msg = "initializing a Series from a MultiIndex is not supported" - with pytest.raises(NotImplementedError, match=msg): - Series(m) - - def test_constructor_index_ndim_gt_1_raises(self): - # GH#18579 - df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) - with pytest.raises(ValueError, match="Index data must be 1-dimensional"): - Series([1, 3, 2], index=df) - - @pytest.mark.parametrize("input_class", [list, dict, OrderedDict]) - def test_constructor_empty(self, input_class): - empty = Series() - empty2 = Series(input_class()) - - # these are Index() and RangeIndex() which don't compare type equal - # but are just .equals - tm.assert_series_equal(empty, empty2, check_index_type=False) - - # With explicit dtype: - empty = Series(dtype="float64") - empty2 = Series(input_class(), dtype="float64") - tm.assert_series_equal(empty, empty2, check_index_type=False) - - # GH 18515 : with dtype=category: - empty = Series(dtype="category") - empty2 = Series(input_class(), dtype="category") - tm.assert_series_equal(empty, empty2, check_index_type=False) - - if input_class is not list: - # With index: - empty = Series(index=range(10)) - empty2 = Series(input_class(), index=range(10)) - tm.assert_series_equal(empty, empty2) - - # With index and dtype float64: - empty = Series(np.nan, index=range(10)) - empty2 = Series(input_class(), index=range(10), dtype="float64") - tm.assert_series_equal(empty, empty2) - - # GH 19853 : with empty string, index and dtype str - empty = Series("", dtype=str, index=range(3)) - empty2 = Series("", index=range(3)) - tm.assert_series_equal(empty, empty2) - - @pytest.mark.parametrize("input_arg", [np.nan, float("nan")]) - def test_constructor_nan(self, input_arg): - empty = Series(dtype="float64", index=range(10)) - empty2 = Series(input_arg, index=range(10)) - - tm.assert_series_equal(empty, empty2, check_index_type=False) - - @pytest.mark.parametrize( - "dtype", - ["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"], - ) - @pytest.mark.parametrize("index", [None, Index([])]) - def test_constructor_dtype_only(self, dtype, index): - # GH-20865 - result = Series(dtype=dtype, index=index) - assert result.dtype == dtype - assert len(result) == 0 - - def test_constructor_no_data_index_order(self): - result = Series(index=["b", "a", "c"]) - assert result.index.tolist() == ["b", "a", "c"] - - def test_constructor_no_data_string_type(self): - # GH 22477 - result = Series(index=[1], dtype=str) - assert np.isnan(result.iloc[0]) - - @pytest.mark.parametrize("item", ["entry", "ѐ", 13]) - def test_constructor_string_element_string_type(self, item): - # GH 22477 - result = Series(item, index=[1], dtype=str) - assert result.iloc[0] == str(item) - - def test_constructor_dtype_str_na_values(self, string_dtype): - # https://github.com/pandas-dev/pandas/issues/21083 - ser = Series(["x", None], 
dtype=string_dtype) - result = ser.isna() - expected = Series([False, True]) - tm.assert_series_equal(result, expected) - assert ser.iloc[1] is None - - ser = Series(["x", np.nan], dtype=string_dtype) - assert np.isnan(ser.iloc[1]) - - def test_constructor_series(self): - index1 = ["d", "b", "a", "c"] - index2 = sorted(index1) - s1 = Series([4, 7, -5, 3], index=index1) - s2 = Series(s1, index=index2) - - tm.assert_series_equal(s2, s1.sort_index()) - - def test_constructor_iterable(self): - # GH 21987 - class Iter: - def __iter__(self) -> Iterator: - yield from range(10) - - expected = Series(list(range(10)), dtype="int64") - result = Series(Iter(), dtype="int64") - tm.assert_series_equal(result, expected) - - def test_constructor_sequence(self): - # GH 21987 - expected = Series(list(range(10)), dtype="int64") - result = Series(range(10), dtype="int64") - tm.assert_series_equal(result, expected) - - def test_constructor_single_str(self): - # GH 21987 - expected = Series(["abc"]) - result = Series("abc") - tm.assert_series_equal(result, expected) - - def test_constructor_list_like(self): - # make sure that we are coercing different - # list-likes to standard dtypes and not - # platform specific - expected = Series([1, 2, 3], dtype="int64") - for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]: - result = Series(obj, index=[0, 1, 2]) - tm.assert_series_equal(result, expected) - - def test_constructor_boolean_index(self): - # GH#18579 - s1 = Series([1, 2, 3], index=[4, 5, 6]) - - index = s1 == 2 - result = Series([1, 3, 2], index=index) - expected = Series([1, 3, 2], index=[False, True, False]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"]) - def test_constructor_index_dtype(self, dtype): - # GH 17088 - - s = Series(Index([0, 2, 4]), dtype=dtype) - assert s.dtype == dtype - - @pytest.mark.parametrize( - "input_vals", - [ - ([1, 2]), - (["1", "2"]), - (list(date_range("1/1/2011", periods=2, freq="H"))), - (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))), - ([Interval(left=0, right=5)]), - ], - ) - def test_constructor_list_str(self, input_vals, string_dtype): - # GH 16605 - # Ensure that data elements from a list are converted to strings - # when dtype is str, 'str', or 'U' - result = Series(input_vals, dtype=string_dtype) - expected = Series(input_vals).astype(string_dtype) - tm.assert_series_equal(result, expected) - - def test_constructor_list_str_na(self, string_dtype): - result = Series([1.0, 2.0, np.nan], dtype=string_dtype) - expected = Series(["1.0", "2.0", np.nan], dtype=object) - tm.assert_series_equal(result, expected) - assert np.isnan(result[2]) - - def test_constructor_generator(self): - gen = (i for i in range(10)) - - result = Series(gen) - exp = Series(range(10)) - tm.assert_series_equal(result, exp) - - # same but with non-default index - gen = (i for i in range(10)) - result = Series(gen, index=range(10, 20)) - exp.index = range(10, 20) - tm.assert_series_equal(result, exp) - - def test_constructor_map(self): - # GH8909 - m = (x for x in range(10)) - - result = Series(m) - exp = Series(range(10)) - tm.assert_series_equal(result, exp) - - # same but with non-default index - m = (x for x in range(10)) - result = Series(m, index=range(10, 20)) - exp.index = range(10, 20) - tm.assert_series_equal(result, exp) - - def test_constructor_categorical(self): - cat = Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"]) - res = Series(cat) - tm.assert_categorical_equal(res.values, 
cat) - - # can cast to a new dtype - result = Series(Categorical([1, 2, 3]), dtype="int64") - expected = Series([1, 2, 3], dtype="int64") - tm.assert_series_equal(result, expected) - - def test_construct_from_categorical_with_dtype(self): - # GH12574 - cat = Series(Categorical([1, 2, 3]), dtype="category") - msg = "is_categorical_dtype is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert is_categorical_dtype(cat) - assert is_categorical_dtype(cat.dtype) - - def test_construct_intlist_values_category_dtype(self): - ser = Series([1, 2, 3], dtype="category") - msg = "is_categorical_dtype is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert is_categorical_dtype(ser) - assert is_categorical_dtype(ser.dtype) - - def test_constructor_categorical_with_coercion(self): - factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]) - # test basic creation / coercion of categoricals - s = Series(factor, name="A") - assert s.dtype == "category" - assert len(s) == len(factor) - str(s.values) - str(s) - - # in a frame - df = DataFrame({"A": factor}) - result = df["A"] - tm.assert_series_equal(result, s) - result = df.iloc[:, 0] - tm.assert_series_equal(result, s) - assert len(df) == len(factor) - str(df.values) - str(df) - - df = DataFrame({"A": s}) - result = df["A"] - tm.assert_series_equal(result, s) - assert len(df) == len(factor) - str(df.values) - str(df) - - # multiples - df = DataFrame({"A": s, "B": s, "C": 1}) - result1 = df["A"] - result2 = df["B"] - tm.assert_series_equal(result1, s) - tm.assert_series_equal(result2, s, check_names=False) - assert result2.name == "B" - assert len(df) == len(factor) - str(df.values) - str(df) - - def test_constructor_categorical_with_coercion2(self): - # GH8623 - x = DataFrame( - [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]], - columns=["person_id", "person_name"], - ) - x["person_name"] = Categorical(x.person_name) # doing this breaks transform - - expected = x.iloc[0].person_name - result = x.person_name.iloc[0] - assert result == expected - - result = x.person_name[0] - assert result == expected - - result = x.person_name.loc[0] - assert result == expected - - def test_constructor_series_to_categorical(self): - # see GH#16524: test conversion of Series to Categorical - series = Series(["a", "b", "c"]) - - result = Series(series, dtype="category") - expected = Series(["a", "b", "c"], dtype="category") - - tm.assert_series_equal(result, expected) - - def test_constructor_categorical_dtype(self): - result = Series( - ["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True) - ) - assert isinstance(result.dtype, CategoricalDtype) - tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) - assert result.cat.ordered - - result = Series(["a", "b"], dtype=CategoricalDtype(["b", "a"])) - assert isinstance(result.dtype, CategoricalDtype) - tm.assert_index_equal(result.cat.categories, Index(["b", "a"])) - assert result.cat.ordered is False - - # GH 19565 - Check broadcasting of scalar with Categorical dtype - result = Series( - "a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) - ) - expected = Series( - ["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) - ) - tm.assert_series_equal(result, expected) - - def test_constructor_categorical_string(self): - # GH 26336: the string 'category' maintains existing CategoricalDtype - cdt = CategoricalDtype(categories=list("dabc"), ordered=True) - expected = Series(list("abcabc"), dtype=cdt) - - # Series(Categorical, dtype='category') keeps existing dtype - cat = Categorical(list("abcabc"), dtype=cdt) - result = Series(cat, dtype="category") - tm.assert_series_equal(result, expected) - - # Series(Series[Categorical], dtype='category') keeps existing dtype - result = Series(result, dtype="category") - tm.assert_series_equal(result, expected) - - def test_categorical_sideeffects_free(self): - # Passing a categorical to a Series and then changing values in either - # the series or the categorical should not change the values in the - # other one, IF you specify copy! 
- cat = Categorical(["a", "b", "c", "a"]) - s = Series(cat, copy=True) - assert s.cat is not cat - s = s.cat.rename_categories([1, 2, 3]) - exp_s = np.array([1, 2, 3, 1], dtype=np.int64) - exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_) - tm.assert_numpy_array_equal(s.__array__(), exp_s) - tm.assert_numpy_array_equal(cat.__array__(), exp_cat) - - # setting - s[0] = 2 - exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) - tm.assert_numpy_array_equal(s.__array__(), exp_s2) - tm.assert_numpy_array_equal(cat.__array__(), exp_cat) - - # however, copy is False by default - # so this WILL change values - cat = Categorical(["a", "b", "c", "a"]) - s = Series(cat, copy=False) - assert s.values is cat - s = s.cat.rename_categories([1, 2, 3]) - assert s.values is not cat - exp_s = np.array([1, 2, 3, 1], dtype=np.int64) - tm.assert_numpy_array_equal(s.__array__(), exp_s) - - s[0] = 2 - exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) - tm.assert_numpy_array_equal(s.__array__(), exp_s2) - - def test_unordered_compare_equal(self): - left = Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"])) - right = Series(Categorical(["a", "b", np.nan], categories=["a", "b"])) - tm.assert_series_equal(left, right) - - def test_constructor_maskedarray(self): - data = ma.masked_all((3,), dtype=float) - result = Series(data) - expected = Series([np.nan, np.nan, np.nan]) - tm.assert_series_equal(result, expected) - - data[0] = 0.0 - data[2] = 2.0 - index = ["a", "b", "c"] - result = Series(data, index=index) - expected = Series([0.0, np.nan, 2.0], index=index) - tm.assert_series_equal(result, expected) - - data[1] = 1.0 - result = Series(data, index=index) - expected = Series([0.0, 1.0, 2.0], index=index) - tm.assert_series_equal(result, expected) - - data = ma.masked_all((3,), dtype=int) - result = Series(data) - expected = Series([np.nan, np.nan, np.nan], dtype=float) - tm.assert_series_equal(result, expected) - - data[0] = 0 - data[2] = 2 - index = ["a", "b", "c"] - result = Series(data, index=index) - expected = Series([0, np.nan, 2], index=index, dtype=float) - tm.assert_series_equal(result, expected) - - data[1] = 1 - result = Series(data, index=index) - expected = Series([0, 1, 2], index=index, dtype=int) - tm.assert_series_equal(result, expected) - - data = ma.masked_all((3,), dtype=bool) - result = Series(data) - expected = Series([np.nan, np.nan, np.nan], dtype=object) - tm.assert_series_equal(result, expected) - - data[0] = True - data[2] = False - index = ["a", "b", "c"] - result = Series(data, index=index) - expected = Series([True, np.nan, False], index=index, dtype=object) - tm.assert_series_equal(result, expected) - - data[1] = True - result = Series(data, index=index) - expected = Series([True, True, False], index=index, dtype=bool) - tm.assert_series_equal(result, expected) - - data = ma.masked_all((3,), dtype="M8[ns]") - result = Series(data) - expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]") - tm.assert_series_equal(result, expected) - - data[0] = datetime(2001, 1, 1) - data[2] = datetime(2001, 1, 3) - index = ["a", "b", "c"] - result = Series(data, index=index) - expected = Series( - [datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)], - index=index, - dtype="M8[ns]", - ) - tm.assert_series_equal(result, expected) - - data[1] = datetime(2001, 1, 2) - result = Series(data, index=index) - expected = Series( - [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)], - index=index, - dtype="M8[ns]", - ) - tm.assert_series_equal(result, expected) - - def 
test_constructor_maskedarray_hardened(self): - # Check numpy masked arrays with hard masks -- from GH24574 - data = ma.masked_all((3,), dtype=float).harden_mask() - result = Series(data) - expected = Series([np.nan, np.nan, np.nan]) - tm.assert_series_equal(result, expected) - - def test_series_ctor_plus_datetimeindex(self, using_copy_on_write): - rng = date_range("20090415", "20090519", freq="B") - data = {k: 1 for k in rng} - - result = Series(data, index=rng) - if using_copy_on_write: - assert result.index.is_(rng) - else: - assert result.index is rng - - def test_constructor_default_index(self): - s = Series([0, 1, 2]) - tm.assert_index_equal(s.index, Index(range(3)), exact=True) - - @pytest.mark.parametrize( - "input", - [ - [1, 2, 3], - (1, 2, 3), - list(range(3)), - Categorical(["a", "b", "a"]), - (i for i in range(3)), - (x for x in range(3)), - ], - ) - def test_constructor_index_mismatch(self, input): - # GH 19342 - # test that construction of a Series with an index of different length - # raises an error - msg = r"Length of values \(3\) does not match length of index \(4\)" - with pytest.raises(ValueError, match=msg): - Series(input, index=np.arange(4)) - - def test_constructor_numpy_scalar(self): - # GH 19342 - # construction with a numpy scalar - # should not raise - result = Series(np.array(100), index=np.arange(4), dtype="int64") - expected = Series(100, index=np.arange(4), dtype="int64") - tm.assert_series_equal(result, expected) - - def test_constructor_broadcast_list(self): - # GH 19342 - # construction with single-element container and index - # should raise - msg = r"Length of values \(1\) does not match length of index \(3\)" - with pytest.raises(ValueError, match=msg): - Series(["foo"], index=["a", "b", "c"]) - - def test_constructor_corner(self): - df = tm.makeTimeDataFrame() - objs = [df, df] - s = Series(objs, index=[0, 1]) - assert isinstance(s, Series) - - def test_constructor_sanitize(self): - s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8") - assert s.dtype == np.dtype("i8") - - msg = r"Cannot convert non-finite values \(NA or inf\) to integer" - with pytest.raises(IntCastingNaNError, match=msg): - Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8") - - def test_constructor_copy(self): - # GH15125 - # test dtype parameter has no side effects on copy=True - for data in [[1.0], np.array([1.0])]: - x = Series(data) - y = Series(x, copy=True, dtype=float) - - # copy=True maintains original data in Series - tm.assert_series_equal(x, y) - - # changes to origin of copy does not affect the copy - x[0] = 2.0 - assert not x.equals(y) - assert x[0] == 2.0 - assert y[0] == 1.0 - - @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite test - @pytest.mark.parametrize( - "index", - [ - date_range("20170101", periods=3, tz="US/Eastern"), - date_range("20170101", periods=3), - timedelta_range("1 day", periods=3), - period_range("2012Q1", periods=3, freq="Q"), - Index(list("abc")), - Index([1, 2, 3]), - RangeIndex(0, 3), - ], - ids=lambda x: type(x).__name__, - ) - def test_constructor_limit_copies(self, index): - # GH 17449 - # limit copies of input - s = Series(index) - - # we make 1 copy; this is just a smoke test here - assert s._mgr.blocks[0].values is not index - - def test_constructor_shallow_copy(self): - # constructing a Series from Series with copy=False should still - # give a "shallow" copy (share data, not attributes) - # https://github.com/pandas-dev/pandas/issues/49523 - s = Series([1, 2, 3]) - s_orig = s.copy() - s2 = Series(s) - assert 
s2._mgr is not s._mgr - # Overwriting index of s2 doesn't change s - s2.index = ["a", "b", "c"] - tm.assert_series_equal(s, s_orig) - - def test_constructor_pass_none(self): - s = Series(None, index=range(5)) - assert s.dtype == np.float64 - - s = Series(None, index=range(5), dtype=object) - assert s.dtype == np.object_ - - # GH 7431 - # inference on the index - s = Series(index=np.array([None])) - expected = Series(index=Index([None])) - tm.assert_series_equal(s, expected) - - def test_constructor_pass_nan_nat(self): - # GH 13467 - exp = Series([np.nan, np.nan], dtype=np.float64) - assert exp.dtype == np.float64 - tm.assert_series_equal(Series([np.nan, np.nan]), exp) - tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp) - - exp = Series([NaT, NaT]) - assert exp.dtype == "datetime64[ns]" - tm.assert_series_equal(Series([NaT, NaT]), exp) - tm.assert_series_equal(Series(np.array([NaT, NaT])), exp) - - tm.assert_series_equal(Series([NaT, np.nan]), exp) - tm.assert_series_equal(Series(np.array([NaT, np.nan])), exp) - - tm.assert_series_equal(Series([np.nan, NaT]), exp) - tm.assert_series_equal(Series(np.array([np.nan, NaT])), exp) - - def test_constructor_cast(self): - msg = "could not convert string to float" - with pytest.raises(ValueError, match=msg): - Series(["a", "b", "c"], dtype=float) - - def test_constructor_signed_int_overflow_raises(self): - # GH#41734 disallow silent overflow, enforced in 2.0 - msg = "Values are too large to be losslessly converted" - with pytest.raises(ValueError, match=msg): - Series([1, 200, 923442], dtype="int8") - - with pytest.raises(ValueError, match=msg): - Series([1, 200, 923442], dtype="uint8") - - @pytest.mark.parametrize( - "values", - [ - np.array([1], dtype=np.uint16), - np.array([1], dtype=np.uint32), - np.array([1], dtype=np.uint64), - [np.uint16(1)], - [np.uint32(1)], - [np.uint64(1)], - ], - ) - def test_constructor_numpy_uints(self, values): - # GH#47294 - value = values[0] - result = Series(values) - - assert result[0].dtype == value.dtype - assert result[0] == value - - def test_constructor_unsigned_dtype_overflow(self, any_unsigned_int_numpy_dtype): - # see gh-15832 - msg = "Trying to coerce negative values to unsigned integers" - with pytest.raises(OverflowError, match=msg): - Series([-1], dtype=any_unsigned_int_numpy_dtype) - - def test_constructor_floating_data_int_dtype(self, frame_or_series): - # GH#40110 - arr = np.random.default_rng(2).standard_normal(2) - - # Long-standing behavior (for Series, new in 2.0 for DataFrame) - # has been to ignore the dtype on these; - # not clear if this is what we want long-term - # expected = frame_or_series(arr) - - # GH#49599 as of 2.0 we raise instead of silently retaining float dtype - msg = "Trying to coerce float values to integer" - with pytest.raises(ValueError, match=msg): - frame_or_series(arr, dtype="i8") - - with pytest.raises(ValueError, match=msg): - frame_or_series(list(arr), dtype="i8") - - # pre-2.0, when we had NaNs, we silently ignored the integer dtype - arr[0] = np.nan - # expected = frame_or_series(arr) - - msg = r"Cannot convert non-finite values \(NA or inf\) to integer" - with pytest.raises(IntCastingNaNError, match=msg): - frame_or_series(arr, dtype="i8") - - exc = IntCastingNaNError - if frame_or_series is Series: - # TODO: try to align these - exc = ValueError - msg = "cannot convert float NaN to integer" - with pytest.raises(exc, match=msg): - # same behavior if we pass list instead of the ndarray - frame_or_series(list(arr), dtype="i8") - - # float array that can 
be losslessly cast to integers - arr = np.array([1.0, 2.0], dtype="float64") - expected = frame_or_series(arr.astype("i8")) - - obj = frame_or_series(arr, dtype="i8") - tm.assert_equal(obj, expected) - - obj = frame_or_series(list(arr), dtype="i8") - tm.assert_equal(obj, expected) - - def test_constructor_coerce_float_fail(self, any_int_numpy_dtype): - # see gh-15832 - # Updated: make sure we treat this list the same as we would treat - # the equivalent ndarray - # GH#49599 pre-2.0 we silently retained float dtype, in 2.0 we raise - vals = [1, 2, 3.5] - - msg = "Trying to coerce float values to integer" - with pytest.raises(ValueError, match=msg): - Series(vals, dtype=any_int_numpy_dtype) - with pytest.raises(ValueError, match=msg): - Series(np.array(vals), dtype=any_int_numpy_dtype) - - def test_constructor_coerce_float_valid(self, float_numpy_dtype): - s = Series([1, 2, 3.5], dtype=float_numpy_dtype) - expected = Series([1, 2, 3.5]).astype(float_numpy_dtype) - tm.assert_series_equal(s, expected) - - def test_constructor_invalid_coerce_ints_with_float_nan(self, any_int_numpy_dtype): - # GH 22585 - # Updated: make sure we treat this list the same as we would treat the - # equivalent ndarray - vals = [1, 2, np.nan] - # pre-2.0 this would return with a float dtype, in 2.0 we raise - - msg = "cannot convert float NaN to integer" - with pytest.raises(ValueError, match=msg): - Series(vals, dtype=any_int_numpy_dtype) - msg = r"Cannot convert non-finite values \(NA or inf\) to integer" - with pytest.raises(IntCastingNaNError, match=msg): - Series(np.array(vals), dtype=any_int_numpy_dtype) - - def test_constructor_dtype_no_cast(self, using_copy_on_write): - # see gh-1572 - s = Series([1, 2, 3]) - s2 = Series(s, dtype=np.int64) - - s2[1] = 5 - if using_copy_on_write: - assert s[1] == 2 - else: - assert s[1] == 5 - - def test_constructor_datelike_coercion(self): - # GH 9477 - # incorrectly inferring on dateimelike looking when object dtype is - # specified - s = Series([Timestamp("20130101"), "NOV"], dtype=object) - assert s.iloc[0] == Timestamp("20130101") - assert s.iloc[1] == "NOV" - assert s.dtype == object - - def test_constructor_datelike_coercion2(self): - # the dtype was being reset on the slicing and re-inferred to datetime - # even thought the blocks are mixed - belly = "216 3T19".split() - wing1 = "2T15 4H19".split() - wing2 = "416 4T20".split() - mat = pd.to_datetime("2016-01-22 2019-09-07".split()) - df = DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly) - - result = df.loc["3T19"] - assert result.dtype == object - result = df.loc["216"] - assert result.dtype == object - - def test_constructor_mixed_int_and_timestamp(self, frame_or_series): - # specifically Timestamp with nanos, not datetimes - objs = [Timestamp(9), 10, NaT._value] - result = frame_or_series(objs, dtype="M8[ns]") - - expected = frame_or_series([Timestamp(9), Timestamp(10), NaT]) - tm.assert_equal(result, expected) - - def test_constructor_datetimes_with_nulls(self): - # gh-15869 - for arr in [ - np.array([None, None, None, None, datetime.now(), None]), - np.array([None, None, datetime.now(), None]), - ]: - result = Series(arr) - assert result.dtype == "M8[ns]" - - def test_constructor_dtype_datetime64(self): - s = Series(iNaT, dtype="M8[ns]", index=range(5)) - assert isna(s).all() - - # in theory this should be all nulls, but since - # we are not specifying a dtype is ambiguous - s = Series(iNaT, index=range(5)) - assert not isna(s).all() - - s = Series(np.nan, dtype="M8[ns]", index=range(5)) - assert 
isna(s).all() - - s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]") - assert isna(s[1]) - assert s.dtype == "M8[ns]" - - s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]") - assert isna(s[1]) - assert s.dtype == "M8[ns]" - - def test_constructor_dtype_datetime64_10(self): - # GH3416 - pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] - dates = [np.datetime64(x) for x in pydates] - - ser = Series(dates) - assert ser.dtype == "M8[ns]" - - ser.iloc[0] = np.nan - assert ser.dtype == "M8[ns]" - - # GH3414 related - expected = Series(pydates, dtype="datetime64[ms]") - - result = Series(Series(dates).view(np.int64) / 1000000, dtype="M8[ms]") - tm.assert_series_equal(result, expected) - - result = Series(dates, dtype="datetime64[ms]") - tm.assert_series_equal(result, expected) - - expected = Series( - [NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]" - ) - result = Series([np.nan] + dates[1:], dtype="datetime64[ns]") - tm.assert_series_equal(result, expected) - - def test_constructor_dtype_datetime64_11(self): - pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] - dates = [np.datetime64(x) for x in pydates] - - dts = Series(dates, dtype="datetime64[ns]") - - # valid astype - dts.astype("int64") - - # invalid casting - msg = r"Converting from datetime64\[ns\] to int32 is not supported" - with pytest.raises(TypeError, match=msg): - dts.astype("int32") - - # ints are ok - # we test with np.int64 to get similar results on - # windows / 32-bit platforms - result = Series(dts, dtype=np.int64) - expected = Series(dts.astype(np.int64)) - tm.assert_series_equal(result, expected) - - def test_constructor_dtype_datetime64_9(self): - # invalid dates can be help as object - result = Series([datetime(2, 1, 1)]) - assert result[0] == datetime(2, 1, 1, 0, 0) - - result = Series([datetime(3000, 1, 1)]) - assert result[0] == datetime(3000, 1, 1, 0, 0) - - def test_constructor_dtype_datetime64_8(self): - # don't mix types - result = Series([Timestamp("20130101"), 1], index=["a", "b"]) - assert result["a"] == Timestamp("20130101") - assert result["b"] == 1 - - def test_constructor_dtype_datetime64_7(self): - # GH6529 - # coerce datetime64 non-ns properly - dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M") - values2 = dates.view(np.ndarray).astype("datetime64[ns]") - expected = Series(values2, index=dates) - - for unit in ["s", "D", "ms", "us", "ns"]: - dtype = np.dtype(f"M8[{unit}]") - values1 = dates.view(np.ndarray).astype(dtype) - result = Series(values1, dates) - if unit == "D": - # for unit="D" we cast to nearest-supported reso, i.e. 
"s" - dtype = np.dtype("M8[s]") - assert result.dtype == dtype - tm.assert_series_equal(result, expected.astype(dtype)) - - # GH 13876 - # coerce to non-ns to object properly - expected = Series(values2, index=dates, dtype=object) - for dtype in ["s", "D", "ms", "us", "ns"]: - values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]") - result = Series(values1, index=dates, dtype=object) - tm.assert_series_equal(result, expected) - - # leave datetime.date alone - dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object) - series1 = Series(dates2, dates) - tm.assert_numpy_array_equal(series1.values, dates2) - assert series1.dtype == object - - def test_constructor_dtype_datetime64_6(self): - # as of 2.0, these no longer infer datetime64 based on the strings, - # matching the Index behavior - - ser = Series([None, NaT, "2013-08-05 15:30:00.000001"]) - assert ser.dtype == object - - ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) - assert ser.dtype == object - - ser = Series([NaT, None, "2013-08-05 15:30:00.000001"]) - assert ser.dtype == object - - ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) - assert ser.dtype == object - - def test_constructor_dtype_datetime64_5(self): - # tz-aware (UTC and other tz's) - # GH 8411 - dr = date_range("20130101", periods=3) - assert Series(dr).iloc[0].tz is None - dr = date_range("20130101", periods=3, tz="UTC") - assert str(Series(dr).iloc[0].tz) == "UTC" - dr = date_range("20130101", periods=3, tz="US/Eastern") - assert str(Series(dr).iloc[0].tz) == "US/Eastern" - - def test_constructor_dtype_datetime64_4(self): - # non-convertible - s = Series([1479596223000, -1479590, NaT]) - assert s.dtype == "object" - assert s[2] is NaT - assert "NaT" in str(s) - - def test_constructor_dtype_datetime64_3(self): - # if we passed a NaT it remains - s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT]) - assert s.dtype == "object" - assert s[2] is NaT - assert "NaT" in str(s) - - def test_constructor_dtype_datetime64_2(self): - # if we passed a nan it remains - s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan]) - assert s.dtype == "object" - assert s[2] is np.nan - assert "NaN" in str(s) - - def test_constructor_with_datetime_tz(self): - # 8260 - # support datetime64 with tz - - dr = date_range("20130101", periods=3, tz="US/Eastern") - s = Series(dr) - assert s.dtype.name == "datetime64[ns, US/Eastern]" - assert s.dtype == "datetime64[ns, US/Eastern]" - assert isinstance(s.dtype, DatetimeTZDtype) - assert "datetime64[ns, US/Eastern]" in str(s) - - # export - result = s.values - assert isinstance(result, np.ndarray) - assert result.dtype == "datetime64[ns]" - - exp = DatetimeIndex(result) - exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz) - tm.assert_index_equal(dr, exp) - - # indexing - result = s.iloc[0] - assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") - result = s[0] - assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") - - result = s[Series([True, True, False], index=s.index)] - tm.assert_series_equal(result, s[0:2]) - - result = s.iloc[0:1] - tm.assert_series_equal(result, Series(dr[0:1])) - - # concat - result = pd.concat([s.iloc[0:1], s.iloc[1:]]) - tm.assert_series_equal(result, s) - - # short str - assert "datetime64[ns, US/Eastern]" in str(s) - - # formatting with NaT - result = s.shift() - assert "datetime64[ns, US/Eastern]" in str(result) - assert "NaT" in str(result) - - # long str - t = Series(date_range("20130101", periods=1000, tz="US/Eastern")) - assert 
"datetime64[ns, US/Eastern]" in str(t) - - result = DatetimeIndex(s, freq="infer") - tm.assert_index_equal(result, dr) - - def test_constructor_with_datetime_tz4(self): - # inference - s = Series( - [ - Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), - Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), - ] - ) - assert s.dtype == "datetime64[ns, US/Pacific]" - assert lib.infer_dtype(s, skipna=True) == "datetime64" - - def test_constructor_with_datetime_tz3(self): - s = Series( - [ - Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), - Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"), - ] - ) - assert s.dtype == "object" - assert lib.infer_dtype(s, skipna=True) == "datetime" - - def test_constructor_with_datetime_tz2(self): - # with all NaT - s = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") - expected = Series(DatetimeIndex(["NaT", "NaT"], tz="US/Eastern")) - tm.assert_series_equal(s, expected) - - def test_constructor_no_partial_datetime_casting(self): - # GH#40111 - vals = [ - "nan", - Timestamp("1990-01-01"), - "2015-03-14T16:15:14.123-08:00", - "2019-03-04T21:56:32.620-07:00", - None, - ] - ser = Series(vals) - assert all(ser[i] is vals[i] for i in range(len(vals))) - - @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) - @pytest.mark.parametrize("kind", ["M", "m"]) - @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) - def test_construction_to_datetimelike_unit(self, arr_dtype, kind, unit): - # tests all units - # gh-19223 - # TODO: GH#19223 was about .astype, doesn't belong here - dtype = f"{kind}8[{unit}]" - arr = np.array([1, 2, 3], dtype=arr_dtype) - ser = Series(arr) - result = ser.astype(dtype) - - expected = Series(arr.astype(dtype)) - - if unit in ["ns", "us", "ms", "s"]: - assert result.dtype == dtype - assert expected.dtype == dtype - else: - # Otherwise we cast to nearest-supported unit, i.e. 
seconds - assert result.dtype == f"{kind}8[s]" - assert expected.dtype == f"{kind}8[s]" - - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", NaT, np.nan, None]) - def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): - # GH 17415: With naive string - result = Series([arg], dtype="datetime64[ns, CET]") - expected = Series(Timestamp(arg)).dt.tz_localize("CET") - tm.assert_series_equal(result, expected) - - def test_constructor_datetime64_bigendian(self): - # GH#30976 - ms = np.datetime64(1, "ms") - arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]") - - result = Series(arr) - expected = Series([Timestamp(ms)]).astype("M8[ms]") - assert expected.dtype == "M8[ms]" - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray]) - def test_construction_interval(self, interval_constructor): - # construction from interval & array of intervals - intervals = interval_constructor.from_breaks(np.arange(3), closed="right") - result = Series(intervals) - assert result.dtype == "interval[int64, right]" - tm.assert_index_equal(Index(result.values), Index(intervals)) - - @pytest.mark.parametrize( - "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] - ) - def test_constructor_infer_interval(self, data_constructor): - # GH 23563: consistent closed results in interval dtype - data = [Interval(0, 1), Interval(0, 2), None] - result = Series(data_constructor(data)) - expected = Series(IntervalArray(data)) - assert result.dtype == "interval[float64, right]" - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] - ) - def test_constructor_interval_mixed_closed(self, data_constructor): - # GH 23563: mixed closed results in object dtype (not interval dtype) - data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")] - result = Series(data_constructor(data)) - assert result.dtype == object - assert result.tolist() == data - - def test_construction_consistency(self): - # make sure that we are not re-localizing upon construction - # GH 14928 - ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) - - result = Series(ser, dtype=ser.dtype) - tm.assert_series_equal(result, ser) - - result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype) - tm.assert_series_equal(result, ser) - - # Pre-2.0 dt64 values were treated as utc, which was inconsistent - # with DatetimeIndex, which treats them as wall times, see GH#33401 - result = Series(ser.values, dtype=ser.dtype) - expected = Series(ser.values).dt.tz_localize(ser.dtype.tz) - tm.assert_series_equal(result, expected) - - with tm.assert_produces_warning(None): - # one suggested alternative to the deprecated (changed in 2.0) usage - middle = Series(ser.values).dt.tz_localize("UTC") - result = middle.dt.tz_convert(ser.dtype.tz) - tm.assert_series_equal(result, ser) - - with tm.assert_produces_warning(None): - # the other suggested alternative to the deprecated usage - result = Series(ser.values.view("int64"), dtype=ser.dtype) - tm.assert_series_equal(result, ser) - - @pytest.mark.parametrize( - "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] - ) - def test_constructor_infer_period(self, data_constructor): - data = [Period("2000", "D"), Period("2001", "D"), None] - result = Series(data_constructor(data)) - expected = Series(period_array(data)) - tm.assert_series_equal(result, expected) - 
assert result.dtype == "Period[D]" - - @pytest.mark.xfail(reason="PeriodDtype Series not supported yet") - def test_construct_from_ints_including_iNaT_scalar_period_dtype(self): - series = Series([0, 1000, 2000, pd._libs.iNaT], dtype="period[D]") - - val = series[3] - assert isna(val) - - series[2] = val - assert isna(series[2]) - - def test_constructor_period_incompatible_frequency(self): - data = [Period("2000", "D"), Period("2001", "A")] - result = Series(data) - assert result.dtype == object - assert result.tolist() == data - - def test_constructor_periodindex(self): - # GH7932 - # converting a PeriodIndex when put in a Series - - pi = period_range("20130101", periods=5, freq="D") - s = Series(pi) - assert s.dtype == "Period[D]" - expected = Series(pi.astype(object)) - tm.assert_series_equal(s, expected) - - def test_constructor_dict(self): - d = {"a": 0.0, "b": 1.0, "c": 2.0} - - result = Series(d) - expected = Series(d, index=sorted(d.keys())) - tm.assert_series_equal(result, expected) - - result = Series(d, index=["b", "c", "d", "a"]) - expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"]) - tm.assert_series_equal(result, expected) - - pidx = tm.makePeriodIndex(100) - d = {pidx[0]: 0, pidx[1]: 1} - result = Series(d, index=pidx) - expected = Series(np.nan, pidx, dtype=np.float64) - expected.iloc[0] = 0 - expected.iloc[1] = 1 - tm.assert_series_equal(result, expected) - - def test_constructor_dict_list_value_explicit_dtype(self): - # GH 18625 - d = {"a": [[2], [3], [4]]} - result = Series(d, index=["a"], dtype="object") - expected = Series(d, index=["a"]) - tm.assert_series_equal(result, expected) - - def test_constructor_dict_order(self): - # GH19018 - # initialization ordering: by insertion order - d = {"b": 1, "a": 0, "c": 2} - result = Series(d) - expected = Series([1, 0, 2], index=list("bac")) - tm.assert_series_equal(result, expected) - - def test_constructor_dict_extension(self, ea_scalar_and_dtype, request): - ea_scalar, ea_dtype = ea_scalar_and_dtype - if isinstance(ea_scalar, Timestamp): - mark = pytest.mark.xfail( - reason="Construction from dict goes through " - "maybe_convert_objects which casts to nano" - ) - request.node.add_marker(mark) - d = {"a": ea_scalar} - result = Series(d, index=["a"]) - expected = Series(ea_scalar, index=["a"], dtype=ea_dtype) - - assert result.dtype == ea_dtype - - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) - def test_constructor_dict_nan_key(self, value): - # GH 18480 - d = {1: "a", value: "b", float("nan"): "c", 4: "d"} - result = Series(d).sort_values() - expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4]) - tm.assert_series_equal(result, expected) - - # MultiIndex: - d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"} - result = Series(d).sort_values() - expected = Series( - ["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)]) - ) - tm.assert_series_equal(result, expected) - - def test_constructor_dict_datetime64_index(self): - # GH 9456 - - dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"] - values = [42544017.198965244, 1234565, 40512335.181958228, -1] - - def create_data(constructor): - return dict(zip((constructor(x) for x in dates_as_str), values)) - - data_datetime64 = create_data(np.datetime64) - data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d")) - data_Timestamp = create_data(Timestamp) - - expected = Series(values, (Timestamp(x) for x in dates_as_str)) - - result_datetime64 = 
Series(data_datetime64) - result_datetime = Series(data_datetime) - result_Timestamp = Series(data_Timestamp) - - tm.assert_series_equal(result_datetime64, expected) - tm.assert_series_equal(result_datetime, expected) - tm.assert_series_equal(result_Timestamp, expected) - - def test_constructor_dict_tuple_indexer(self): - # GH 12948 - data = {(1, 1, None): -1.0} - result = Series(data) - expected = Series( - -1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]]) - ) - tm.assert_series_equal(result, expected) - - def test_constructor_mapping(self, non_dict_mapping_subclass): - # GH 29788 - ndm = non_dict_mapping_subclass({3: "three"}) - result = Series(ndm) - expected = Series(["three"], index=[3]) - - tm.assert_series_equal(result, expected) - - def test_constructor_list_of_tuples(self): - data = [(1, 1), (2, 2), (2, 3)] - s = Series(data) - assert list(s) == data - - def test_constructor_tuple_of_tuples(self): - data = ((1, 1), (2, 2), (2, 3)) - s = Series(data) - assert tuple(s) == data - - def test_constructor_dict_of_tuples(self): - data = {(1, 2): 3, (None, 5): 6} - result = Series(data).sort_values() - expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)])) - tm.assert_series_equal(result, expected) - - # https://github.com/pandas-dev/pandas/issues/22698 - @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning") - def test_fromDict(self): - data = {"a": 0, "b": 1, "c": 2, "d": 3} - - series = Series(data) - tm.assert_is_sorted(series.index) - - data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()} - series = Series(data) - assert series.dtype == np.object_ - - data = {"a": 0, "b": "1", "c": "2", "d": "3"} - series = Series(data) - assert series.dtype == np.object_ - - data = {"a": "0", "b": "1"} - series = Series(data, dtype=float) - assert series.dtype == np.float64 - - def test_fromValue(self, datetime_series): - nans = Series(np.nan, index=datetime_series.index, dtype=np.float64) - assert nans.dtype == np.float64 - assert len(nans) == len(datetime_series) - - strings = Series("foo", index=datetime_series.index) - assert strings.dtype == np.object_ - assert len(strings) == len(datetime_series) - - d = datetime.now() - dates = Series(d, index=datetime_series.index) - assert dates.dtype == "M8[us]" - assert len(dates) == len(datetime_series) - - # GH12336 - # Test construction of categorical series from value - categorical = Series(0, index=datetime_series.index, dtype="category") - expected = Series(0, index=datetime_series.index).astype("category") - assert categorical.dtype == "category" - assert len(categorical) == len(datetime_series) - tm.assert_series_equal(categorical, expected) - - def test_constructor_dtype_timedelta64(self): - # basic - td = Series([timedelta(days=i) for i in range(3)]) - assert td.dtype == "timedelta64[ns]" - - td = Series([timedelta(days=1)]) - assert td.dtype == "timedelta64[ns]" - - td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")]) - - assert td.dtype == "timedelta64[ns]" - - # mixed with NaT - td = Series([timedelta(days=1), NaT], dtype="m8[ns]") - assert td.dtype == "timedelta64[ns]" - - td = Series([timedelta(days=1), np.nan], dtype="m8[ns]") - assert td.dtype == "timedelta64[ns]" - - td = Series([np.timedelta64(300000000), NaT], dtype="m8[ns]") - assert td.dtype == "timedelta64[ns]" - - # improved inference - # GH5689 - td = Series([np.timedelta64(300000000), NaT]) - assert td.dtype == "timedelta64[ns]" - - # because iNaT is int, not coerced to timedelta - td = 
Series([np.timedelta64(300000000), iNaT]) - assert td.dtype == "object" - - td = Series([np.timedelta64(300000000), np.nan]) - assert td.dtype == "timedelta64[ns]" - - td = Series([NaT, np.timedelta64(300000000)]) - assert td.dtype == "timedelta64[ns]" - - td = Series([np.timedelta64(1, "s")]) - assert td.dtype == "timedelta64[ns]" - - # valid astype - td.astype("int64") - - # invalid casting - msg = r"Converting from timedelta64\[ns\] to int32 is not supported" - with pytest.raises(TypeError, match=msg): - td.astype("int32") - - # this is an invalid casting - msg = "|".join( - [ - "Could not convert object to NumPy timedelta", - "Could not convert 'foo' to NumPy timedelta", - ] - ) - with pytest.raises(ValueError, match=msg): - Series([timedelta(days=1), "foo"], dtype="m8[ns]") - - # leave as object here - td = Series([timedelta(days=i) for i in range(3)] + ["foo"]) - assert td.dtype == "object" - - # as of 2.0, these no longer infer timedelta64 based on the strings, - # matching Index behavior - ser = Series([None, NaT, "1 Day"]) - assert ser.dtype == object - - ser = Series([np.nan, NaT, "1 Day"]) - assert ser.dtype == object - - ser = Series([NaT, None, "1 Day"]) - assert ser.dtype == object - - ser = Series([NaT, np.nan, "1 Day"]) - assert ser.dtype == object - - # GH 16406 - def test_constructor_mixed_tz(self): - s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")]) - expected = Series( - [Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")], - dtype="object", - ) - tm.assert_series_equal(s, expected) - - def test_NaT_scalar(self): - series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]") - - val = series[3] - assert isna(val) - - series[2] = val - assert isna(series[2]) - - def test_NaT_cast(self): - # GH10747 - result = Series([np.nan]).astype("M8[ns]") - expected = Series([NaT]) - tm.assert_series_equal(result, expected) - - def test_constructor_name_hashable(self): - for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]: - for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]: - s = Series(data, name=n) - assert s.name == n - - def test_constructor_name_unhashable(self): - msg = r"Series\.name must be a hashable type" - for n in [["name_list"], np.ones(2), {1: 2}]: - for data in [["name_list"], np.ones(2), {1: 2}]: - with pytest.raises(TypeError, match=msg): - Series(data, name=n) - - def test_auto_conversion(self): - series = Series(list(date_range("1/1/2000", periods=10))) - assert series.dtype == "M8[ns]" - - def test_convert_non_ns(self): - # convert from a numpy array of non-ns timedelta64 - arr = np.array([1, 2, 3], dtype="timedelta64[s]") - ser = Series(arr) - assert ser.dtype == arr.dtype - - tdi = timedelta_range("00:00:01", periods=3, freq="s").as_unit("s") - expected = Series(tdi) - assert expected.dtype == arr.dtype - tm.assert_series_equal(ser, expected) - - # convert from a numpy array of non-ns datetime64 - arr = np.array( - ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" - ) - ser = Series(arr) - expected = Series(date_range("20130101", periods=3, freq="D"), dtype="M8[s]") - assert expected.dtype == "M8[s]" - tm.assert_series_equal(ser, expected) - - arr = np.array( - ["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"], - dtype="datetime64[s]", - ) - ser = Series(arr) - expected = Series( - date_range("20130101 00:00:01", periods=3, freq="s"), dtype="M8[s]" - ) - assert expected.dtype == "M8[s]" - tm.assert_series_equal(ser, expected) - - @pytest.mark.parametrize( - "index", - [ - 
date_range("1/1/2000", periods=10), - timedelta_range("1 day", periods=10), - period_range("2000-Q1", periods=10, freq="Q"), - ], - ids=lambda x: type(x).__name__, - ) - def test_constructor_cant_cast_datetimelike(self, index): - # floats are not ok - # strip Index to convert PeriodIndex -> Period - # We don't care whether the error message says - # PeriodIndex or PeriodArray - msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to " - - with pytest.raises(TypeError, match=msg): - Series(index, dtype=float) - - # ints are ok - # we test with np.int64 to get similar results on - # windows / 32-bit platforms - result = Series(index, dtype=np.int64) - expected = Series(index.astype(np.int64)) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "index", - [ - date_range("1/1/2000", periods=10), - timedelta_range("1 day", periods=10), - period_range("2000-Q1", periods=10, freq="Q"), - ], - ids=lambda x: type(x).__name__, - ) - def test_constructor_cast_object(self, index): - s = Series(index, dtype=object) - exp = Series(index).astype(object) - tm.assert_series_equal(s, exp) - - s = Series(Index(index, dtype=object), dtype=object) - exp = Series(index).astype(object) - tm.assert_series_equal(s, exp) - - s = Series(index.astype(object), dtype=object) - exp = Series(index).astype(object) - tm.assert_series_equal(s, exp) - - @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) - def test_constructor_generic_timestamp_no_frequency(self, dtype, request): - # see gh-15524, gh-15987 - msg = "dtype has no unit. Please pass in" - - if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: - mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") - request.node.add_marker(mark) - - with pytest.raises(ValueError, match=msg): - Series([], dtype=dtype) - - @pytest.mark.parametrize("unit", ["ps", "as", "fs", "Y", "M", "W", "D", "h", "m"]) - @pytest.mark.parametrize("kind", ["m", "M"]) - def test_constructor_generic_timestamp_bad_frequency(self, kind, unit): - # see gh-15524, gh-15987 - # as of 2.0 we raise on any non-supported unit rather than silently - # cast to nanos; previously we only raised for frequencies higher - # than ns - dtype = f"{kind}8[{unit}]" - - msg = "dtype=.* is not supported. 
Supported resolutions are" - with pytest.raises(TypeError, match=msg): - Series([], dtype=dtype) - - with pytest.raises(TypeError, match=msg): - # pre-2.0 the DataFrame cast raised but the Series case did not - DataFrame([[0]], dtype=dtype) - - @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) - def test_constructor_range_dtype(self, dtype): - # GH 16804 - expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64") - result = Series(range(5), dtype=dtype) - tm.assert_series_equal(result, expected) - - def test_constructor_range_overflows(self): - # GH#30173 range objects that overflow int64 - rng = range(2**63, 2**63 + 4) - ser = Series(rng) - expected = Series(list(rng)) - tm.assert_series_equal(ser, expected) - assert list(ser) == list(rng) - assert ser.dtype == np.uint64 - - rng2 = range(2**63 + 4, 2**63, -1) - ser2 = Series(rng2) - expected2 = Series(list(rng2)) - tm.assert_series_equal(ser2, expected2) - assert list(ser2) == list(rng2) - assert ser2.dtype == np.uint64 - - rng3 = range(-(2**63), -(2**63) - 4, -1) - ser3 = Series(rng3) - expected3 = Series(list(rng3)) - tm.assert_series_equal(ser3, expected3) - assert list(ser3) == list(rng3) - assert ser3.dtype == object - - rng4 = range(2**73, 2**73 + 4) - ser4 = Series(rng4) - expected4 = Series(list(rng4)) - tm.assert_series_equal(ser4, expected4) - assert list(ser4) == list(rng4) - assert ser4.dtype == object - - def test_constructor_tz_mixed_data(self): - # GH 13051 - dt_list = [ - Timestamp("2016-05-01 02:03:37"), - Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"), - ] - result = Series(dt_list) - expected = Series(dt_list, dtype=object) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("pydt", [True, False]) - def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt): - # GH#25843, GH#41555, GH#33401 - tz = tz_aware_fixture - ts = Timestamp("2019", tz=tz) - if pydt: - ts = ts.to_pydatetime() - - msg = ( - "Cannot convert timezone-aware data to timezone-naive dtype. " - r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." 
- ) - with pytest.raises(ValueError, match=msg): - Series([ts], dtype="datetime64[ns]") - - with pytest.raises(ValueError, match=msg): - Series(np.array([ts], dtype=object), dtype="datetime64[ns]") - - with pytest.raises(ValueError, match=msg): - Series({0: ts}, dtype="datetime64[ns]") - - msg = "Cannot unbox tzaware Timestamp to tznaive dtype" - with pytest.raises(TypeError, match=msg): - Series(ts, index=[0], dtype="datetime64[ns]") - - def test_constructor_datetime64(self): - rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") - dates = np.asarray(rng) - - series = Series(dates) - assert np.issubdtype(series.dtype, np.dtype("M8[ns]")) - - def test_constructor_datetimelike_scalar_to_string_dtype( - self, nullable_string_dtype - ): - # https://github.com/pandas-dev/pandas/pull/33846 - result = Series("M", index=[1, 2, 3], dtype=nullable_string_dtype) - expected = Series(["M", "M", "M"], index=[1, 2, 3], dtype=nullable_string_dtype) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "values", - [ - [np.datetime64("2012-01-01"), np.datetime64("2013-01-01")], - ["2012-01-01", "2013-01-01"], - ], - ) - def test_constructor_sparse_datetime64(self, values): - # https://github.com/pandas-dev/pandas/issues/35762 - dtype = pd.SparseDtype("datetime64[ns]") - result = Series(values, dtype=dtype) - arr = pd.arrays.SparseArray(values, dtype=dtype) - expected = Series(arr) - tm.assert_series_equal(result, expected) - - def test_construction_from_ordered_collection(self): - # https://github.com/pandas-dev/pandas/issues/36044 - result = Series({"a": 1, "b": 2}.keys()) - expected = Series(["a", "b"]) - tm.assert_series_equal(result, expected) - - result = Series({"a": 1, "b": 2}.values()) - expected = Series([1, 2]) - tm.assert_series_equal(result, expected) - - def test_construction_from_large_int_scalar_no_overflow(self): - # https://github.com/pandas-dev/pandas/issues/36291 - n = 1_000_000_000_000_000_000_000 - result = Series(n, index=[0]) - expected = Series(n) - tm.assert_series_equal(result, expected) - - def test_constructor_list_of_periods_infers_period_dtype(self): - series = Series(list(period_range("2000-01-01", periods=10, freq="D"))) - assert series.dtype == "Period[D]" - - series = Series( - [Period("2011-01-01", freq="D"), Period("2011-02-01", freq="D")] - ) - assert series.dtype == "Period[D]" - - def test_constructor_subclass_dict(self, dict_subclass): - data = dict_subclass((x, 10.0 * x) for x in range(10)) - series = Series(data) - expected = Series(dict(data.items())) - tm.assert_series_equal(series, expected) - - def test_constructor_ordereddict(self): - # GH3283 - data = OrderedDict( - (f"col{i}", np.random.default_rng(2).random()) for i in range(12) - ) - - series = Series(data) - expected = Series(list(data.values()), list(data.keys())) - tm.assert_series_equal(series, expected) - - # Test with subclass - class A(OrderedDict): - pass - - series = Series(A(data)) - tm.assert_series_equal(series, expected) - - def test_constructor_dict_multiindex(self): - d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0} - _d = sorted(d.items()) - result = Series(d) - expected = Series( - [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) - ) - tm.assert_series_equal(result, expected) - - d["z"] = 111.0 - _d.insert(0, ("z", d["z"])) - result = Series(d) - expected = Series( - [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False) - ) - result = result.reindex(index=expected.index) - tm.assert_series_equal(result, 
expected) - - def test_constructor_dict_multiindex_reindex_flat(self): - # construction involves reindexing with a MultiIndex corner case - data = {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan} - expected = Series(data) - - result = Series(expected[:-1].to_dict(), index=expected.index) - tm.assert_series_equal(result, expected) - - def test_constructor_dict_timedelta_index(self): - # GH #12169 : Resample category data with timedelta index - # construct Series from dict as data and TimedeltaIndex as index - # will result NaN in result Series data - expected = Series( - data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s") - ) - - result = Series( - data={ - pd.to_timedelta(0, unit="s"): "A", - pd.to_timedelta(10, unit="s"): "B", - pd.to_timedelta(20, unit="s"): "C", - }, - index=pd.to_timedelta([0, 10, 20], unit="s"), - ) - tm.assert_series_equal(result, expected) - - def test_constructor_infer_index_tz(self): - values = [188.5, 328.25] - tzinfo = tzoffset(None, 7200) - index = [ - datetime(2012, 5, 11, 11, tzinfo=tzinfo), - datetime(2012, 5, 11, 12, tzinfo=tzinfo), - ] - series = Series(data=values, index=index) - - assert series.index.tz == tzinfo - - # it works! GH#2443 - repr(series.index[0]) - - def test_constructor_with_pandas_dtype(self): - # going through 2D->1D path - vals = [(1,), (2,), (3,)] - ser = Series(vals) - dtype = ser.array.dtype # NumpyEADtype - ser2 = Series(vals, dtype=dtype) - tm.assert_series_equal(ser, ser2) - - def test_constructor_int_dtype_missing_values(self): - # GH#43017 - result = Series(index=[0], dtype="int64") - expected = Series(np.nan, index=[0], dtype="float64") - tm.assert_series_equal(result, expected) - - def test_constructor_bool_dtype_missing_values(self): - # GH#43018 - result = Series(index=[0], dtype="bool") - expected = Series(True, index=[0], dtype="bool") - tm.assert_series_equal(result, expected) - - def test_constructor_int64_dtype(self, any_int_dtype): - # GH#44923 - result = Series(["0", "1", "2"], dtype=any_int_dtype) - expected = Series([0, 1, 2], dtype=any_int_dtype) - tm.assert_series_equal(result, expected) - - def test_constructor_raise_on_lossy_conversion_of_strings(self): - # GH#44923 - with pytest.raises( - ValueError, match="string values cannot be losslessly cast to int8" - ): - Series(["128"], dtype="int8") - - def test_constructor_dtype_timedelta_alternative_construct(self): - # GH#35465 - result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") - expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns")) - tm.assert_series_equal(result, expected) - - @pytest.mark.xfail( - reason="Not clear what the correct expected behavior should be with " - "integers now that we support non-nano. ATM (2022-10-08) we treat ints " - "as nanoseconds, then cast to the requested dtype. xref #48312" - ) - def test_constructor_dtype_timedelta_ns_s(self): - # GH#35465 - result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") - expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]") - tm.assert_series_equal(result, expected) - - @pytest.mark.xfail( - reason="Not clear what the correct expected behavior should be with " - "integers now that we support non-nano. ATM (2022-10-08) we treat ints " - "as nanoseconds, then cast to the requested dtype. 
xref #48312" - ) - def test_constructor_dtype_timedelta_ns_s_astype_int64(self): - # GH#35465 - result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype( - "int64" - ) - expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype( - "int64" - ) - tm.assert_series_equal(result, expected) - - @pytest.mark.filterwarnings( - "ignore:elementwise comparison failed:DeprecationWarning" - ) - @pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array]) - def test_constructor_mismatched_null_nullable_dtype( - self, func, any_numeric_ea_dtype - ): - # GH#44514 - msg = "|".join( - [ - "cannot safely cast non-equivalent object", - r"int\(\) argument must be a string, a bytes-like object " - "or a (real )?number", - r"Cannot cast array data from dtype\('O'\) to dtype\('float64'\) " - "according to the rule 'safe'", - "object cannot be converted to a FloatingDtype", - "'values' contains non-numeric NA", - ] - ) - - for null in tm.NP_NAT_OBJECTS + [NaT]: - with pytest.raises(TypeError, match=msg): - func([null, 1.0, 3.0], dtype=any_numeric_ea_dtype) - - def test_series_constructor_ea_int_from_bool(self): - # GH#42137 - result = Series([True, False, True, pd.NA], dtype="Int64") - expected = Series([1, 0, 1, pd.NA], dtype="Int64") - tm.assert_series_equal(result, expected) - - result = Series([True, False, True], dtype="Int64") - expected = Series([1, 0, 1], dtype="Int64") - tm.assert_series_equal(result, expected) - - def test_series_constructor_ea_int_from_string_bool(self): - # GH#42137 - with pytest.raises(ValueError, match="invalid literal"): - Series(["True", "False", "True", pd.NA], dtype="Int64") - - @pytest.mark.parametrize("val", [1, 1.0]) - def test_series_constructor_overflow_uint_ea(self, val): - # GH#38798 - max_val = np.iinfo(np.uint64).max - 1 - result = Series([max_val, val], dtype="UInt64") - expected = Series(np.array([max_val, 1], dtype="uint64"), dtype="UInt64") - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("val", [1, 1.0]) - def test_series_constructor_overflow_uint_ea_with_na(self, val): - # GH#38798 - max_val = np.iinfo(np.uint64).max - 1 - result = Series([max_val, val, pd.NA], dtype="UInt64") - expected = Series( - IntegerArray( - np.array([max_val, 1, 0], dtype="uint64"), - np.array([0, 0, 1], dtype=np.bool_), - ) - ) - tm.assert_series_equal(result, expected) - - def test_series_constructor_overflow_uint_with_nan(self): - # GH#38798 - max_val = np.iinfo(np.uint64).max - 1 - result = Series([max_val, np.nan], dtype="UInt64") - expected = Series( - IntegerArray( - np.array([max_val, 1], dtype="uint64"), - np.array([0, 1], dtype=np.bool_), - ) - ) - tm.assert_series_equal(result, expected) - - def test_series_constructor_ea_all_na(self): - # GH#38798 - result = Series([np.nan, np.nan], dtype="UInt64") - expected = Series( - IntegerArray( - np.array([1, 1], dtype="uint64"), - np.array([1, 1], dtype=np.bool_), - ) - ) - tm.assert_series_equal(result, expected) - - def test_series_from_index_dtype_equal_does_not_copy(self): - # GH#52008 - idx = Index([1, 2, 3]) - expected = idx.copy(deep=True) - ser = Series(idx, dtype="int64") - ser.iloc[0] = 100 - tm.assert_index_equal(idx, expected) - - def test_series_string_inference(self): - # GH#54430 - pytest.importorskip("pyarrow") - dtype = "string[pyarrow_numpy]" - expected = Series(["a", "b"], dtype=dtype) - with pd.option_context("future.infer_string", True): - ser = Series(["a", "b"]) - tm.assert_series_equal(ser, expected) - - expected = Series(["a", 1], 
dtype="object") - with pd.option_context("future.infer_string", True): - ser = Series(["a", 1]) - tm.assert_series_equal(ser, expected) - - @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA]) - def test_series_string_with_na_inference(self, na_value): - # GH#54430 - pytest.importorskip("pyarrow") - dtype = "string[pyarrow_numpy]" - expected = Series(["a", na_value], dtype=dtype) - with pd.option_context("future.infer_string", True): - ser = Series(["a", na_value]) - tm.assert_series_equal(ser, expected) - - def test_series_string_inference_scalar(self): - # GH#54430 - pytest.importorskip("pyarrow") - expected = Series("a", index=[1], dtype="string[pyarrow_numpy]") - with pd.option_context("future.infer_string", True): - ser = Series("a", index=[1]) - tm.assert_series_equal(ser, expected) - - def test_series_string_inference_array_string_dtype(self): - # GH#54496 - pytest.importorskip("pyarrow") - expected = Series(["a", "b"], dtype="string[pyarrow_numpy]") - with pd.option_context("future.infer_string", True): - ser = Series(np.array(["a", "b"])) - tm.assert_series_equal(ser, expected) - - def test_series_string_inference_storage_definition(self): - # GH#54793 - pytest.importorskip("pyarrow") - expected = Series(["a", "b"], dtype="string[pyarrow_numpy]") - with pd.option_context("future.infer_string", True): - result = Series(["a", "b"], dtype="string") - tm.assert_series_equal(result, expected) - - def test_series_constructor_infer_string_scalar(self): - # GH#55537 - with pd.option_context("future.infer_string", True): - ser = Series("a", index=[1, 2], dtype="string[python]") - expected = Series(["a", "a"], index=[1, 2], dtype="string[python]") - tm.assert_series_equal(ser, expected) - assert ser.dtype.storage == "python" - - def test_series_string_inference_na_first(self): - # GH#55655 - pytest.importorskip("pyarrow") - expected = Series([pd.NA, "b"], dtype="string[pyarrow_numpy]") - with pd.option_context("future.infer_string", True): - result = Series([pd.NA, "b"]) - tm.assert_series_equal(result, expected) - - -class TestSeriesConstructorIndexCoercion: - def test_series_constructor_datetimelike_index_coercion(self): - idx = tm.makeDateIndex(10000) - ser = Series( - np.random.default_rng(2).standard_normal(len(idx)), idx.astype(object) - ) - # as of 2.0, we no longer silently cast the object-dtype index - # to DatetimeIndex GH#39307, GH#23598 - assert not isinstance(ser.index, DatetimeIndex) - - def test_series_constructor_infer_multiindex(self): - index_lists = [["a", "a", "b", "b"], ["x", "y", "x", "y"]] - - multi = Series(1.0, index=[np.array(x) for x in index_lists]) - assert isinstance(multi.index, MultiIndex) - - multi = Series(1.0, index=index_lists) - assert isinstance(multi.index, MultiIndex) - - multi = Series(range(4), index=index_lists) - assert isinstance(multi.index, MultiIndex) - - -class TestSeriesConstructorInternals: - def test_constructor_no_pandas_array(self, using_array_manager): - ser = Series([1, 2, 3]) - result = Series(ser.array) - tm.assert_series_equal(ser, result) - if not using_array_manager: - assert isinstance(result._mgr.blocks[0], NumpyBlock) - assert result._mgr.blocks[0].is_numeric - - @td.skip_array_manager_invalid_test - def test_from_array(self): - result = Series(pd.array(["1H", "2H"], dtype="timedelta64[ns]")) - assert result._mgr.blocks[0].is_extension is False - - result = Series(pd.array(["2015"], dtype="datetime64[ns]")) - assert result._mgr.blocks[0].is_extension is False - - @td.skip_array_manager_invalid_test - def 
test_from_list_dtype(self): - result = Series(["1H", "2H"], dtype="timedelta64[ns]") - assert result._mgr.blocks[0].is_extension is False - - result = Series(["2015"], dtype="datetime64[ns]") - assert result._mgr.blocks[0].is_extension is False - - -def test_constructor(rand_series_with_duplicate_datetimeindex): - dups = rand_series_with_duplicate_datetimeindex - assert isinstance(dups, Series) - assert isinstance(dups.index, DatetimeIndex) - - -@pytest.mark.parametrize( - "input_dict,expected", - [ - ({0: 0}, np.array([[0]], dtype=np.int64)), - ({"a": "a"}, np.array([["a"]], dtype=object)), - ({1: 1}, np.array([[1]], dtype=np.int64)), - ], -) -def test_numpy_array(input_dict, expected): - result = np.array([Series(input_dict)]) - tm.assert_numpy_array_equal(result, expected) - - -def test_index_ordered_dict_keys(): - # GH 22077 - - param_index = OrderedDict( - [ - ((("a", "b"), ("c", "d")), 1), - ((("a", None), ("c", "d")), 2), - ] - ) - series = Series([1, 2], index=param_index.keys()) - expected = Series( - [1, 2], - index=MultiIndex.from_tuples( - [(("a", "b"), ("c", "d")), (("a", None), ("c", "d"))] - ), - ) - tm.assert_series_equal(series, expected) - - -@pytest.mark.parametrize( - "input_list", - [ - [1, complex("nan"), 2], - [1 + 1j, complex("nan"), 2 + 2j], - ], -) -def test_series_with_complex_nan(input_list): - # GH#53627 - ser = Series(input_list) - result = Series(ser.array) - assert ser.dtype == "complex128" - tm.assert_series_equal(ser, result) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/utils/wheel.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/utils/wheel.py deleted file mode 100644 index e5e3f34ed81453ce759c6ade8b2def733e9063e2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/utils/wheel.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Support functions for working with wheel files. -""" - -import logging -from email.message import Message -from email.parser import Parser -from typing import Tuple -from zipfile import BadZipFile, ZipFile - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.exceptions import UnsupportedWheel - -VERSION_COMPATIBLE = (1, 0) - - -logger = logging.getLogger(__name__) - - -def parse_wheel(wheel_zip: ZipFile, name: str) -> Tuple[str, Message]: - """Extract information from the provided wheel, ensuring it meets basic - standards. - - Returns the name of the .dist-info directory and the parsed WHEEL metadata. - """ - try: - info_dir = wheel_dist_info_dir(wheel_zip, name) - metadata = wheel_metadata(wheel_zip, info_dir) - version = wheel_version(metadata) - except UnsupportedWheel as e: - raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e))) - - check_compatibility(version, name) - - return info_dir, metadata - - -def wheel_dist_info_dir(source: ZipFile, name: str) -> str: - """Returns the name of the contained .dist-info directory. - - Raises AssertionError or UnsupportedWheel if not found, >1 found, or - it doesn't match the provided name. 
- """ - # Zip file path separators must be / - subdirs = {p.split("/", 1)[0] for p in source.namelist()} - - info_dirs = [s for s in subdirs if s.endswith(".dist-info")] - - if not info_dirs: - raise UnsupportedWheel(".dist-info directory not found") - - if len(info_dirs) > 1: - raise UnsupportedWheel( - "multiple .dist-info directories found: {}".format(", ".join(info_dirs)) - ) - - info_dir = info_dirs[0] - - info_dir_name = canonicalize_name(info_dir) - canonical_name = canonicalize_name(name) - if not info_dir_name.startswith(canonical_name): - raise UnsupportedWheel( - ".dist-info directory {!r} does not start with {!r}".format( - info_dir, canonical_name - ) - ) - - return info_dir - - -def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes: - try: - return source.read(path) - # BadZipFile for general corruption, KeyError for missing entry, - # and RuntimeError for password-protected files - except (BadZipFile, KeyError, RuntimeError) as e: - raise UnsupportedWheel(f"could not read {path!r} file: {e!r}") - - -def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message: - """Return the WHEEL metadata of an extracted wheel, if possible. - Otherwise, raise UnsupportedWheel. - """ - path = f"{dist_info_dir}/WHEEL" - # Zip file path separators must be / - wheel_contents = read_wheel_metadata_file(source, path) - - try: - wheel_text = wheel_contents.decode() - except UnicodeDecodeError as e: - raise UnsupportedWheel(f"error decoding {path!r}: {e!r}") - - # FeedParser (used by Parser) does not raise any exceptions. The returned - # message may have .defects populated, but for backwards-compatibility we - # currently ignore them. - return Parser().parsestr(wheel_text) - - -def wheel_version(wheel_data: Message) -> Tuple[int, ...]: - """Given WHEEL metadata, return the parsed Wheel-Version. - Otherwise, raise UnsupportedWheel. - """ - version_text = wheel_data["Wheel-Version"] - if version_text is None: - raise UnsupportedWheel("WHEEL is missing Wheel-Version") - - version = version_text.strip() - - try: - return tuple(map(int, version.split("."))) - except ValueError: - raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}") - - -def check_compatibility(version: Tuple[int, ...], name: str) -> None: - """Raises errors or warns if called with an incompatible Wheel-Version. - - pip should refuse to install a Wheel-Version that's a major series - ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when - installing a version only minor version ahead (e.g 1.2 > 1.1). 
- - version: a 2-tuple representing a Wheel-Version (Major, Minor) - name: name of wheel or package to raise exception about - - :raises UnsupportedWheel: when an incompatible Wheel-Version is given - """ - if version[0] > VERSION_COMPATIBLE[0]: - raise UnsupportedWheel( - "{}'s Wheel-Version ({}) is not compatible with this version " - "of pip".format(name, ".".join(map(str, version))) - ) - elif version > VERSION_COMPATIBLE: - logger.warning( - "Installing from a newer Wheel-Version (%s)", - ".".join(map(str, version)), - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/hdl.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/hdl.py deleted file mode 100644 index 319ec93100bcbaa984c8e44613ccbe115f5de877..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/hdl.py +++ /dev/null @@ -1,465 +0,0 @@ -""" - pygments.lexers.hdl - ~~~~~~~~~~~~~~~~~~~ - - Lexers for hardware descriptor languages. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, bygroups, include, using, this, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace - -__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer'] - - -class VerilogLexer(RegexLexer): - """ - For verilog source code with preprocessor directives. - - .. versionadded:: 1.4 - """ - name = 'verilog' - aliases = ['verilog', 'v'] - filenames = ['*.v'] - mimetypes = ['text/x-verilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^\s*`define', Comment.Preproc, 'macro'), - (r'\s+', Whitespace), - (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), - (r'([0-9]+)|(\'b)[01]+', Number.Bin), - (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), - (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), - (r'\'[01xz]', Number), - (r'\d+[Ll]?', Number.Integer), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;\']', Punctuation), - (r'`[a-zA-Z_]\w*', Name.Constant), - - (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text)), - (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text), - 'import'), - - (words(( - 'always', 'always_comb', 'always_ff', 'always_latch', 'and', - 'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1', - 'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign', - 'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase', - 'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive', - 'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for', - 'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0', - 'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large', - 'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge', - 'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed', - 'parameter', 'pmos', 'posedge', 
'primitive', 'pull0', 'pull1', - 'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return', - 'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed', - 'small', 'specify', 'specparam', 'strength', 'string', 'strong0', - 'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1', - 'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait', - 'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'), - Keyword), - - (words(( - 'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype', - 'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected', - 'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate', - 'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames', - 'nounconnected_drive', 'protect', 'protected', 'remove_gatenames', - 'remove_netnames', 'resetall', 'timescale', 'unconnected_drive', - 'undef'), prefix=r'`', suffix=r'\b'), - Comment.Preproc), - - (words(( - 'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose', - 'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite', - 'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log', - 'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale', - 'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset', - 'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope', - 'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb', - 'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'), - prefix=r'\$', suffix=r'\b'), - Name.Builtin), - - (words(( - 'byte', 'shortint', 'int', 'longint', 'integer', 'time', - 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand', - 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor' - 'shortreal', 'real', 'realtime'), suffix=r'\b'), - Keyword.Type), - (r'[a-zA-Z_]\w*:(?!:)', Name.Label), - (r'\$?[a-zA-Z_]\w*', Name), - (r'\\(\S+)', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Whitespace, '#pop'), - ], - 'import': [ - (r'[\w:]+\*?', Name.Namespace, '#pop') - ] - } - - def analyse_text(text): - """Verilog code will use one of reg/wire/assign for sure, and that - is not common elsewhere.""" - result = 0 - if 'reg' in text: - result += 0.1 - if 'wire' in text: - result += 0.1 - if 'assign' in text: - result += 0.1 - - return result - - -class SystemVerilogLexer(RegexLexer): - """ - Extends verilog lexer to recognise all SystemVerilog keywords from IEEE - 1800-2009 standard. - - .. 
versionadded:: 1.5 - """ - name = 'systemverilog' - aliases = ['systemverilog', 'sv'] - filenames = ['*.sv', '*.svh'] - mimetypes = ['text/x-systemverilog'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' - - tokens = { - 'root': [ - (r'^(\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'), - (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)), - (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'), - - (r'\s+', Whitespace), - (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), - (r'[{}#@]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - - (r'([1-9][_0-9]*)?\s*\'[sS]?[bB]\s*[xXzZ?01][_xXzZ?01]*', - Number.Bin), - (r'([1-9][_0-9]*)?\s*\'[sS]?[oO]\s*[xXzZ?0-7][_xXzZ?0-7]*', - Number.Oct), - (r'([1-9][_0-9]*)?\s*\'[sS]?[dD]\s*[xXzZ?0-9][_xXzZ?0-9]*', - Number.Integer), - (r'([1-9][_0-9]*)?\s*\'[sS]?[hH]\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*', - Number.Hex), - - (r'\'[01xXzZ]', Number), - (r'[0-9][_0-9]*', Number.Integer), - - (r'[~!%^&*+=|?:<>/-]', Operator), - (words(('inside', 'dist'), suffix=r'\b'), Operator.Word), - - (r'[()\[\],.;\'$]', Punctuation), - (r'`[a-zA-Z_]\w*', Name.Constant), - - (words(( - 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', - 'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic', - 'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf', - 'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell', - 'checker', 'clocking', 'cmos', 'config', - 'constraint', 'context', 'continue', 'cover', 'covergroup', - 'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design', - 'disable', 'do', 'edge', 'else', 'end', 'endcase', - 'endchecker', 'endclocking', 'endconfig', 'endfunction', - 'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage', - 'endprimitive', 'endprogram', 'endproperty', 'endsequence', - 'endspecify', 'endtable', 'endtask', 'enum', 'eventually', - 'expect', 'export', 'extern', 'final', 'first_match', - 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function', - 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', - 'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'implements', 'import', - 'incdir', 'include', 'initial', 'inout', 'input', - 'instance', 'interconnect', 'interface', 'intersect', 'join', - 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', - 'local', 'localparam', 'macromodule', 'matches', - 'medium', 'modport', 'module', 'nand', 'negedge', 'nettype', 'new', 'nexttime', - 'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null', - 'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge', - 'primitive', 'priority', 'program', 'property', 'protected', 'pull0', - 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', - 'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase', - 'randsequence', 'rcmos', 'ref', - 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', - 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', - 's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence', - 'showcancelled', 'small', 'soft', 'solve', - 'specify', 'specparam', 'static', 'strong', 'strong0', - 'strong1', 'struct', 
'super', 'sync_accept_on', - 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', - 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', - 'typedef', 'union', 'unique', 'unique0', 'until', - 'until_with', 'untyped', 'use', 'vectored', - 'virtual', 'wait', 'wait_order', 'weak', 'weak0', - 'weak1', 'while', 'wildcard', 'with', 'within', - 'xnor', 'xor'), - suffix=r'\b'), - Keyword), - - (r'(class)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword.Declaration, Whitespace, Name.Class)), - (r'(extends)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword.Declaration, Whitespace, Name.Class)), - (r'(endclass\b)(?:(\s*)(:)(\s*)([a-zA-Z_]\w*))?', - bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)), - - (words(( - # Variable types - 'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer', - 'logic', 'longint', 'real', 'realtime', 'reg', 'shortint', - 'shortreal', 'signed', 'string', 'time', 'type', 'unsigned', - 'var', 'void', - # Net types - 'supply0', 'supply1', 'tri', 'triand', 'trior', 'trireg', - 'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'), - suffix=r'\b'), - Keyword.Type), - - (words(( - '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', - '`default_nettype', '`define', '`else', '`elsif', '`end_keywords', - '`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include', - '`line', '`nounconnected_drive', '`pragma', '`resetall', - '`timescale', '`unconnected_drive', '`undef', '`undefineall'), - suffix=r'\b'), - Comment.Preproc), - - (words(( - # Simulation control tasks (20.2) - '$exit', '$finish', '$stop', - # Simulation time functions (20.3) - '$realtime', '$stime', '$time', - # Timescale tasks (20.4) - '$printtimescale', '$timeformat', - # Conversion functions - '$bitstoreal', '$bitstoshortreal', '$cast', '$itor', - '$realtobits', '$rtoi', '$shortrealtobits', '$signed', - '$unsigned', - # Data query functions (20.6) - '$bits', '$isunbounded', '$typename', - # Array query functions (20.7) - '$dimensions', '$high', '$increment', '$left', '$low', '$right', - '$size', '$unpacked_dimensions', - # Math functions (20.8) - '$acos', '$acosh', '$asin', '$asinh', '$atan', '$atan2', - '$atanh', '$ceil', '$clog2', '$cos', '$cosh', '$exp', '$floor', - '$hypot', '$ln', '$log10', '$pow', '$sin', '$sinh', '$sqrt', - '$tan', '$tanh', - # Bit vector system functions (20.9) - '$countbits', '$countones', '$isunknown', '$onehot', '$onehot0', - # Severity tasks (20.10) - '$info', '$error', '$fatal', '$warning', - # Assertion control tasks (20.12) - '$assertcontrol', '$assertfailoff', '$assertfailon', - '$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton', - '$assertpassoff', '$assertpasson', '$assertvacuousoff', - # Sampled value system functions (20.13) - '$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk', - '$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk', - '$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable', - '$stable_gclk', '$steady_gclk', - # Coverage control functions (20.14) - '$coverage_control', '$coverage_get', '$coverage_get_max', - '$coverage_merge', '$coverage_save', '$get_coverage', - '$load_coverage_db', '$set_coverage_db_name', - # Probabilistic distribution functions (20.15) - '$dist_chi_square', '$dist_erlang', '$dist_exponential', - '$dist_normal', '$dist_poisson', '$dist_t', '$dist_uniform', - '$random', - # Stochastic analysis tasks and functions (20.16) - '$q_add', '$q_exam', '$q_full', '$q_initialize', '$q_remove', - # PLA modeling tasks (20.17) - '$async$and$array', '$async$and$plane', 
'$async$nand$array', - '$async$nand$plane', '$async$nor$array', '$async$nor$plane', - '$async$or$array', '$async$or$plane', '$sync$and$array', - '$sync$and$plane', '$sync$nand$array', '$sync$nand$plane', - '$sync$nor$array', '$sync$nor$plane', '$sync$or$array', - '$sync$or$plane', - # Miscellaneous tasks and functions (20.18) - '$system', - # Display tasks (21.2) - '$display', '$displayb', '$displayh', '$displayo', '$monitor', - '$monitorb', '$monitorh', '$monitoro', '$monitoroff', - '$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo', - '$write', '$writeb', '$writeh', '$writeo', - # File I/O tasks and functions (21.3) - '$fclose', '$fdisplay', '$fdisplayb', '$fdisplayh', - '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', '$fgets', - '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen', - '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', - '$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', - '$fwriteh', '$fwriteo', '$rewind', '$sformat', '$sformatf', - '$sscanf', '$swrite', '$swriteb', '$swriteh', '$swriteo', - '$ungetc', - # Memory load tasks (21.4) - '$readmemb', '$readmemh', - # Memory dump tasks (21.5) - '$writememb', '$writememh', - # Command line input (21.6) - '$test$plusargs', '$value$plusargs', - # VCD tasks (21.7) - '$dumpall', '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff', - '$dumpon', '$dumpports', '$dumpportsall', '$dumpportsflush', - '$dumpportslimit', '$dumpportsoff', '$dumpportson', '$dumpvars', - ), suffix=r'\b'), - Name.Builtin), - - (r'[a-zA-Z_]\w*:(?!:)', Name.Label), - (r'\$?[a-zA-Z_]\w*', Name), - (r'\\(\S+)', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'//.*?$', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', Whitespace, '#pop'), - ], - 'import': [ - (r'[\w:]+\*?', Name.Namespace, '#pop') - ] - } - - -class VhdlLexer(RegexLexer): - """ - For VHDL source code. - - .. 
versionadded:: 1.5 - """ - name = 'vhdl' - aliases = ['vhdl'] - filenames = ['*.vhdl', '*.vhd'] - mimetypes = ['text/x-vhdl'] - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'\s+', Whitespace), - (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation - (r'--.*?$', Comment.Single), - (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r"'[a-z_]\w*", Name.Attribute), - (r'[()\[\],.;\']', Punctuation), - (r'"[^\n\\"]*"', String), - - (r'(library)(\s+)([a-z_]\w*)', - bygroups(Keyword, Whitespace, Name.Namespace)), - (r'(use)(\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)), - (r'(use)(\s+)([a-z_][\w.]*\.)(all)', - bygroups(Keyword, Whitespace, Name.Namespace, Keyword)), - (r'(use)(\s+)([a-z_][\w.]*)', - bygroups(Keyword, Whitespace, Name.Namespace)), - (r'(std|ieee)(\.[a-z_]\w*)', - bygroups(Name.Namespace, Name.Namespace)), - (words(('std', 'ieee', 'work'), suffix=r'\b'), - Name.Namespace), - (r'(entity|component)(\s+)([a-z_]\w*)', - bygroups(Keyword, Whitespace, Name.Class)), - (r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)' - r'(of)(\s+)([a-z_]\w*)(\s+)(is)', - bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace, - Name.Class, Whitespace, Keyword)), - (r'([a-z_]\w*)(:)(\s+)(process|for)', - bygroups(Name.Class, Operator, Whitespace, Keyword)), - (r'(end)(\s+)', bygroups(using(this), Whitespace), 'endblock'), - - include('types'), - include('keywords'), - include('numbers'), - - (r'[a-z_]\w*', Name), - ], - 'endblock': [ - include('keywords'), - (r'[a-z_]\w*', Name.Class), - (r'\s+', Whitespace), - (r';', Punctuation, '#pop'), - ], - 'types': [ - (words(( - 'boolean', 'bit', 'character', 'severity_level', 'integer', 'time', - 'delay_length', 'natural', 'positive', 'string', 'bit_vector', - 'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector', - 'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'), - Keyword.Type), - ], - 'keywords': [ - (words(( - 'abs', 'access', 'after', 'alias', 'all', 'and', - 'architecture', 'array', 'assert', 'attribute', 'begin', 'block', - 'body', 'buffer', 'bus', 'case', 'component', 'configuration', - 'constant', 'disconnect', 'downto', 'else', 'elsif', 'end', - 'entity', 'exit', 'file', 'for', 'function', 'generate', - 'generic', 'group', 'guarded', 'if', 'impure', 'in', - 'inertial', 'inout', 'is', 'label', 'library', 'linkage', - 'literal', 'loop', 'map', 'mod', 'nand', 'new', - 'next', 'nor', 'not', 'null', 'of', 'on', - 'open', 'or', 'others', 'out', 'package', 'port', - 'postponed', 'procedure', 'process', 'pure', 'range', 'record', - 'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select', - 'severity', 'signal', 'shared', 'sla', 'sll', 'sra', - 'srl', 'subtype', 'then', 'to', 'transport', 'type', - 'units', 'until', 'use', 'variable', 'wait', 'when', - 'while', 'with', 'xnor', 'xor'), suffix=r'\b'), - Keyword), - ], - 'numbers': [ - (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer), - (r'\d+', Number.Integer), - (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float), - (r'X"[0-9a-f_]+"', Number.Hex), - (r'O"[0-7_]+"', Number.Oct), - (r'B"[01_]+"', Number.Bin), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/verification.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/verification.py deleted file mode 100644 index 41d45d40670bee3f7c1d7c88662286d402fc51c2..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/verification.py +++ /dev/null @@ -1,114 +0,0 @@ -""" - pygments.lexers.verification - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for Intermediate Verification Languages (IVLs). - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, include, words -from pygments.token import Comment, Operator, Keyword, Name, Number, \ - Punctuation, Text, Generic - -__all__ = ['BoogieLexer', 'SilverLexer'] - - -class BoogieLexer(RegexLexer): - """ - For Boogie source code. - - .. versionadded:: 2.1 - """ - name = 'Boogie' - url = 'https://boogie-docs.readthedocs.io/en/latest/' - aliases = ['boogie'] - filenames = ['*.bpl'] - - tokens = { - 'root': [ - # Whitespace and Comments - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//[/!](.*?)\n', Comment.Doc), - (r'//(.*?)\n', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - - (words(( - 'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function', - 'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires', - 'then', 'var', 'while'), - suffix=r'\b'), Keyword), - (words(('const',), suffix=r'\b'), Keyword.Reserved), - - (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type), - include('numbers'), - (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator), - (r'\{.*?\}', Generic.Emph), #triggers - (r"([{}():;,.])", Punctuation), - # Identifier - (r'[a-zA-Z_]\w*', Name), - ], - 'comment': [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ], - 'numbers': [ - (r'[0-9]+', Number.Integer), - ], - } - - -class SilverLexer(RegexLexer): - """ - For Silver source code. - - .. 
versionadded:: 2.2 - """ - name = 'Silver' - aliases = ['silver'] - filenames = ['*.sil', '*.vpr'] - - tokens = { - 'root': [ - # Whitespace and Comments - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'//[/!](.*?)\n', Comment.Doc), - (r'//(.*?)\n', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - - (words(( - 'result', 'true', 'false', 'null', 'method', 'function', - 'predicate', 'program', 'domain', 'axiom', 'var', 'returns', - 'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert', - 'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh', - 'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection', - 'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists', - 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique', - 'apply', 'package', 'folding', 'label', 'forperm'), - suffix=r'\b'), Keyword), - (words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator), - (words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type), - include('numbers'), - (r'[!%&*+=|?:<>/\-\[\]]', Operator), - (r'\{.*?\}', Generic.Emph), #triggers - (r'([{}():;,.])', Punctuation), - # Identifier - (r'[\w$]\w*', Name), - ], - 'comment': [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ], - 'numbers': [ - (r'[0-9]+', Number.Integer), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/status.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/status.py deleted file mode 100644 index 09eff405ec194ee2884f203cb48c5df54ff0b9c7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/status.py +++ /dev/null @@ -1,132 +0,0 @@ -from types import TracebackType -from typing import Optional, Type - -from .console import Console, RenderableType -from .jupyter import JupyterMixin -from .live import Live -from .spinner import Spinner -from .style import StyleType - - -class Status(JupyterMixin): - """Displays a status indicator with a 'spinner' animation. - - Args: - status (RenderableType): A status renderable (str or Text typically). - console (Console, optional): Console instance to use, or None for global console. Defaults to None. - spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". - spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". - speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. - refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. 
- """ - - def __init__( - self, - status: RenderableType, - *, - console: Optional[Console] = None, - spinner: str = "dots", - spinner_style: StyleType = "status.spinner", - speed: float = 1.0, - refresh_per_second: float = 12.5, - ): - self.status = status - self.spinner_style = spinner_style - self.speed = speed - self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed) - self._live = Live( - self.renderable, - console=console, - refresh_per_second=refresh_per_second, - transient=True, - ) - - @property - def renderable(self) -> Spinner: - return self._spinner - - @property - def console(self) -> "Console": - """Get the Console used by the Status objects.""" - return self._live.console - - def update( - self, - status: Optional[RenderableType] = None, - *, - spinner: Optional[str] = None, - spinner_style: Optional[StyleType] = None, - speed: Optional[float] = None, - ) -> None: - """Update status. - - Args: - status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None. - spinner (Optional[str], optional): New spinner or None for no change. Defaults to None. - spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None. - speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None. - """ - if status is not None: - self.status = status - if spinner_style is not None: - self.spinner_style = spinner_style - if speed is not None: - self.speed = speed - if spinner is not None: - self._spinner = Spinner( - spinner, text=self.status, style=self.spinner_style, speed=self.speed - ) - self._live.update(self.renderable, refresh=True) - else: - self._spinner.update( - text=self.status, style=self.spinner_style, speed=self.speed - ) - - def start(self) -> None: - """Start the status animation.""" - self._live.start() - - def stop(self) -> None: - """Stop the spinner animation.""" - self._live.stop() - - def __rich__(self) -> RenderableType: - return self.renderable - - def __enter__(self) -> "Status": - self.start() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.stop() - - -if __name__ == "__main__": # pragma: no cover - - from time import sleep - - from .console import Console - - console = Console() - with console.status("[magenta]Covid detector booting up") as status: - sleep(3) - console.log("Importing advanced AI") - sleep(3) - console.log("Advanced Covid AI Ready") - sleep(3) - status.update(status="[bold blue] Scanning for Covid", spinner="earth") - sleep(3) - console.log("Found 10,000,000,000 copies of Covid32.exe") - sleep(3) - status.update( - status="[bold red]Moving Covid32.exe to Trash", - spinner="bouncingBall", - spinner_style="yellow", - ) - sleep(5) - console.print("[bold green]Covid deleted successfully") diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Decompiler Ex4 To Mq4 Keygen Generator.md b/spaces/quidiaMuxgu/Expedit-SAM/Decompiler Ex4 To Mq4 Keygen Generator.md deleted file mode 100644 index ce71bc40d957de3f1738f1e36d714647bcad131c..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Decompiler Ex4 To Mq4 Keygen Generator.md +++ /dev/null @@ -1,21 +0,0 @@ -
      -

      How to Use Decompiler Ex4 To Mq4 Keygen Generator to Convert MetaTrader 4 Files

      -

If you are a forex trader or developer who uses the MetaTrader 4 (MT4) platform, you may have encountered the problem of converting .ex4 files to .mq4 files. Ex4 files are compiled versions of mq4 files, which are the source code of MT4 indicators and expert advisors. Mq4 files are easier to edit and modify than ex4 files, but they are not always available or accessible. That's where Decompiler Ex4 To Mq4 Keygen Generator comes in handy.

      -

      Decompiler Ex4 To Mq4 Keygen Generator


      DOWNLOAD –––––>>> https://geags.com/2uCqhq



      -

      Decompiler Ex4 To Mq4 Keygen Generator is a software tool that can decompile ex4 files back to mq4 files, allowing you to access the source code and make changes as you wish. It can also generate a unique key for each decompiled file, ensuring that your work is protected and secure. In this article, we will show you how to use Decompiler Ex4 To Mq4 Keygen Generator to convert your ex4 files to mq4 files in a few simple steps.

      -

      Step 1: Download and Install Decompiler Ex4 To Mq4 Keygen Generator

      -

      The first step is to download and install Decompiler Ex4 To Mq4 Keygen Generator on your computer. You can get it from the official website or from any trusted source. The installation process is straightforward and should not take more than a few minutes. Once installed, you will see the main interface of the software, which looks like this:

[Image: Decompiler Ex4 To Mq4 Keygen Generator interface]

      Step 2: Select the Ex4 File You Want to Decompile

      -

      The next step is to select the ex4 file you want to decompile. You can do this by clicking on the "Browse" button and navigating to the folder where your ex4 file is located. Alternatively, you can drag and drop the ex4 file into the software window. The software will automatically detect the ex4 file and display its name and size in the "Input File" field.

      -

      Step 3: Choose the Output Folder and File Name

      -

      The third step is to choose the output folder and file name for your decompiled mq4 file. You can do this by clicking on the "Browse" button next to the "Output File" field and selecting a folder where you want to save your mq4 file. You can also change the file name if you want, but make sure it has the .mq4 extension. The software will show you the output file path in the "Output File" field.
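
If you prefer to script this renaming convention outside the tool, the rule is simply "same base name, .mq4 extension". Here is a small, hypothetical Python helper (not part of the software itself) that expresses it:

```python
from pathlib import Path

def output_path_for(ex4_file: str, out_dir: str) -> Path:
    """Keep the original base name but give it the .mq4 extension."""
    return Path(out_dir) / Path(ex4_file).with_suffix(".mq4").name

# e.g. C:/experts/trend.ex4 -> <out_dir>/trend.mq4
print(output_path_for("C:/experts/trend.ex4", "C:/decompiled"))
```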

      -

      Step 4: Generate a Unique Key for Your Decompiled File

      -

      The fourth step is to generate a unique key for your decompiled file. This is an optional but recommended step that will ensure that your decompiled file is protected from unauthorized use or modification. You can do this by clicking on the "Generate Key" button and waiting for a few seconds. The software will generate a random alphanumeric key and display it in the "Key" field. You can copy this key and save it somewhere safe for future reference.
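
The exact key format used by the software is not documented here, so as a rough illustration only, a random alphanumeric key of the kind described could be produced with Python's secrets module; the length and character set below are assumptions, not the tool's real scheme:

```python
import secrets
import string

def generate_key(length: int = 16) -> str:
    """Return a random alphanumeric key such as 'Q4ZV8N1RD0K7XH2M'."""
    alphabet = string.ascii_uppercase + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(generate_key())
```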

      -

      -

      Step 5: Decompile Your Ex4 File to Mq4 File

      -

The final step is to decompile your ex4 file into an mq4 file. You can do this by clicking on the "Decompile" button and waiting for the process to complete. The software will show you a progress bar and a message indicating when the decompilation is done. You can then open your output folder and find your decompiled mq4 file there.

      -

      Conclusion

      -

      Decompiler Ex4 To Mq4 Keygen Generator is a useful tool that can help you convert your ex4 files to mq4 files in a matter of minutes. It can also generate a unique key for each decompiled file, adding an extra layer of security to your work. With this tool, you can easily access and modify the source code of your MT4 indicators and expert advisors, enhancing your trading performance and results.

      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Diskdigger Lisans Anahtar BEST.md b/spaces/quidiaMuxgu/Expedit-SAM/Diskdigger Lisans Anahtar BEST.md deleted file mode 100644 index 3983e71ecc50f1a123b68aed3ed702cf8c71a677..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Diskdigger Lisans Anahtar BEST.md +++ /dev/null @@ -1,12 +0,0 @@ -

Diskdigger lisans anahtar (DiskDigger license key)


      Download File ——— https://geags.com/2uCsN8



      - -Disk Digger Serial Key .... Diskdigger lisans anahtar dotnetfx40 x86 x32.63 client eltechs exagear desktop for ... "diskdigger lisans anahtarı" ile İlgili ...## #Diskdigger.Lisans.Anahtar.Serial.Numbers.. diskdigger serial keygen Hit2k, diskdigger Crack Hit2k, ... to tags: diskdigger full indir diskdigger lisans anahtarı ... Disk Digger - Serial number .. ..Disk Digger...Search: Diskdigger Serial Key -Disk Digger - Serial ... -Search: Diskdigger Serial Key -Disk Digger Serial Key .... -Diskdigger lisans anahtar dotnetfx40 x86 x32.63 client eltechs exagear desktop for ... -"diskdigger lisans anahtarı" ile İlgili ...## #Diskdigger.Lisans.Anahtar.Serial.Numbers.. -disk digger serial key - Diskdigger Serial Key - YouTube 8a78ff9644
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Internet Download Manager 6.22 FINAL Crack REPACK [TechTools] Crack REPACK.md b/spaces/quidiaMuxgu/Expedit-SAM/Internet Download Manager 6.22 FINAL Crack REPACK [TechTools] Crack REPACK.md deleted file mode 100644 index 1efac8e3a0e558f727a0882d19a95a2e2fa3fd2e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Internet Download Manager 6.22 FINAL Crack REPACK [TechTools] Crack REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Internet Download Manager 6.22 FINAL Crack [TechTools] Crack


      Download ››››› https://geags.com/2uCqOX



      - -Your Internet Provider, Government or hackers can very easily track all your activity! ... Downloading tip: Download anonymous and secure with VPN protection ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/r3gm/RVC_HF/infer/modules/train/extract/extract_f0_print.py b/spaces/r3gm/RVC_HF/infer/modules/train/extract/extract_f0_print.py deleted file mode 100644 index 14ef598d73b807974204664f100c828918199816..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/infer/modules/train/extract/extract_f0_print.py +++ /dev/null @@ -1,298 +0,0 @@ -import os -import sys -import traceback - -import parselmouth - -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging -from LazyImport import lazyload - -import numpy as np -import pyworld -torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess -torch = lazyload("torch") -#from torch import Tensor # Fork Feature. Used for pitch prediction for torch crepe. -tqdm = lazyload("tqdm") -from infer.lib.audio import load_audio - -logging.getLogger("numba").setLevel(logging.WARNING) -from multiprocessing import Process - -exp_dir = sys.argv[1] -f = open("%s/extract_f0_feature.log" % exp_dir, "a+") - -DoFormant = False -Quefrency = 1.0 -Timbre = 1.0 - -def printt(strr): - print(strr) - f.write(f"{strr}\n") - f.flush() - - -n_p = int(sys.argv[2]) -f0method = sys.argv[3] -extraction_crepe_hop_length = 0 -try: - extraction_crepe_hop_length = int(sys.argv[4]) -except: - print("Temp Issue. echl is not being passed with argument!") - extraction_crepe_hop_length = 128 - -class FeatureInput(object): - def __init__(self, samplerate=16000, hop_size=160): - self.fs = samplerate - self.hop = hop_size - - self.f0_bin = 256 - self.f0_max = 1100.0 - self.f0_min = 50.0 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - - def mncrepe(self, method, x, p_len, crepe_hop_length): - f0 = None - torch_device_index = 0 - torch_device = torch.device( - f"cuda:{torch_device_index % torch.cuda.device_count()}" - ) if torch.cuda.is_available() \ - else torch.device("mps") if torch.backends.mps.is_available() \ - else torch.device("cpu") - - audio = torch.from_numpy(x.astype(np.float32)).to(torch_device, copy=True) - audio /= torch.quantile(torch.abs(audio), 0.999) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - - if method == 'mangio-crepe': - pitch: torch.Tensor = torchcrepe.predict( - audio, - self.fs, - crepe_hop_length, - self.f0_min, - self.f0_max, - "full", - batch_size=crepe_hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // crepe_hop_length - # Resize the pitch - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - - elif method == 'crepe': - batch_size = 512 - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.fs, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=batch_size, - device=torch_device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 = f0[1:] # Get rid of extra first frame - - return f0 - - def get_pm(self, x, p_len): - f0 = parselmouth.Sound(x, self.fs).to_pitch_ac( - time_step=160 / 16000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ).selected_array["frequency"] - - return np.pad( - 
f0, - [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], - mode="constant" - ) - - def get_harvest(self, x): - f0_spectral = pyworld.harvest( - x.astype(np.double), - fs=self.fs, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop / self.fs, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) - - def get_dio(self, x): - f0_spectral = pyworld.dio( - x.astype(np.double), - fs=self.fs, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop / self.fs, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) - - def get_rmvpe(self, x): - if hasattr(self, "model_rmvpe") == False: - from infer.lib.rmvpe import RMVPE - - print("Loading rmvpe model") - self.model_rmvpe = RMVPE( - "assets/rmvpe/rmvpe.pt", is_half=False, device="cpu" - ) - return self.model_rmvpe.infer_from_audio(x, thred=0.03) - - def get_rmvpe_dml(self, x): - ... - - def get_f0_method_dict(self): - return { - "pm": self.get_pm, - "harvest": self.get_harvest, - "dio": self.get_dio, - "rmvpe": self.get_rmvpe - } - - def get_f0_hybrid_computation( - self, - methods_str, - x, - p_len, - crepe_hop_length, - ): - # Get various f0 methods from input to use in the computation stack - s = methods_str - s = s.split("hybrid")[1] - s = s.replace("[", "").replace("]", "") - methods = s.split("+") - f0_computation_stack = [] - - for method in methods: - if method in self.f0_method_dict: - f0 = self.f0_method_dict[method](x, p_len) if method == 'pm' else self.f0_method_dict[method](x) - f0_computation_stack.append(f0) - elif method == 'crepe' or method == 'mangio-crepe': - self.the_other_complex_function(x, method, crepe_hop_length) - - if len(f0_computation_stack) != 0: - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) if len(f0_computation_stack)>1 else f0_computation_stack[0] - return f0_median_hybrid - else: - raise ValueError("No valid methods were provided") - - def compute_f0(self, path, f0_method, crepe_hop_length): - x = load_audio(path, self.fs, DoFormant, Quefrency, Timbre) - p_len = x.shape[0] // self.hop - - if f0_method in self.f0_method_dict: - f0 = self.f0_method_dict[f0_method](x, p_len) if f0_method == 'pm' else self.f0_method_dict[f0_method](x) - elif f0_method in ['crepe', 'mangio-crepe']: - f0 = self.mncrepe(f0_method, x, p_len, crepe_hop_length) - elif "hybrid" in f0_method: # EXPERIMENTAL - # Perform hybrid median pitch estimation - f0 = self.get_f0_hybrid_computation( - f0_method, - x, - p_len, - crepe_hop_length, - ) - return f0 - - def coarse_f0(self, f0): - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( - self.f0_bin - 2 - ) / (self.f0_mel_max - self.f0_mel_min) + 1 - - # use 0 or 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 - f0_coarse = np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( - f0_coarse.max(), - f0_coarse.min(), - ) - return f0_coarse - - def go(self, paths, f0_method, crepe_hop_length, thread_n): - if len(paths) == 0: - printt("no-f0-todo") - return - with tqdm.tqdm(total=len(paths), leave=True, position=thread_n) as pbar: - description = f"thread:{thread_n}, f0ing, Hop-Length:{crepe_hop_length}" - pbar.set_description(description) - - for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): - try: - if ( - os.path.exists(opt_path1 + ".npy") - and os.path.exists(opt_path2 + ".npy") - ): - pbar.update(1) - continue - - featur_pit 
= self.compute_f0(inp_path, f0_method, crepe_hop_length) - np.save( - opt_path2, - featur_pit, - allow_pickle=False, - ) # nsf - coarse_pit = self.coarse_f0(featur_pit) - np.save( - opt_path1, - coarse_pit, - allow_pickle=False, - ) # ori - pbar.update(1) - except Exception as e: - printt(f"f0fail-{idx}-{inp_path}-{traceback.format_exc()}") - - -if __name__ == "__main__": - # exp_dir=r"E:\codes\py39\dataset\mi-test" - # n_p=16 - # f = open("%s/log_extract_f0.log"%exp_dir, "w") - printt(sys.argv) - featureInput = FeatureInput() - paths = [] - inp_root = "%s/1_16k_wavs" % (exp_dir) - opt_root1 = "%s/2a_f0" % (exp_dir) - opt_root2 = "%s/2b-f0nsf" % (exp_dir) - - os.makedirs(opt_root1, exist_ok=True) - os.makedirs(opt_root2, exist_ok=True) - for name in sorted(list(os.listdir(inp_root))): - inp_path = "%s/%s" % (inp_root, name) - if "spec" in inp_path: - continue - opt_path1 = "%s/%s" % (opt_root1, name) - opt_path2 = "%s/%s" % (opt_root2, name) - paths.append([inp_path, opt_path1, opt_path2]) - - ps = [] - print("Using f0 method: " + f0method) - for i in range(n_p): - p = Process( - target=featureInput.go, - args=(paths[i::n_p], f0method, extraction_crepe_hop_length, i), - ) - ps.append(p) - p.start() - for i in range(n_p): - ps[i].join() \ No newline at end of file diff --git a/spaces/radames/Candle-BLIP-Image-Captioning/build/m.d.ts b/spaces/radames/Candle-BLIP-Image-Captioning/build/m.d.ts deleted file mode 100644 index a5902f17e30d9670ba0a50b4a3fad1e6a5fec233..0000000000000000000000000000000000000000 --- a/spaces/radames/Candle-BLIP-Image-Captioning/build/m.d.ts +++ /dev/null @@ -1,55 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -/** -*/ -export class Model { - free(): void; -/** -* @param {Uint8Array} weights -* @param {Uint8Array} tokenizer -* @param {Uint8Array} config -* @param {boolean} quantized -*/ - constructor(weights: Uint8Array, tokenizer: Uint8Array, config: Uint8Array, quantized: boolean); -/** -* @param {Uint8Array} image -* @returns {string} -*/ - generate_caption_from_image(image: Uint8Array): string; -} - -export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module; - -export interface InitOutput { - readonly memory: WebAssembly.Memory; - readonly __wbg_model_free: (a: number) => void; - readonly model_load: (a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number) => void; - readonly model_generate_caption_from_image: (a: number, b: number, c: number, d: number) => void; - readonly main: (a: number, b: number) => number; - readonly __wbindgen_add_to_stack_pointer: (a: number) => number; - readonly __wbindgen_malloc: (a: number, b: number) => number; - readonly __wbindgen_free: (a: number, b: number, c: number) => void; - readonly __wbindgen_realloc: (a: number, b: number, c: number, d: number) => number; - readonly __wbindgen_start: () => void; -} - -export type SyncInitInput = BufferSource | WebAssembly.Module; -/** -* Instantiates the given `module`, which can either be bytes or -* a precompiled `WebAssembly.Module`. -* -* @param {SyncInitInput} module -* -* @returns {InitOutput} -*/ -export function initSync(module: SyncInitInput): InitOutput; - -/** -* If `module_or_path` is {RequestInfo} or {URL}, makes a request and -* for everything else, calls `WebAssembly.instantiate` directly. 
-* -* @param {InitInput | Promise} module_or_path -* -* @returns {Promise} -*/ -export default function __wbg_init (module_or_path?: InitInput | Promise): Promise; diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Bmw Diagnostic Head Emulator V1.2 HOT.md b/spaces/raedeXanto/academic-chatgpt-beta/Bmw Diagnostic Head Emulator V1.2 HOT.md deleted file mode 100644 index 3fb49474df970ad513395a709c11b951eaa39dc9..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Bmw Diagnostic Head Emulator V1.2 HOT.md +++ /dev/null @@ -1,55 +0,0 @@ - -

      BMW Diagnostic Head Emulator v1.2: What Is It and How to Use It

      -

      If you own a BMW vehicle, you might have heard of BMW diagnostic head emulator v1.2, a software tool that allows you to communicate with your car using a standard OBD2 interface. But what exactly is this software, how does it work, and why do you need it? In this article, we will answer these questions and show you how to use BMW diagnostic head emulator v1.2 for diagnostics and coding.

      -

      Introduction

      -

      BMW diagnostic head emulator v1.2 is a software application that emulates the original BMW diagnostic head (also known as GT1 or DIS) that is used by BMW dealers and workshops. The software enables you to access various functions and features of your BMW vehicle, such as reading and clearing fault codes, viewing live data, performing tests, coding, and programming.

      -

      Bmw Diagnostic Head Emulator V1.2


      Download File ❤❤❤ https://tinourl.com/2uL4lc



      -

The main advantage of using BMW diagnostic head emulator v1.2 is that you don't need any expensive or specialized hardware or cables to connect to your vehicle. All you need is a standard OBD2 interface that supports K-line or CAN protocols, such as ELM327 or K+DCAN, and a laptop or PC running Windows. The software is compatible with most BMW models from 1995 to 2010.

      -

To use BMW diagnostic head emulator v1.2, you need to install and set up the software on your computer first. The installation process is simple and straightforward, as long as you follow the instructions carefully. You can download the software from various sources online, but make sure you get a reliable and virus-free version.

      -

      How to Connect BMW Diagnostic Head Emulator v1.2 to Your Vehicle

      -

      Once you have installed the software on your computer, you need to connect it to your vehicle using an OBD2 interface. The OBD2 interface is a device that plugs into the OBD port of your vehicle, usually located under the dashboard or near the steering wheel. The OBD port is a standard connector that allows you to access various systems and sensors of your vehicle.

      -

The OBD2 interface also connects to your computer via a USB cable or a Bluetooth connection. Depending on the type of interface you have, you might need to install some drivers or software on your computer as well. You can find more information about the different types of OBD2 interfaces online. Make sure you get an interface that supports K-line or CAN protocols, as these are required for communication with BMW vehicles.

      -

After connecting the OBD2 interface to your vehicle and your computer, you need to configure the network settings and the protocols for communication. You can do this by opening the BMW diagnostic head emulator v1.2 software and going to the Settings menu. Here, you need to select the correct COM port for your interface, as well as the baud rate, parity, data bits, and stop bits. You can find these values in the manual or on the website of your interface.
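If you are curious how those settings map to code, the snippet below is a minimal, hypothetical sketch that opens such an interface from Python with the pyserial library. It is not part of the emulator itself, and the COM port, baud rate, and other values are placeholders; take the real values from your interface's manual, as described above.

```python
# Minimal, hypothetical sketch: opening a K+DCAN-style OBD2 cable with pyserial.
# The port name and parameters are placeholders -- use the values from your
# interface's manual, exactly as entered in the Settings menu above.
import serial

PORT_SETTINGS = {
    "port": "COM3",                   # assumed Windows COM port; check Device Manager
    "baudrate": 9600,                 # placeholder; your cable's manual gives the real rate
    "bytesize": serial.EIGHTBITS,     # data bits
    "parity": serial.PARITY_NONE,     # parity
    "stopbits": serial.STOPBITS_ONE,  # stop bits
    "timeout": 1,                     # read timeout in seconds
}

try:
    with serial.Serial(**PORT_SETTINGS) as link:
        print(f"Opened {link.port} at {link.baudrate} baud")
        # A diagnostic tool would now exchange K-line or CAN frames over this link.
except serial.SerialException as err:
    print(f"Could not open the interface: {err}")
```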

      -

      Next, you need to select the protocol that matches your vehicle. There are two main protocols for BMW vehicles: K-line and CAN. K-line is an older protocol that uses a single wire for communication, while CAN is a newer protocol that uses two wires for communication. You can find out which protocol your vehicle uses by checking the OBD port. If it has 7 pins, it uses K-line; if it has 16 pins, it uses CAN.

      -

If your vehicle uses K-line, you need to select the KWP2000 protocol in the software. If your vehicle uses CAN, you need to select the ISO15765 protocol in the software. You also need to select the correct CAN ID for your vehicle, which is a hexadecimal number that identifies your vehicle model and module. You can find lists of CAN IDs online. For example, if you have a BMW E90 3 Series with an engine module, the CAN ID is 12.
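The selection rule above can be summarized in a few lines of code. The sketch below is purely illustrative: the pin-count rule mirrors the description in this article, and the CAN ID table is a hypothetical example containing only the single value quoted in the text, not a real lookup list.

```python
# Illustrative only: the pin-count rule described above, plus a hypothetical
# CAN ID table with just the example value quoted in the text.
def choose_protocol(obd_pin_count: int) -> str:
    """Return the protocol to select in the emulator's settings."""
    if obd_pin_count == 7:
        return "KWP2000"    # K-line vehicles
    if obd_pin_count == 16:
        return "ISO15765"   # CAN vehicles
    raise ValueError("Unexpected OBD connector -- check your vehicle's port")

EXAMPLE_CAN_IDS = {
    ("BMW E90 3 Series", "engine"): 0x12,  # hexadecimal, as the article notes
}

if __name__ == "__main__":
    print(choose_protocol(16))                                   # ISO15765
    print(hex(EXAMPLE_CAN_IDS[("BMW E90 3 Series", "engine")]))  # 0x12
```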

      -

      -

      How to Use BMW Diagnostic Head Emulator v1.2 for Diagnostics and Coding

      -

      After setting up the connection between your vehicle and your computer, you can start using BMW diagnostic head emulator v1.2 for diagnostics and coding. To do this, you need to launch the software and select the vehicle model and module that you want to access. You can do this by clicking on the Vehicle menu and choosing from the list of available models and modules.

      -

      Once you have selected the vehicle model and module, you can access various functions and features of your vehicle, such as reading and clearing fault codes, viewing live data, and performing tests. To read and clear fault codes, you need to click on the Fault Codes menu and choose Read Fault Codes or Clear Fault Codes. The software will display a list of fault codes that are stored in your vehicle's memory, along with their descriptions and possible causes. You can clear the fault codes by clicking on Clear Fault Codes.

      -

      To view live data, you need to click on the Live Data menu and choose View Live Data. The software will display a list of parameters that are measured by your vehicle's sensors, such as engine speed, coolant temperature, air flow, fuel pressure, etc. You can view these parameters in real time by clicking on them. You can also record and save the live data for later analysis.

      -

      To perform tests, you need to click on the Tests menu and choose Perform Tests. The software will display a list of tests that are available for your vehicle model and module, such as injector test, fuel pump test, oxygen sensor test, etc. You can perform these tests by clicking on them and following the instructions on the screen. The software will show you the results of the tests and indicate if there are any problems or errors.

      -

      To code and program new features, settings, and options for your vehicle, you need to click on the Coding menu and choose Code or Program. The software will display a list of coding or programming options that are available for your vehicle model and module, such as changing the language of the dashboard, activating or deactivating certain functions or features, adjusting some parameters or values, etc. You can code or program these options by clicking on them and selecting the desired option from a drop-down menu or entering a new value in a text box.

      -

      Tips and Tricks for Using BMW Diagnostic Head Emulator v1.2 Effectively

      -

      Using BMW diagnostic head emulator v1.2 can be very useful and convenient for diagnosing and coding your BMW vehicle, but it can also be challenging and risky if you don't know what you are doing or if you encounter some errors or issues with the software or the connection. Here are some tips and tricks for using BMW diagnostic head emulator v1.2 effectively:

      -
        -
      • Make sure you have a stable power supply for your vehicle and your computer when using BMW diagnostic head emulator v1.2. A low battery voltage or a sudden power loss can cause communication errors or damage to your vehicle's electronics.
      • -
• Make sure you have a good-quality OBD2 interface that supports K-line or CAN protocols and has firmware compatible with BMW diagnostic head emulator v1.2. A cheap or faulty interface can cause communication errors or damage to your vehicle's electronics.
      • -
• Make sure you have a reliable and virus-free version of BMW diagnostic head emulator v1.2. You can download the software from various sources online, but make sure you scan the file for viruses and malware before installing it.
      • -
• Make sure you back up your vehicle's original settings and data before coding or programming anything with BMW diagnostic head emulator v1.2. You can do this by clicking on the Backup menu and choosing Backup Settings or Backup Data. This way, you can restore your vehicle's original settings and data if something goes wrong or if you are not satisfied with the changes.
      • -
• Make sure you know what you are doing and what the consequences are of coding or programming something with BMW diagnostic head emulator v1.2. Some coding or programming options can affect the performance, safety, or legality of your vehicle, so you should be careful and responsible when using them. You should also consult the manual or the website of your vehicle model and module for more information and guidance.
      • -
• Make sure you troubleshoot common errors and issues with BMW diagnostic head emulator v1.2 or the connection. Some common errors and issues are: communication error, interface error, protocol error, software error, etc. You can find solutions and fixes for these errors and issues online, or contact the developer or the seller of the software or the interface for support.
      • -
• Make sure you update the software and the firmware of the OBD2 interface regularly. This way, you can ensure that you have the latest features, functions, and bug fixes for BMW diagnostic head emulator v1.2 and the OBD2 interface. You can update the software by clicking on the Update menu and choosing Update Software. You can update the firmware of the OBD2 interface by following the instructions in the manual or on the website of your interface.
      • -
• Make sure you access additional resources and support for BMW diagnostic head emulator v1.2. There are many online forums, blogs, videos, and guides that can help you learn more about BMW diagnostic head emulator v1.2 and how to use it effectively. You can also join online communities and groups of BMW enthusiasts and users of BMW diagnostic head emulator v1.2.
      • -
      -

      Conclusion

      -

      In conclusion, BMW diagnostic head emulator v1.2 is a powerful and versatile software tool that allows you to communicate with your BMW vehicle using a standard OBD2 interface. You can use it for diagnostics and coding, as well as for accessing various functions and features of your vehicle. You can also save money and time by using it instead of going to a dealer or a workshop.

      -

If you want to try out BMW diagnostic head emulator v1.2 for yourself, you can download it from various sources online and install it on your computer. You also need an OBD2 interface that supports K-line or CAN protocols, such as ELM327 or K+DCAN, which you can buy online or from a local store.

      -

      If you have any questions or feedback about BMW diagnostic head emulator v1.2, feel free to contact us at info@bmwdiagnosticheademulator.com or leave a comment below. We would love to hear from you and help you with any issues or problems you might have with the software.

      -

      Thank you for reading this article and we hope you enjoyed it and learned something new. Happy diagnosing and coding!

      -

      FAQs

      -

      Here are some frequently asked questions about BMW diagnostic head emulator v1.2:

      -
        -
      1. What is the difference between BMW diagnostic head emulator v1.2 and other BMW diagnostic software?
      2. -

        BMW diagnostic head emulator v1.2 is different from other BMW diagnostic software in that it emulates the original BMW diagnostic head (GT1 or DIS) that is used by dealers and workshops. This means that it has more functions and features than other software, such as coding and programming options. It also means that it is compatible with most BMW models from 1995 to 2010.

        -
      3. Is BMW diagnostic head emulator v1.2 legal to use?
      4. -

BMW diagnostic head emulator v1.2 is legal for personal use only, as long as you don't violate any laws or regulations regarding your vehicle's performance, safety, or emissions. However, if you use it commercially, or modify your vehicle's settings or data in a way that affects the performance, safety, or emissions of your vehicle, you might be breaking the law or voiding your warranty. You should always check the laws and regulations in your area before using BMW diagnostic head emulator v1.2 or any other diagnostic software.

        -
      5. Is BMW diagnostic head emulator v1.2 safe to use?
      6. -

BMW diagnostic head emulator v1.2 is safe to use as long as you use it correctly and responsibly. You should always back up your vehicle's original settings and data before coding or programming anything with the software, and restore them if something goes wrong or if you are not satisfied with the changes. You should also use a good-quality OBD2 interface that supports K-line or CAN protocols and has firmware compatible with the software. Finally, avoid using the software when your vehicle or your computer has a low battery voltage or an unstable power supply.

        -
      7. How much does BMW diagnostic head emulator v1.2 cost?
      8. -

BMW diagnostic head emulator v1.2 is free software that you can download from various sources online. However, you might need to pay for some additional features or functions that are not included in the free version, such as updates, support, or premium options. You can find more information about the pricing and the payment methods on the website of the developer or the seller of the software.

        -
      9. Where can I get BMW diagnostic head emulator v1.2?
      10. -

You can get BMW diagnostic head emulator v1.2 from various sources online. However, you should be careful and cautious when downloading the software from unknown or untrusted sources, as they might contain viruses, malware, or spyware that can harm your computer or your vehicle. You should always scan the file for viruses and malware before installing it, and use reputable antivirus software on your computer.

        -
      11. How can I learn more about BMW diagnostic head emulator v1.2?
      12. -

You can learn more about BMW diagnostic head emulator v1.2 by accessing additional resources and support online. There are many online forums, blogs, videos, and guides that can help you learn more about the software and how to use it effectively. You can also join online communities and groups of BMW enthusiasts and users of BMW diagnostic head emulator v1.2.

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download RD Sharma Class 11 Maths Book PDF for Free Volume I and II.md b/spaces/raedeXanto/academic-chatgpt-beta/Download RD Sharma Class 11 Maths Book PDF for Free Volume I and II.md deleted file mode 100644 index 7baa4ecfb774b8d7887ef2ce9c273d65c1b5deef..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download RD Sharma Class 11 Maths Book PDF for Free Volume I and II.md +++ /dev/null @@ -1,141 +0,0 @@ -
        -

        RD Sharma Mathematics Class 11 PDF Download: A Comprehensive Guide

        -

        If you are a student of class 11th who is preparing for the CBSE board exams or any competitive exams like JEE Main, NEET, etc., then you must be looking for a good book to study mathematics. Mathematics is a subject that requires a lot of practice and conceptual clarity to score well in exams. One of the most popular and recommended books for class 11th mathematics is RD Sharma Mathematics Class 11.

        -

        rdsharmamathematicsclass11pdfdownload


        Download File » https://tinourl.com/2uL0xd



        -

        RD Sharma Mathematics Class 11 is a comprehensive textbook that covers the entire syllabus prescribed by the CBSE (Central Board of Secondary Education) for class 11th. It is written by R.D. Sharma, who is a renowned author and teacher of mathematics. He has written many books for different classes and levels of mathematics.

        -

        In this article, we will tell you everything you need to know about RD Sharma Mathematics Class 11 PDF download. We will explain what is RD Sharma Mathematics Class 11, why should you download it, how to download it for free, what are its features, what are its contents, and some frequently asked questions. By the end of this article, you will have a clear idea about how to use RD Sharma Mathematics Class 11 PDF for your exam preparation.

        -

        Introduction

        -

        What is RD Sharma Mathematics Class 11?

        -

        RD Sharma Mathematics Class 11 is a textbook that covers the entire syllabus of class 11th mathematics as prescribed by the CBSE. It is divided into two volumes: Volume I and Volume II. Volume I covers the topics related to algebra, trigonometry, complex numbers, etc., while Volume II covers the topics related to coordinate geometry, calculus, statistics, probability, etc.

        -


        -

        RD Sharma Mathematics Class 11 is designed in a way that helps the students to learn the concepts and methods of solving problems in an easy and effective way. The book follows a logical sequence of topics and explains each topic in detail with examples and illustrations. The book also provides a large number of practice questions and exercises for each topic to help the students master the concepts and apply them in different situations.

        -

        RD Sharma Mathematics Class 11 is not only useful for the CBSE board exams but also for various competitive exams like JEE Main, NEET, etc. The book covers all the topics that are important for these exams and also provides some tips and tricks to solve them quickly and accurately.

        -

        Why should you download RD Sharma Mathematics Class 11 PDF?

        -

        There are many reasons why you should download RD Sharma Mathematics Class 11 PDF for your exam preparation. Some of them are:

        -
          -
        • RD Sharma Mathematics Class 11 PDF is available for free on various websites and platforms. You can easily download it on your device and access it anytime and anywhere without any hassle.
        • -
        • RD Sharma Mathematics Class 11 PDF is compatible with any device like laptop, tablet, smartphone, etc. You can read it on any screen size and adjust the font size and brightness according to your preference.
        • -
        • RD Sharma Mathematics Class 11 PDF saves you a lot of time and money that you would otherwise spend on buying or renting a hard copy of the book. You can also save paper and contribute to environmental conservation by using a digital copy of the book.
        • -
        • RD Sharma Mathematics Class 11 PDF allows you to highlight, bookmark, annotate, search, etc., on the book as per your convenience. You can also share it with your friends and classmates easily through email or social media.
        • -
        • RD Sharma Mathematics Class 11 PDF helps you to prepare for your exams in a smart way. You can revise the topics quickly by using the summary and revision sections provided in the book. You can also practice the questions and exercises by using the solutions provided in the book or online.
        • -
        -

        How to download RD Sharma Mathematics Class 11 PDF for free?

        -

        If you want to download RD Sharma Mathematics Class 11 PDF for free, then you can follow these simple steps:

        -
          -
        1. Go to any website or platform that offers RD Sharma Mathematics Class 11 PDF for free download. Some of them are:
        2. - -
        3. Select the volume or part of RD Sharma Mathematics Class 11 that you want to download.
        4. -
        5. Click on the download button or link provided on the website or platform.
        6. -
        7. Choose the format (PDF) and location (device) where you want to save the file.
        8. -
        9. Wait for a few seconds or minutes until the file is downloaded completely.
        10. -
        11. Open the file on your device using any PDF reader or browser.
        12. -
        13. Enjoy reading and learning from RD Sharma Mathematics Class 11 PDF.
        14. -
        -

        Features of RD Sharma Mathematics Class 11 PDF

        -

        Covers the latest CBSE syllabus

        -

        One of the main features of RD Sharma Mathematics Class 11 PDF is that it covers the latest syllabus prescribed by the CBSE for class 11th mathematics. The book follows the guidelines and recommendations given by the NCERT (National Council of Educational Research and Training) which is the apex body for curriculum development in India. The book also adheres to the latest exam pattern and marking scheme followed by the CBSE.

        -

        Provides detailed explanations and examples

        -

        Another feature of RD Sharma Mathematics Class 11 PDF is that it provides detailed explanations and examples for each topic covered in the book. The book explains each concept in a simple and lucid language that makes it easy for the students to understand. The book also provides numerous examples that illustrate how to apply the concepts in different situations and scenarios. The examples are solved step by step with proper reasoning and logic that helps the students to learn how to solve problems correctly.

        -

        Includes a large number of practice questions and exercises

        -

        A third feature of RD Sharma Mathematics Class 11 PDF is that it includes a large number of practice questions and exercises for each topic covered in the book. The book provides various types of questions such as objective type, short answer type, long answer type, multiple choice type, etc., that test the knowledge and skills of the students on different aspects of mathematics. The questions are graded according to their difficulty level from easy to hard so that students can practice according to their level of preparation. The exercises are also divided into two categories: solved exercises and unsolved exercises. The solved exercises provide solutions for some selected questions while the unsolved exercises provide questions for self-practice.

        -

        Contains summary and revision sections

        -

A fourth feature of RD Sharma Mathematics Class 11 PDF is that it contains summary and revision sections at the end of each chapter or topic covered in the book. The summary section lists the main points and formulae of the chapter or topic. The revision section provides some important questions and answers that help the students to revise the chapter or topic quickly and effectively. These sections are very useful for the students who want to refresh their memory and recall the concepts before exams.

        -

        Offers solutions for all the problems

        -

        A fifth feature of RD Sharma Mathematics Class 11 PDF is that it offers solutions for all the problems given in the book. The solutions are provided at the end of each volume or part of the book. The solutions are written in a clear and concise manner that helps the students to understand how to solve the problems step by step. The solutions also provide alternative methods and shortcuts to solve some problems faster and easier. The solutions are very helpful for the students who want to check their answers and learn from their mistakes.

        -

        Contents of RD Sharma Mathematics Class 11 PDF

        -

        Volume I: Chapters 1 to 21

        -

        The first volume of RD Sharma Mathematics Class 11 PDF covers the topics related to algebra, trigonometry, complex numbers, etc. The chapters included in this volume are:

        -

        Sets, Relations, Functions, etc.

        -

        This chapter introduces the basic concepts of sets, relations, functions, etc., that are essential for mathematics. It covers topics such as types of sets, operations on sets, Venn diagrams, types of relations, types of functions, domain and range of functions, inverse functions, composition of functions, etc. It also provides some applications of these concepts in real life situations.

        -

        Trigonometric Functions and Equations

        -

        This chapter deals with the trigonometric functions and equations that are used to study the properties and behavior of angles and triangles. It covers topics such as measurement of angles, trigonometric ratios and identities, graphs of trigonometric functions, values of trigonometric functions at sum or difference of angles, transformation formulae, values of trigonometric functions at multiples and submultiples of angles, sine and cosine formulae and their applications, trigonometric equations, etc. It also provides some applications of these concepts in geometry and physics.

        -

        Complex Numbers and Quadratic Equations

        -

        This chapter explores the complex numbers and quadratic equations that are used to represent and solve some algebraic expressions and equations that cannot be solved by real numbers. It covers topics such as introduction to complex numbers, operations on complex numbers, modulus and argument of complex numbers, polar form and Euler form of complex numbers, roots of complex numbers, quadratic equations and their solutions by factorization, completing the square and quadratic formula methods, nature and discriminant of roots of quadratic equations, etc. It also provides some applications of these concepts in geometry and engineering.

        -

        Permutations, Combinations and Binomial Theorem

        -

        This chapter explains the permutations, combinations and binomial theorem that are used to count and arrange objects in different ways. It covers topics such as fundamental principle of counting, factorial notation, permutations and their properties, combinations and their properties, binomial theorem for positive integral index, binomial theorem for any index, general term in binomial expansion, middle term in binomial expansion, properties of binomial coefficients, applications of binomial theorem in probability, etc. It also provides some applications of these concepts in statistics and cryptography.

        -

        Sequences and Series

        -

        This chapter discusses the sequences and series that are used to represent and analyze patterns and trends in mathematics. It covers topics such as introduction to sequences and series, arithmetic progression (AP) and its general term, sum of n terms of an AP, arithmetic mean (AM) and its properties, geometric progression (GP) and its general term, sum of n terms of a GP, geometric mean (GM) and its properties, some special series such as harmonic progression (HP), arithmetic-geometric progression (AGP), etc. It also provides some applications of these concepts in finance and physics.

        -

        Volume II: Chapters 22 to 33

        -

        The second volume of RD Sharma Mathematics Class 11 PDF covers the topics related to coordinate geometry, calculus, statistics, probability, etc. The chapters included in this volume are:

        -

        Coordinate Geometry

        -

        This chapter introduces the coordinate geometry that is used to study the shapes and positions of geometric figures in a plane or space using coordinates. It covers topics such as brief review of cartesian system of rectangular coordinates, the straight lines and their various forms and properties, the circle and its standard equation and properties, the parabola and its standard equation and properties, the ellipse and its standard equation and properties, the hyperbola and its standard equation and properties, introduction to 3-D coordinate geometry, etc. It also provides some applications of these concepts in engineering and astronomy.

        -

        Limits and Derivatives

        -

        This chapter deals with the limits and derivatives that are used to study the behavior and rate of change of functions. It covers topics such as introduction to limits, algebraic operations on limits, evaluation of limits using standard results, sandwich theorem, introduction to derivatives, derivatives using first principle method, derivatives using rules method, derivatives using chain rule method, derivatives using implicit function method, derivatives using logarithmic differentiation method, etc. It also provides some applications of these concepts in physics and economics.

        -

        Mathematical Reasoning

This chapter introduces the mathematical reasoning that is used to construct and evaluate logical arguments and statements in mathematics. It covers topics such as introduction to statements, types of statements, negation of statements, compound statements, logical connectives, truth tables, tautologies and contradictions, logical equivalence and implication, contrapositive and converse of statements, quantifiers and their negations, etc. It also provides some applications of these concepts in computer science and cryptography.

        -

        Statistics and Probability

        -

        This chapter discusses the statistics and probability that are used to collect, organize, analyze and interpret data and measure the uncertainty and likelihood of events. It covers topics such as introduction to statistics, measures of central tendency such as mean, median and mode, measures of dispersion such as range, variance and standard deviation, graphical representation of data such as histograms, frequency polygons and ogives, introduction to probability, basic concepts and terms of probability such as sample space, events, outcomes, etc., types of events such as mutually exclusive events, independent events, etc., probability of events using classical definition, empirical definition and axiomatic definition methods, etc. It also provides some applications of these concepts in social science and medicine.

        -

        Conclusion

        -

        In this article, we have given you a comprehensive guide on RD Sharma Mathematics Class 11 PDF download. We have explained what is RD Sharma Mathematics Class 11, why should you download it, how to download it for free, what are its features, what are its contents and some frequently asked questions. We hope that this article has helped you to understand how to use RD Sharma Mathematics Class 11 PDF for your exam preparation.

        -

        RD Sharma Mathematics Class 11 is one of the best books for class 11th mathematics that covers the entire CBSE syllabus and also prepares you for various competitive exams like JEE Main, NEET, etc. It provides detailed explanations and examples for each topic and also includes a large number of practice questions and exercises for each topic. It also contains summary and revision sections for quick revision and solutions for all the problems for self-checking.

        -

        If you want to download RD Sharma Mathematics Class 11 PDF for free, then you can go to any website or platform that offers it for free download. You can also access the solutions for RD Sharma Mathematics Class 11 online or offline. You can read RD Sharma Mathematics Class 11 PDF on any device like laptop, tablet or smartphone and also highlight, bookmark or annotate it as per your convenience.

        -

        RD Sharma Mathematics Class 11 PDF is a valuable resource for your exam preparation that will help you to score well in mathematics. You should read it thoroughly and practice it regularly to master the concepts and methods of solving problems in mathematics. You should also revise it before exams to recall the formulae and concepts quickly and accurately.

        -

        FAQs

        -

        Here are some frequently asked questions about RD Sharma Mathematics Class 11 PDF download:

        -
          -
        1. Q: Is RD Sharma Mathematics Class 11 PDF available for free download?
        2. -
        3. A: Yes, RD Sharma Mathematics Class 11 PDF is available for free download on various websites and platforms. You can go to any website or platform that offers it for free download and follow the steps given in this article to download it on your device.
        4. -
        5. Q: Is RD Sharma Mathematics Class 11 PDF compatible with any device?
        6. -
        7. A: Yes, RD Sharma Mathematics Class 11 PDF is compatible with any device like laptop, tablet or smartphone. You can read it on any screen size and adjust the font size and brightness according to your preference.
        8. -
        9. Q: Is RD Sharma Mathematics Class 11 PDF useful for competitive exams?
        10. -
        11. A: Yes, RD Sharma Mathematics Class 11 PDF is useful for competitive exams like JEE Main, NEET, etc. The book covers all the topics that are important for these exams and also provides some tips and tricks to solve them quickly and accurately.
        12. -
        13. Q: How many volumes or parts are there in RD Sharma Mathematics Class 11 PDF?
        14. -
        15. A: There are two volumes or parts in RD Sharma Mathematics Class 11 PDF: Volume I and Volume II. Volume I covers the topics related to algebra, trigonometry, complex numbers, etc., while Volume II covers the topics related to coordinate geometry, calculus, statistics, probability, etc.
        16. -
        17. Q: How many chapters or topics are there in each volume or part of RD Sharma Mathematics Class 11 PDF?
        18. -
        19. A: There are 21 chapters or topics in Volume I and 12 chapters or topics in Volume II of RD Sharma Mathematics Class 11 PDF. The chapters or topics are listed in this article under the section "Contents of RD Sharma Mathematics Class 11 PDF".
        20. -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Fifa 2002 Game Free Download PATCHED Full Version.md b/spaces/raedeXanto/academic-chatgpt-beta/Fifa 2002 Game Free Download PATCHED Full Version.md deleted file mode 100644 index 952ed5366fd8f5c06793b7694c1944491499d108..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Fifa 2002 Game Free Download PATCHED Full Version.md +++ /dev/null @@ -1,35 +0,0 @@ -
        -

        Fifa 2002 Game Free Download Full Version: A Classic Football Game for PC

        -

        Fifa 2002 is a football video game released in 2001 by EA Sports. It is the ninth game in the Fifa series and features many improvements and innovations over its predecessors. In this article, we will review some of the features of Fifa 2002 and provide a link to download the full version of the game for free.

        -

        Features of Fifa 2002

        -
          -
        • Power bars for passes were introduced, allowing the player to control the strength and accuracy of their passes.
        • -
        • Dribbling was reduced to make the game more challenging and realistic.
        • -
        • The game included club emblems for many European clubs as well as major Dutch clubs such as PSV, AFC Ajax and Feyenoord.
        • -
        • The game also featured the Swiss Super League for the first time, at the cost of excluding the Greek League.
        • -
        • A card reward system licensed from Panini was also introduced, where the player could unlock a star player card after winning a certain competition.
        • -
        • The game also included a bonus game with the nations that had automatically qualified for the 2002 World Cup (France, Japan and South Korea), where the player could try to improve their FIFA ranking by playing international friendlies.
        • -
        • Many of the international teams in the game were not licensed, and some of them had only numbers as player names.
        • -
        -

        How to Download and Install Fifa 2002

        -

        To download and install Fifa 2002 for free, follow these steps:

        -

        Fifa 2002 Game Free Download Full Version


        DOWNLOAD ✸✸✸ https://tinourl.com/2uL12J



        -
          -
        1. Click on this link to download the game: FIFA_Football_2002_Win_Preinstalled_EN.zip
        2. -
        3. Extract the game using Winrar or 7zip.
        4. -
        5. Open the "FIFA Football 2002" folder and then the "Game" folder.
        6. -
        7. Double click on "RegSetup" to complete the registry.
        8. -
        9. Double click on "fifa2002" icon to play the game.
        10. -
        11. Enjoy!
        12. -
        -

        Note: This is an old game that may not run properly on newer operating systems. You may need to use compatibility mode or a virtual machine to run it. Also, this is an unofficial download link that may contain viruses or malware. Download at your own risk.

        - -

        Gameplay of Fifa 2002

        -

        Fifa 2002 is a football simulation game that aims to recreate the experience of playing in the 2002 World Cup. The game features 32 national teams that qualified for the tournament, as well as 9 teams that did not qualify but were included as unlockable teams. The game also features 20 official stadiums from Japan and South Korea, as well as realistic weather effects and crowd noises.

        -

        The gameplay of Fifa 2002 is noticeably slower and more deliberate than previous games in the series. Players behave more like human beings, taking time to control the ball, accelerate, and lose stamina. The game also introduces power bars for passes and shots, allowing the player to adjust the strength and accuracy of their actions. The game also features a new free kick system, where the player can curve the ball with the right analog stick.

        -

        The game has several modes of play, such as World Cup, Friendly, Custom League, and Custom Tournament. The World Cup mode allows the player to choose a team and play through the qualifying rounds and the final tournament. The Friendly mode allows the player to play a single match against any team of their choice. The Custom League and Custom Tournament modes allow the player to create their own competitions with their own rules and teams.

        -

        Reception of Fifa 2002

        -

        Fifa 2002 received generally positive reviews from critics and fans alike. The game was praised for its improved graphics, sound, and gameplay, as well as its variety of modes and teams. The game was also commended for its realistic representation of the 2002 World Cup atmosphere and excitement. Some of the criticisms of the game were its lack of online multiplayer, its occasional glitches and bugs, and its difficulty level.

        -

        The game was a commercial success, selling over 4 million copies worldwide. It was also nominated for several awards, such as Best Sports Game at the BAFTA Games Awards and Best Console Sports Game at the Interactive Achievement Awards. The game is considered by many to be one of the best football games ever made, and a classic in the Fifa series.

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Guided Reading Activity 7 2 Answers Us History Rar Discover the Secrets of History with Flashcards and Quizzes.md b/spaces/raedeXanto/academic-chatgpt-beta/Guided Reading Activity 7 2 Answers Us History Rar Discover the Secrets of History with Flashcards and Quizzes.md deleted file mode 100644 index b308d413deafbd8e5eae34404b5e5132db150571..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Guided Reading Activity 7 2 Answers Us History Rar Discover the Secrets of History with Flashcards and Quizzes.md +++ /dev/null @@ -1,151 +0,0 @@ - -

        Guided Reading Activity 7-2 Answers US History Rar

        -

        If you are a student of US history, you might have encountered guided reading activity 7-2 in your textbook or workbook. This activity covers some of the key events and issues that shaped the formation of the United States Constitution in the late 18th century. In this article, we will provide you with some background information, tips, and answers to help you complete this activity and learn more about US history.

        -

        Introduction

        -

        What is guided reading activity 7-2?

        -

Guided reading activity 7-2 is a set of questions that accompany a section of text from a US history textbook or workbook. The text is usually divided into three parts, each focusing on a different aspect of the topic. The questions are designed to test your comprehension, analysis, and critical thinking skills. You can find guided reading activity 7-2 in various sources, such as Quizlet.

        -

        Guided Reading Activity 7 2 Answers Us History Rar


        Download ––– https://tinourl.com/2uL2ys



        -

        Why is it important to study US history?

        -

        Studying US history is important for several reasons. First, it helps you understand how the United States came to be what it is today, and how its political, social, and economic systems work. Second, it helps you appreciate the diversity and complexity of the American people, culture, and values. Third, it helps you develop a sense of citizenship and civic responsibility, as well as a respect for democracy and human rights. Fourth, it helps you learn from the past successes and failures of the nation, and apply them to current and future challenges.

        -

        How to find the answers to guided reading activity 7-2?

        -

        To find the answers to guided reading activity 7-2, you need to do two things: read and research. First, you need to read the text carefully and attentively, paying attention to the main ideas, facts, and details. Second, you need to research additional information from reliable sources, such as books, websites, or videos. You can use online tools such as Bing or Wikipedia to help you with your research. However, you should not copy and paste from other sources without giving proper credit. You should also check your answers with your teacher or classmates to make sure they are correct and complete.

        -

        Main Body

        -

        Section 1: The Articles of Confederation and the Constitutional Convention

        -

        Subsection 1: The problems of the Articles of Confederation

        -

        The Articles of Confederation were the first constitution of the United States, adopted by the Continental Congress in 1777 and ratified by all 13 states by 1781. They created a loose confederation of sovereign states with a weak central government. However, they soon proved to be inadequate for governing a large and diverse nation. Some of the problems of the Articles of Confederation were:

        -
          -
        • Lack of executive and judicial branches: The central government had no president or courts to enforce laws or settle disputes.
        • -
        • Lack of power to tax or regulate trade: The central government had no authority to raise revenue or control commerce among states or with foreign nations.
        • -
        • Lack of national currency or army: The central government had no power to coin money or maintain a standing army.
        • -
        • Lack of unity and cooperation: The states often acted independently or even against each other's interests.
        • -
        • Lack of flexibility and stability: The central government needed unanimous consent from all states to amend or change any part of the Articles.
        • -
        -

        Subsection 2: The achievements of the Articles of Confederation

        -

        Despite their flaws, the Articles of Confederation also had some achievements that contributed to the development of the United States. Some of these achievements were:

        -

        Guided Reading Activity 7-2 Flashcards | Quizlet
        -Guided Reading Activity 7.2 Flashcards | Quizlet
        -Guided Reading Activity 7-2 US History Depression
        -Guided Reading Activity 7.2 US History Modernism
        -Guided Reading Activity 7-2 Shays's Rebellion
        -Guided Reading Activity 7-2 Great Compromise
        -Guided Reading Activity 7-2 Edward Hopper
        -Guided Reading Activity 7-2 F. Scott Fitzgerald
        -Guided Reading Activity 7-2 The Jazz Singer
        -Guided Reading Activity 7-2 T.S Eliot
        -Guided Reading Activity 7-2 Eugene O'Neil
        -Guided Reading Activity 7-2 John Dos Passos
        -Guided Reading Activity 7-2 Amy Lowell
        -Guided Reading Activity 7-2 Edna St. Vincent Millay
        -Guided Reading Activity 7-2 Ezra Pound
        -Guided Reading Activity 7-2 William Carlos Williams
        -Guided Reading Activity 7.2 Bohemian Artists
        -Guided Reading Activity 7.2 Manhattan Greenwich Village
        -Guided Reading Activity 7.2 Chicago's South Side
        -Guided Reading Activity 7.2 Carl Sandberg
        -Guided Reading Activity 7.2 Radio and Motion Pictures
        -Guided Reading Activity 7.2 Baseball and Boxing
        -Guided Reading Activity 7.2 Warren G. Harding Election
        -Guided Reading Activity 7.2 Quakers and Slavery
        -Guided Reading Activity 7.2 James Madison and Alexander Hamilton
        -Guided Reading Activity 7.2 Constitutional Convention Philadelphia
        -Guided Reading Activity 7.2 Virginia Plan and Court System
        -Guided Reading Activity 7.2 Roger Sherman and Senate
        -Guided Reading Activity 7.2 Slave Trade and Southern States
        -Guided Reading Activity 7.2 Elbridge Gerry and Bill of Rights
        -US History Chapter 7 Section 2 Flashcards | Quizlet
        -US History Chapter 7 Section 2 Modernism and Disenchantment
        -US History Chapter 7 Section 2 Hollow Men and Empty Dreams
        -US History Chapter 7 Section 2 The Great Gatsby and Superficiality
        -US History Chapter 7 Section 2 U.S.A Trilogy and Innovation
        -US History Chapter 7 Section 2 Leisure Time and Spending Money
        -US History Chapter 7 Section 3 Flashcards | Quizlet[^1^]

        -
          -
        • Winning independence from Britain: The Articles provided a framework for conducting diplomacy and negotiating peace with Britain after the Revolutionary War.
        • -
        • Establishing territorial expansion: The Articles facilitated the acquisition and organization of new lands west of the Appalachian Mountains through laws such as the Land Ordinance of 1785 and the Northwest Ordinance of 1787.
        • -
        • Promoting democracy and republicanism: The Articles reflected the ideals of self-government and popular sovereignty that inspired the American Revolution.
        • -
        • Fostering state sovereignty and experimentation: The Articles allowed each state to retain its own rights and powers, as well as experiment with different forms of government and policies.
        • -
        • Laying the foundation for a new constitution: The Articles revealed the weaknesses and strengths of confederalism, which influenced the drafting of a new constitution that balanced federalism with state rights.
        • -
        -

        Subsection 3: The events that led to the Constitutional Convention

        -

        The problems of the Articles of Confederation became more evident and urgent in the mid-1780s. Several events highlighted the need for a stronger national government that could address the economic, political, and social challenges facing the nation. Some of these events were:

        -
          -
        • The Annapolis Convention (1786): A meeting of delegates from five states to discuss trade issues and propose amendments to the Articles. However, the convention failed to attract enough attendance or support to make any significant changes.
        • -
        • The Shays' Rebellion (1786-1787): A violent uprising of farmers in western Massachusetts who protested against high taxes, foreclosures, and debt imprisonment. The rebellion exposed the inability of the central government to protect public order and security.
        • -
        • The Philadelphia Convention (1787): A meeting of delegates from 12 states (except Rhode Island) to revise the Articles. However, the convention decided to abandon the Articles altogether and draft a new constitution that created a federal system of government with three branches: legislative, executive, and judicial.
        • -
        -

        Section 2: The Virginia Plan and the Great Compromise

        -

        Subsection 1: The features of the Virginia Plan

        -

        The Virginia Plan was a proposal for a new constitution that was presented by James Madison at the Philadelphia Convention. It was based on the idea that the national government should have more power than the state governments, and that the representation in the legislature should be based on the population or wealth of each state. Some of the features of the Virginia Plan were:

        -
          -
        • A bicameral legislature: The plan called for two houses of Congress: a lower house elected by the people, and an upper house chosen by the lower house from nominees submitted by the state legislatures.
        • -
        • A proportional representation: The plan proposed that the number of seats in both houses of Congress should be determined by the population or wealth of each state, giving more influence to the large and wealthy states.
        • -
        • A strong executive: The plan suggested that a single person or a council should be elected by the national legislature to serve as the chief executive of the nation, with broad powers to administer laws and appoint officials.
        • -
        • A national judiciary: The plan proposed that a supreme court and inferior courts should be established by the national legislature, with judges serving for life and having jurisdiction over national and state laws.
        • -
        • A supremacy clause: The plan declared that the national constitution and laws should be the supreme law of the land, binding on all states and citizens.
        • -
        -

        Subsection 2: The objections of the small states to the Virginia Plan

        -

        The Virginia Plan was supported by the large and populous states, such as Virginia, Pennsylvania, and Massachusetts. However, it faced strong opposition from the small and less populous states, such as New Jersey, Delaware, and Connecticut. Some of the objections of the small states to the Virginia Plan were:

        -
          -
        • A loss of sovereignty: The small states feared that the Virginia Plan would reduce their power and autonomy in the national government, and make them dependent on the will of the large states.
        • -
        • A violation of equality: The small states argued that the Virginia Plan was unfair and undemocratic, because it gave more representation and influence to the large states based on their population or wealth, rather than treating all states equally as sovereign entities.
        • -
        • A threat of tyranny: The small states warned that the Virginia Plan would create a powerful and centralized national government that could abuse its authority and oppress the rights and interests of the states and the people.
        • -
        -

        Subsection 3: The solution of the Great Compromise

        -

        The conflict between the large and small states over representation threatened to deadlock the Constitutional Convention. To resolve this problem, Roger Sherman of Connecticut proposed an agreement that came to be known as the Great Compromise or the Connecticut Compromise. Some of the features of the Great Compromise were:

        -
          -
        • A bicameral legislature with mixed representation: The compromise retained the idea of two houses of Congress, but changed the basis of representation in each house. In the lower house or the House of Representatives, representation would be proportional to the population of each state, as in the Virginia Plan. In the upper house or the Senate, representation would be equal for all states, with each state having two senators, as in the New Jersey Plan.
        • -
        • A compromise on taxation and slavery: The compromise also settled a dispute over how to count enslaved persons for purposes of representation and taxation. The Southern states wanted to count enslaved persons as part of their population to increase their representation in Congress, but not for taxation. The Northern states wanted to count enslaved persons for taxation, but not for representation. The compromise agreed that each enslaved person would count as three-fifths of a free person for both representation and taxation.
        • -
        • A balance of power between federal and state governments: The compromise aimed to create a federal system of government that divided power between a national government and state governments. The national government would have delegated powers that were enumerated or listed in the Constitution, such as regulating interstate and foreign commerce, coining money, declaring war, and making treaties. The state governments would have reserved powers that were not granted to the national government or prohibited to them by the Constitution, such as regulating intrastate commerce, establishing local governments, and providing for public health and safety. The national and state governments would share concurrent powers that both could exercise, such as levying taxes, borrowing money, and enforcing laws.
        • -
        -

        Section 3: The Slave Trade and the Bill of Rights

        -

        Subsection 1: The controversy over slavery and the slave trade

        -

        Slavery was one of the most divisive and contentious issues at the Constitutional Convention. The Southern states depended on slavery for their economic and social stability, and wanted to protect their institution and interests. The Northern states were more opposed to slavery on moral and political grounds, and wanted to limit its expansion and influence. The controversy over slavery and the slave trade centered on three main questions:

        -
          -
        • Should enslaved persons be counted for representation in Congress?
        • -
        • Should Congress have power to regulate or abolish the slave trade?
        • -
        • Should Congress have power to interfere with slavery in existing or new states?
        • -
        -

        Subsection 2: The compromise over slavery and the slave trade

        -

        To avoid a breakdown of the Constitutional Convention over slavery and the slave trade, the delegates reached several compromises that satisfied both sides for the time being. Some of the compromises over slavery and the slave trade were:

        -
          -
        • The Three-Fifths Compromise: As mentioned earlier, this compromise agreed that each enslaved person would count as three-fifths of a free person for purposes of representation and taxation.
        • -
        • The Slave Trade Clause: This compromise allowed Congress to regulate or prohibit the importation of new enslaved persons after 1808, but not before. It also prohibited any tax or duty on such importation exceeding ten dollars per person.
        • -
        • The Fugitive Slave Clause: This compromise required that any person held to service or labor in one state who escaped to another state should be delivered up on claim of the party to whom such service or labor was due.
        • -
        -

        Subsection 3: The demand for a Bill of Rights and its ratification

        -

        The Constitution was signed by 39 delegates on September 17, 1787, but it still needed to be ratified by nine out of the 13 states before it could become effective. The ratification process sparked a fierce debate between two groups: the Federalists, who supported the Constitution, and the Anti-Federalists, who opposed it. One of the main arguments of the Anti-Federalists was that the Constitution lacked a Bill of Rights, a list of guarantees for individual rights and liberties against potential abuses by the national government. Some examples of rights that they wanted to protect were freedom of speech, religion, press, assembly, and petition; right to bear arms; right to trial by jury; right to due process; and right to privacy. The Federalists argued that a Bill of Rights was unnecessary because:

        -
          -
        • The Constitution itself was a bill of rights that limited the powers of the national government and protected state rights.
        • -
        • The people had natural rights that could not be taken away by any government.
        • -
        • The enumeration of certain rights might imply that those were the only rights that people had.
        • -
        • The addition of a bill of rights might require another constitutional convention that could endanger the whole ratification process.
        • -
        -

However, some Federalists also recognized that a bill of rights might be needed to secure public support and ensure ratification. They therefore promised that, if the Constitution was ratified without amendments, they would propose amendments adding a bill of rights in the first session of Congress. This promise persuaded some Anti-Federalists to change their minds and vote for ratification. By June 1788, nine states had ratified the Constitution, making it effective among those states. However, important states such as New York and Virginia remained undecided. To win them over, James Madison, together with Alexander Hamilton and John Jay, wrote a series of essays called The Federalist Papers, explaining and defending various aspects of the Constitution. Madison also pledged to introduce a bill of rights as soon as he was elected to Congress from Virginia. With these efforts, New York and Virginia ratified the Constitution by narrow margins in the summer of 1788. North Carolina followed in November 1789, after Congress had proposed 12 amendments for ratification by three-fourths of the states. The last state to ratify was Rhode Island in May 1790.

        -

        As promised, James Madison introduced a bill of rights in the first session of Congress in June 1789. He based his proposal on the state declarations of rights, especially those of Virginia and Massachusetts, and on the recommendations of state ratifying conventions. He also consulted Thomas Jefferson, who was serving as the U.S. minister to France, and who sent him a list of suggestions. After a lengthy debate in both the House of Representatives and the Senate, Congress approved 12 amendments in September 1789 and sent them to the states for ratification. By December 1791, 10 amendments had been ratified by the required number of states and became known as the Bill of Rights. The Bill of Rights added specific guarantees of personal freedoms and rights, clear limitations on the government's power in judicial and other proceedings, and explicit declarations that all powers not specifically granted to Congress by the Constitution are reserved for the states or the people.

        -

        Conclusion

        -

        Summary of the main points

        -

        In this article, we have discussed guided reading activity 7-2 answers US history rar, a set of questions that accompany a section of text from a US history textbook or workbook. The text covers some of the key events and issues that shaped the formation of the United States Constitution in the late 18th century. We have provided you with some background information, tips, and answers to help you complete this activity and learn more about US history. We have organized our article into three main sections:

        -
          -
        • Section 1: The Articles of Confederation and the Constitutional Convention. We have explained the problems and achievements of the Articles of Confederation, and the events that led to the Constitutional Convention.
        • -
        • Section 2: The Virginia Plan and the Great Compromise. We have described the features of the Virginia Plan, the objections of the small states to it, and the solution of the Great Compromise.
        • -
        • Section 3: The Slave Trade and the Bill of Rights. We have discussed the controversy over slavery and the slave trade, the compromise over them, and the demand for a bill of rights and its ratification.
        • -
        -

        Relevance of guided reading activity 7-2 to today's world

        -

        Guided reading activity 7-2 is relevant to today's world because it helps us understand how the United States Constitution was created and what it means for us as citizens. The Constitution is not only a historical document, but also a living one that guides our government, protects our rights, and shapes our society. By studying how the Constitution was drafted and ratified, we can appreciate the challenges and compromises that were involved in its making, as well as the vision and values that inspired its framers. By knowing how the Constitution has been amended over time, we can recognize how it has adapted to changing circumstances and needs, as well as how it has preserved its core principles. By learning how to interpret and apply the Constitution to various situations and controversies, we can participate in the ongoing dialogue and debate about its meaning and application.

        -

        FAQs

        -

        Here are some frequently asked questions and answers about guided reading activity 7-2 answers US history rar:

        -
          -
1. Q: What is the difference between the Articles of Confederation and the Constitution?
   A: The Articles of Confederation were the first constitution of the United States, adopted in 1777 and ratified in 1781. They created a loose confederation of sovereign states with a weak central government. The Constitution is the second and current constitution of the United States, drafted in 1787 and ratified in 1788. It created a federal system of government that divides power between a national government and state governments, with three branches: legislative, executive, and judicial.
2. Q: What is the difference between the Virginia Plan and the New Jersey Plan?
   A: The Virginia Plan was a proposal for a new constitution, drafted largely by James Madison and presented by Edmund Randolph at the Philadelphia Convention. It was based on the idea that the national government should have more power than the state governments, and that representation in the legislature should be based on the population or wealth of each state. The New Jersey Plan was a proposal presented by William Paterson at the Philadelphia Convention. It was based on the idea that the national government should have limited power over the state governments, and that representation in the legislature should be equal for all states.
3. Q: What is the difference between federalism and confederalism?
   A: Federalism is a system of government that divides power between a central or national government and regional or state governments. Each level of government has some areas of authority that are exclusive to it and some that are shared with other levels. Confederalism is a system of government that consists of a league or alliance of independent states that retain their sovereignty and delegate some powers to a common body. The central or confederal government has only those powers granted to it by the states, which can revoke them at any time.
4. Q: What is the difference between an amendment and an article?
   A: An amendment is a change or addition to an existing document, such as a constitution or a law. An article is a section or part of a document, such as a constitution or a law, that deals with a specific topic or issue.
5. Q: What is the difference between ratification and approval?
   A: Ratification is the formal process of confirming or validating an agreement or document by giving consent or approval. Approval is the act of agreeing to or accepting something as satisfactory or suitable.
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/rajesh1729/mercury-jupyternotebooks/README.md b/spaces/rajesh1729/mercury-jupyternotebooks/README.md deleted file mode 100644 index 79ebfb4604c579acaad218f9995b39d72d5888bb..0000000000000000000000000000000000000000 --- a/spaces/rajesh1729/mercury-jupyternotebooks/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Mercury Jupyternotebooks -emoji: 🐢 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.8 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rbarman/Openvino_Text_Detection/app.py b/spaces/rbarman/Openvino_Text_Detection/app.py deleted file mode 100644 index b2453fae92a2b0b3df7d2e0f8595d318c6834a21..0000000000000000000000000000000000000000 --- a/spaces/rbarman/Openvino_Text_Detection/app.py +++ /dev/null @@ -1,119 +0,0 @@ -import cv2 -import matplotlib.pyplot as plt -import numpy as np -from openvino.runtime import Core -import gradio as gr - -##### -#Load pretrained model -##### -ie = Core() -model = ie.read_model(model="model/horizontal-text-detection-0001.xml") -compiled_model = ie.compile_model(model=model, device_name="CPU") -input_layer_ir = compiled_model.input(0) -output_layer_ir = compiled_model.output("boxes") - -##### -#Inference -##### -def predict(img: np.ndarray, threshold) -> str: - # input: numpy array of image in RGB (see defaults for https://www.gradio.app/docs/#image) - - # Text detection models expect an image in BGR format. - image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) - # N,C,H,W = batch size, number of channels, height, width. - N, C, H, W = input_layer_ir.shape - # Resize the image to meet network expected input sizes. - resized_image = cv2.resize(image, (W, H)) - # Reshape to the network input shape. - input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0) - - - # Create an inference request. - boxes = compiled_model([input_image])[output_layer_ir] - # Remove zero only boxes. - boxes = boxes[~np.all(boxes == 0, axis=1)] - print(f'detected {len(boxes)} things') - result = convert_result_to_image(image, resized_image, boxes, threshold=threshold, conf_labels=False) - - #plt.figure(figsize=(10, 6)) - #plt.axis("off") - #plt.imshow(result) - #print(f'result is: {type(result)}') - #print(result.shape) - #print(result) - - result_fp = 'temp_result.jpg' - cv2.imwrite(result_fp, cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) - return result_fp - - -# For each detection, the description is in the [x_min, y_min, x_max, y_max, conf] format: -# The image passed here is in BGR format with changed width and height. To display it in colors expected by matplotlib, use cvtColor function -def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True): - # Define colors for boxes and descriptions. - colors = {"red": (255, 0, 0), "green": (0, 255, 0)} - - # Fetch the image shapes to calculate a ratio. - (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2] - ratio_x, ratio_y = real_x / resized_x, real_y / resized_y - - # Convert the base image from BGR to RGB format. - rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB) - - # Iterate through non-zero boxes. - for box in boxes: - # Pick a confidence factor from the last place in an array. - conf = box[-1] - if conf > threshold: - # Convert float to int and multiply corner position of each box by x and y ratio. 
- # If the bounding box is found at the top of the image, - # position the upper box bar little lower to make it visible on the image. - (x_min, y_min, x_max, y_max) = [ - int(max(corner_position * ratio_y, 10)) if idx % 2 - else int(corner_position * ratio_x) - for idx, corner_position in enumerate(box[:-1]) - ] - - # Draw a box based on the position, parameters in rectangle function are: image, start_point, end_point, color, thickness. - rgb_image = cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3) - - # Add text to the image based on position and confidence. - # Parameters in text function are: image, text, bottom-left_corner_textfield, font, font_scale, color, thickness, line_type. - if conf_labels: - rgb_image = cv2.putText( - rgb_image, - f"{conf:.2f}", - (x_min, y_min - 10), - cv2.FONT_HERSHEY_SIMPLEX, - 0.8, - colors["red"], - 1, - cv2.LINE_AA, - ) - - return rgb_image - -##### -#Gradio Setup -##### - -title = "Text Detection" -description = "Text Detection with OpenVino model" -examples = ['test.jpg'] -interpretation='default' -enable_queue=True - -gr.Interface( - fn=predict, - inputs=[ - gr.inputs.Image(), - gr.Slider(minimum=0, maximum=1, value=.3) - ], - outputs=gr.outputs.Image(type='filepath'), - title=title, - description=description, - #examples=examples, - interpretation=interpretation, - enable_queue=enable_queue - ).launch() \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Black Code Facebook Password Stealer Download 2021.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Black Code Facebook Password Stealer Download 2021.md deleted file mode 100644 index d255344f0022d6e0629cb702ea9742776e735cd3..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Black Code Facebook Password Stealer Download 2021.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Facebook offers a type of two-factor authentication that uses the user's mobile phone to add an extra layer of security to their accounts. The feature, called app-only login, requires users to log in with a phone number or a one-time-use code sent via SMS text message. Without that extra step, attackers can use the same passwords and security questions to quickly log in to multiple accounts.
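To make the mechanism concrete, here is a minimal sketch of how a server-side one-time-code check of this kind can work. This is an illustrative assumption only: the function names and the in-memory store below are hypothetical and are not Facebook's actual implementation.

import hmac
import secrets
import time

# Hypothetical in-memory store: phone number -> (code, expiry timestamp).
pending_codes = {}

def issue_code(phone: str, ttl_seconds: int = 300) -> str:
    # Generate a 6-digit one-time code and remember when it expires.
    code = f"{secrets.randbelow(10**6):06d}"
    pending_codes[phone] = (code, time.time() + ttl_seconds)
    return code  # a real system would send this via SMS rather than return it

def verify_code(phone: str, submitted: str) -> bool:
    # The code is single-use (popped from the store) and must not be expired.
    code, expires_at = pending_codes.pop(phone, (None, 0.0))
    if code is None or time.time() > expires_at:
        return False
    # Constant-time comparison avoids leaking information through timing.
    return hmac.compare_digest(code, submitted)

The short expiry and single-use check are what make the extra step valuable: a code that is phished or intercepted is only briefly useful.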

        -

The new method makes social media much more accessible than it was before, and the convenience of app-only login makes it tempting for users to open attachments that purport to be installers for apps they have been told they need to download. The problem is that the documents are malicious and can steal or crack the passwords used to log in to the user's accounts.

        -

        Black Code Facebook Password Stealer Download


Download: https://urlgoal.com/2uCKlo



        -

If someone can get the password to your Facebook account, they can create a Facebook profile in your name. Then, they can log into your account whenever they want, without any more verification than a Facebook password. They can also change your profile picture to look like your own, and they can use your profile to make any type of online purchase.

        -

That's why we believe that a Facebook password stealer app is so dangerous. We've already seen so many fake Facebook password stealer apps in the Google Play Store, and most of them are free to download. This is an indication that attackers have figured out a way to create a legitimate-looking app that steals your Facebook passwords.

        -

Because these apps are very convincing, they can get past security checks and pass through Google Play's detection system. When an unsuspecting user downloads one of these apps, Facebook will detect that the device has been used to log into a Facebook account and block the app.

        -
        -
        \ No newline at end of file diff --git "a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Emulador De PSX Para Pc Windows 10 32 Y\302\24064bits.md" "b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Emulador De PSX Para Pc Windows 10 32 Y\302\24064bits.md" deleted file mode 100644 index 1439462d03db5c16ad4d06ad4fe4de30188ca782..0000000000000000000000000000000000000000 --- "a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Emulador De PSX Para Pc Windows 10 32 Y\302\24064bits.md" +++ /dev/null @@ -1,6 +0,0 @@ -

        Emulador De PSX Para Pc Windows 10 32 y 64bits


        DOWNLOAD ►►► https://urlgoal.com/2uCMtd



        -
        -
        -
        -

        diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Giorgia Colombo Nuda.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Giorgia Colombo Nuda.md deleted file mode 100644 index d8aa40054d5d9020e1b5909af7f1dcfd47c2cecd..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Giorgia Colombo Nuda.md +++ /dev/null @@ -1,6 +0,0 @@ -

        giorgia colombo nuda


        Download File >>> https://urlgoal.com/2uCN6v



        -
        -
        -

        diff --git a/spaces/renumics/cifar100-enriched/Dockerfile b/spaces/renumics/cifar100-enriched/Dockerfile deleted file mode 100644 index 908bcdb00346b0d11ad9ae6b6d4bdbe215dec89c..0000000000000000000000000000000000000000 --- a/spaces/renumics/cifar100-enriched/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM python:3.9 - -WORKDIR /code -ENV HOME=/code -# COPY ./requirements.txt /code/requirements.txt -# RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN apt install curl -RUN pip install pip -U - -RUN pip install pydantic==1.10.8 renumics-spotlight==1.2.0rc2 - -RUN pip install datasets - -COPY . . -RUN mkdir -p /code/.cache -RUN chmod -R 777 /code -RUN python prepare.py -CMD ["python", "run.py"] diff --git a/spaces/rgres/Seg2Sat/static/_app/immutable/error.svelte-d9523301.js b/spaces/rgres/Seg2Sat/static/_app/immutable/error.svelte-d9523301.js deleted file mode 100644 index 1c200845989d5bbde3173a928c2ca48d13743a81..0000000000000000000000000000000000000000 --- a/spaces/rgres/Seg2Sat/static/_app/immutable/error.svelte-d9523301.js +++ /dev/null @@ -1 +0,0 @@ -import{S as w,i as y,s as z,e as E,t as v,c as d,a as b,h as P,d as o,g as u,J as R,j as N,k as S,l as C,m as j,E as H}from"./chunks/index-bcf2726a.js";function J(r){let l,t=r[1].frame+"",a;return{c(){l=E("pre"),a=v(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(o)},m(f,s){u(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].frame+"")&&N(a,t)},d(f){f&&o(l)}}}function h(r){let l,t=r[1].stack+"",a;return{c(){l=E("pre"),a=v(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(o)},m(f,s){u(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].stack+"")&&N(a,t)},d(f){f&&o(l)}}}function A(r){let l,t,a,f,s=r[1].message+"",c,k,n,p,i=r[1].frame&&J(r),_=r[1].stack&&h(r);return{c(){l=E("h1"),t=v(r[0]),a=S(),f=E("pre"),c=v(s),k=S(),i&&i.c(),n=S(),_&&_.c(),p=C()},l(e){l=d(e,"H1",{});var m=b(l);t=P(m,r[0]),m.forEach(o),a=j(e),f=d(e,"PRE",{});var q=b(f);c=P(q,s),q.forEach(o),k=j(e),i&&i.l(e),n=j(e),_&&_.l(e),p=C()},m(e,m){u(e,l,m),R(l,t),u(e,a,m),u(e,f,m),R(f,c),u(e,k,m),i&&i.m(e,m),u(e,n,m),_&&_.m(e,m),u(e,p,m)},p(e,[m]){m&1&&N(t,e[0]),m&2&&s!==(s=e[1].message+"")&&N(c,s),e[1].frame?i?i.p(e,m):(i=J(e),i.c(),i.m(n.parentNode,n)):i&&(i.d(1),i=null),e[1].stack?_?_.p(e,m):(_=h(e),_.c(),_.m(p.parentNode,p)):_&&(_.d(1),_=null)},i:H,o:H,d(e){e&&o(l),e&&o(a),e&&o(f),e&&o(k),i&&i.d(e),e&&o(n),_&&_.d(e),e&&o(p)}}}function F({error:r,status:l}){return{props:{error:r,status:l}}}function B(r,l,t){let{status:a}=l,{error:f}=l;return r.$$set=s=>{"status"in s&&t(0,a=s.status),"error"in s&&t(1,f=s.error)},[a,f]}class G extends w{constructor(l){super(),y(this,l,B,A,z,{status:0,error:1})}}export{G as default,F as load}; diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/hook/memory_profiler_hook.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/hook/memory_profiler_hook.py deleted file mode 100644 index a473061b566f92f4bee6280ec33875e2c50a51dd..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/core/hook/memory_profiler_hook.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.runner.hooks import HOOKS, Hook - - -@HOOKS.register_module() -class MemoryProfilerHook(Hook): - """Memory profiler hook recording memory information including virtual - memory, swap memory, and the memory of the current process. - - Args: - interval (int): Checking interval (every k iterations). - Default: 50. 
- """ - - def __init__(self, interval=50): - try: - from psutil import swap_memory, virtual_memory - self._swap_memory = swap_memory - self._virtual_memory = virtual_memory - except ImportError: - raise ImportError('psutil is not installed, please install it by: ' - 'pip install psutil') - - try: - from memory_profiler import memory_usage - self._memory_usage = memory_usage - except ImportError: - raise ImportError( - 'memory_profiler is not installed, please install it by: ' - 'pip install memory_profiler') - - self.interval = interval - - def after_iter(self, runner): - if self.every_n_iters(runner, self.interval): - # in Byte - virtual_memory = self._virtual_memory() - swap_memory = self._swap_memory() - # in MB - process_memory = self._memory_usage()[0] - factor = 1024 * 1024 - runner.logger.info( - 'Memory information ' - 'available_memory: ' - f'{round(virtual_memory.available / factor)} MB, ' - 'used_memory: ' - f'{round(virtual_memory.used / factor)} MB, ' - f'memory_utilization: {virtual_memory.percent} %, ' - 'available_swap_memory: ' - f'{round((swap_memory.total - swap_memory.used) / factor)}' - ' MB, ' - f'used_swap_memory: {round(swap_memory.used / factor)} MB, ' - f'swap_memory_utilization: {swap_memory.percent} %, ' - 'current_process_memory: ' - f'{round(process_memory)} MB') diff --git a/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Cs6 Extended Serial Number Free.md b/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Cs6 Extended Serial Number Free.md deleted file mode 100644 index c7bda517550b2c4ce9230cb86bd813c4240c530b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Adobe Photoshop Cs6 Extended Serial Number Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

        adobe photoshop cs6 extended serial number free


        Download ---> https://tinurll.com/2uzncx



        - -024x768 (1280x800 recommended) resolution display with 16-bit color and 256MB (512MB recommended, required for Photoshop Extended) of ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download UPDATED Odnoklassniki Ru Ok Hack.md b/spaces/rorallitri/biomedical-language-models/logs/Download UPDATED Odnoklassniki Ru Ok Hack.md deleted file mode 100644 index a37afa14615f250317a92d11c2436f5d42547015..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download UPDATED Odnoklassniki Ru Ok Hack.md +++ /dev/null @@ -1,20 +0,0 @@ -
        -

        When the user downloads the Apk file from ApkSoul.net, we will check the relevant APK file on Google Play and let the user download it directly. The games and applications uploaded to our website are safe and harmless to users.

        -

OK (ru.ok.android) is a premium app on Android; download the latest version of OK Hack Mod (Premium Unlocked/VIP/PRO) 2022 for Android. The app can be used for free and does not require root.

        -

        download odnoklassniki ru ok hack


Download Zip: https://tinurll.com/2uzmct



        -

OK MOD APK (Premium Unlocked/VIP/PRO) APK + OBB 2022 can be downloaded and installed on any Android device running Android 4.1 or higher. Download the app with your favorite browser and tap Install. Downloading the APK + DATA of OK (Premium Unlocked/VIP/PRO) from ApkSoul.net is easier and faster.

        -

        Recently, we have seen an increasing number of reports from iPhone users about their calendars filling up with junk events. These events are most often either pornographic in nature, or claim that the device has been infected or hacked, and in all cases they contain malicious links. This phenomenon is known as "calendar spam."

        -

        Whether you do or don't subscribe to the calendar, the page will go back to the fake captcha. Tapping the captcha a second time, and clicking either OK or Cancel, will result in your browser being redirected to a scam page claiming your iPhone is infected or that hackers are watching you.

        -

        If you have been impacted, your iPhone has fortunately not actually been hacked or infected (regardless of what the messages claim), and there is a simple solution. You can just delete the subscribed calendars.

        -

        File Name: Video downloader for ok.ru 9 APK + Mod (Unlocked) for Android
        Mod info: Unlocked
        File size: 2.51 MB
        To provide better download speed, we recommend dFast - fastest mod downloader to download this file. dFast is the fastest downloader for millions of free mods. You can enjoy 3x faster speed than normal downloads.

        -

        Social network Odnoklassniki is the most popular site for communication and dating on the Russian-speaking Internet. It is also briefly called OK, OD or ODD. You can get into this social network through the address www.ok.ru or www.odnoklassniki.ru.

        -

        -

        If you already have an account, but you simply cannot get into it, you do not need to register again. Otherwise, you will lose all downloaded photos, correspondence, achievements in games and other data. Better try to restore the old profile through the support service.

        -

        The procedure for restoring a profile in classmates is as follows:

        Recovering a hacked page is not such a big problem, it is easy to solve. We present to your attention also another case when the profile was deleted.

        -

        There are a lot of such utilities. You do not need to download them - the entrance to the site is carried out online. Just type in the address of the site you want (in this case "Odnoklassniki") into the program's search bar and press "Enter". You enter the site not from your IP address, but through a proxy server.

        -

        Each registered user on the Odnoklassniki social network site has his own personal page, which he can access using the login form located on the website www.odnoklassniki.ru, which looks like this in the browser:

        -

        If you want to download the latest version of TamTam APK, then you must come to apkmody. In apkmody you can download TamTam Mod APK v2.33.3 for free. Next is a detailed introduction about TamTam Mod APK v2.33.3.

        -

        TamTam Mod APK is the PRO version of TamTam APK. By using the TamTam Mod APK, you can easily complete any tasks and requirements in it. Often you need to spend a lot of time or money to get rewards easily, but by using TamTam Mod APK, you often achieve your goals in a very short time. TamTam Mod APK is a great way for you to outshine your competition. Now in apkmody you can download TamTam APK v2.33.3 for free. This process doesn't cost anything, and you can use it with confidence.

        -

        If you don't want to download the TamTam mod APK version, you can also easily download the TamTam APK in Apkmody. apkmody will update the TamTam APK version in the fastest time. Users can easily update TamTam APK without downloading Google Play.

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Ibps It Officer Books Pdf Free Download and Burner Nitro Harbor The Ultimate Combo for IT Aspirants.md b/spaces/rorallitri/biomedical-language-models/logs/Ibps It Officer Books Pdf Free Download and Burner Nitro Harbor The Ultimate Combo for IT Aspirants.md deleted file mode 100644 index 392637b26d440c38cdac4a20ded441d21f2d868b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Ibps It Officer Books Pdf Free Download and Burner Nitro Harbor The Ultimate Combo for IT Aspirants.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Saare Jahaan Se Mehnga tamil torrent download


        DOWNLOAD ->>->>->> https://tinurll.com/2uzmfR



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Informationen zum Laden von macOS - Apple Support (DE) [1].md b/spaces/rorallitri/biomedical-language-models/logs/Informationen zum Laden von macOS - Apple Support (DE) [1].md deleted file mode 100644 index 909eb35f2360ba0eab378d46bd9273065cbc38d8..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Informationen zum Laden von macOS - Apple Support (DE) [1].md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

macOS Monterey is not compatible with a number of older Mac models, so there is a chance that it simply will not run on your Mac. Here is a complete list of the Macs that are compatible with macOS 12:

        -

If the problem only occurs when you try to stream to an Apple TV, the first thing to try is moving your Mac closer to the Apple TV. That doesn't work? Make sure both devices are connected to the same network. One of them may have accidentally joined a different Wi-Fi network, and now you are stuck because of that trivial issue.

        -

os x maverick download not working


Download: https://tinurll.com/2uznXs



        -

When you connect your AirPods Pro to the Mac, the Bluetooth menu shows the battery percentage for the earbuds and the case. At least that is how it worked for me under macOS Big Sur. Since upgrading to Monterey, the Bluetooth menu still shows the connection to my AirPods, but not the battery level. Personally, that bothers me, so I fix the problem with ToothFairy.

        -

We have also seen cases where ToothFairy helped fix Bluetooth connection problems. This happens when the built-in utility is having trouble for some reason. So if Bluetooth is not working, simply try connecting your device through ToothFairy.

        -

Force-quit the VictronConnect app. Instructions: the same as for iPhone and iPad, and the same as for Android. If you are not sure how to do this, uninstalling and reinstalling the app also works.

        -
        -
        \ No newline at end of file diff --git a/spaces/runa91/barc_gradio/src/stacked_hourglass/datasets/__init__.py b/spaces/runa91/barc_gradio/src/stacked_hourglass/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/satish2004/myaichanti2/README.md b/spaces/satish2004/myaichanti2/README.md deleted file mode 100644 index a4ba932141b984335677aac1d3489cc154a8051b..0000000000000000000000000000000000000000 --- a/spaces/satish2004/myaichanti2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 'Ai assistant of gpt,nizamabad ' -emoji: 🚀 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Burghezul Gentilom De Moliere Pdf 25.md b/spaces/scedlatioru/img-to-music/example/Burghezul Gentilom De Moliere Pdf 25.md deleted file mode 100644 index 23108bf280173f0b333e16e5934a94ac4fe683af..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Burghezul Gentilom De Moliere Pdf 25.md +++ /dev/null @@ -1,21 +0,0 @@ - -

        How to Download Burghezul Gentilom De Moliere Pdf 25 for Free

        -

        Burghezul Gentilom (The Bourgeois Gentleman) is a comedy-ballet by the French playwright Moliere, first performed in 1670. It tells the story of a wealthy merchant who tries to imitate the manners and culture of the nobility, but ends up making a fool of himself.

        -

If you are interested in reading this classic work of satire, you might be wondering how to download Burghezul Gentilom De Moliere Pdf 25 for free. PDF is a file format that lets you read ebooks on a wide range of devices, such as computers, tablets, and smartphones.

        -

        Burghezul Gentilom De Moliere Pdf 25


Download Zip: https://gohhs.com/2uEyI9



        -

        Fortunately, there are some websites that offer free downloads of Burghezul Gentilom De Moliere Pdf 25. Here are some of them:

        -
          -
        • Archive.org: This website provides access to millions of books, movies, music, and other digital content that are in the public domain or have been uploaded by users. You can find Burghezul Gentilom De Moliere Pdf 25 here[^1^], along with an audio version of the play.
        • -
        • Scribd.com: This website is a platform for sharing and reading documents, books, magazines, and more. You can find Burghezul Gentilom De Moliere Pdf 25 here[^3^], but you will need to sign up for a free trial or a subscription to download it.
        • -
        -

        These are some of the websites that offer free downloads of Burghezul Gentilom De Moliere Pdf 25. However, you should always be careful when downloading files from the internet, as some of them might contain viruses or malware that can harm your device or compromise your privacy. Always scan the files before opening them and use a reliable antivirus software.

        -

We hope this article has helped you find what you were looking for. If you have any questions or feedback, please let us know in the comments below.

        - -

        If you want to know more about the plot and the characters of Burghezul Gentilom, here is a brief summary:

        -

        The play revolves around Monsieur Jourdain, a rich but foolish merchant who dreams of becoming a nobleman. He hires various teachers to instruct him in music, dance, philosophy, and fencing, but he fails to learn anything and only makes himself ridiculous. He also tries to woo a young and beautiful marchioness, Dorimene, who is actually in love with his friend Cléonte. Cléonte pretends to be a Turkish prince and arranges a fake ceremony to make Jourdain believe that he has been made a nobleman by the Sultan.

        -

        Meanwhile, Jourdain's wife, Madame Jourdain, tries to bring some sense into her husband and stop his extravagant spending. She also supports the love affair between their daughter Lucile and Cléonte's servant Covielle, who are both clever and witty. They manage to trick Jourdain into giving his consent for their marriage, by making him think that Covielle is also a Turkish nobleman.

        -

        The play ends with a festive ballet, in which Jourdain is mocked by everyone, but remains oblivious to his own folly.

        -

        -

        Burghezul Gentilom is a masterpiece of comedy, satire, and social criticism. Moliere exposes the absurdity and hypocrisy of the bourgeoisie, who aspire to imitate the aristocracy without having any of their virtues or values. He also mocks the pretentiousness and ignorance of the pseudo-intellectuals, who exploit Jourdain's vanity and gullibility. He shows that true nobility is not a matter of birth or wealth, but of character and intelligence.

        -
        -
        \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Gorilla Film Production Software Serial Number.md b/spaces/scedlatioru/img-to-music/example/Gorilla Film Production Software Serial Number.md deleted file mode 100644 index 94d1d2f562691ce7e7394322fd74c5e465b6277b..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Gorilla Film Production Software Serial Number.md +++ /dev/null @@ -1,6 +0,0 @@ -

        gorilla film production software serial number


Download Zip: https://gohhs.com/2uEzNH



        -
-YOU CAN DOWNLOAD INPUT DEVICES TO ROBOTIC TOOL OR YOU CAN USING THEIR VERY SIMPLY TO. DOWNLOAD: 91edad2d00. Gorilla Film Production Software Serial Number.
        -
        -
        -

        diff --git a/spaces/scedlatioru/img-to-music/example/Zambezia Full Movie In Hindi 78.md b/spaces/scedlatioru/img-to-music/example/Zambezia Full Movie In Hindi 78.md deleted file mode 100644 index 1153a853a6207f5a4e7e4b7f23e1618faca0d04a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Zambezia Full Movie In Hindi 78.md +++ /dev/null @@ -1,6 +0,0 @@ -

        zambezia full movie in hindi 78


        Download ===== https://gohhs.com/2uEAp3



        - -66. NORTHEAST AFRICA. General. 77. 66. Eritrea. 78. 67. Ethiopia. 79-87. 67. Somalia ... Zambezia = ISSN 0379-0622. - Harare. Vol. ... des réflexions - dont des entretiens - sur le film africain, son évolution, ses difficultés, le ... Mahjoub, whose life has been full of transcultural journeys, sets up a central tension between the ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/segments-tobias/conex/espnet/optimizer/chainer.py b/spaces/segments-tobias/conex/espnet/optimizer/chainer.py deleted file mode 100644 index 0fb6f4b3fab082926c66a81f8aa4cf19e7a6b849..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/optimizer/chainer.py +++ /dev/null @@ -1,98 +0,0 @@ -"""Chainer optimizer builders.""" -import argparse - -import chainer -from chainer.optimizer_hooks import WeightDecay - -from espnet.optimizer.factory import OptimizerFactoryInterface -from espnet.optimizer.parser import adadelta -from espnet.optimizer.parser import adam -from espnet.optimizer.parser import sgd - - -class AdamFactory(OptimizerFactoryInterface): - """Adam factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return adam(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. - - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - opt = chainer.optimizers.Adam( - alpha=args.lr, - beta1=args.beta1, - beta2=args.beta2, - ) - opt.setup(target) - opt.add_hook(WeightDecay(args.weight_decay)) - return opt - - -class SGDFactory(OptimizerFactoryInterface): - """SGD factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return sgd(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. - - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - opt = chainer.optimizers.SGD( - lr=args.lr, - ) - opt.setup(target) - opt.add_hook(WeightDecay(args.weight_decay)) - return opt - - -class AdadeltaFactory(OptimizerFactoryInterface): - """Adadelta factory.""" - - @staticmethod - def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Register args.""" - return adadelta(parser) - - @staticmethod - def from_args(target, args: argparse.Namespace): - """Initialize optimizer from argparse Namespace. - - Args: - target: for pytorch `model.parameters()`, - for chainer `model` - args (argparse.Namespace): parsed command-line args - - """ - opt = chainer.optimizers.AdaDelta( - rho=args.rho, - eps=args.eps, - ) - opt.setup(target) - opt.add_hook(WeightDecay(args.weight_decay)) - return opt - - -OPTIMIZER_FACTORY_DICT = { - "adam": AdamFactory, - "sgd": SGDFactory, - "adadelta": AdadeltaFactory, -} diff --git a/spaces/sgxz/bingo/src/components/turn-counter.tsx b/spaces/sgxz/bingo/src/components/turn-counter.tsx deleted file mode 100644 index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000 --- a/spaces/sgxz/bingo/src/components/turn-counter.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react' -import { Throttling } from '@/lib/bots/bing/types' - -export interface TurnCounterProps { - throttling?: Throttling -} - -export function TurnCounter({ throttling }: TurnCounterProps) { - if (!throttling) { - return null - } - - return ( -
        -
        - {throttling.numUserMessagesInConversation} - - {throttling.maxNumUserMessagesInConversation} -
        -
        -
        - ) -} diff --git a/spaces/shigel/aiemo/README.md b/spaces/shigel/aiemo/README.md deleted file mode 100644 index 13743311009cfc9a5f1f8f463188f92f82298055..0000000000000000000000000000000000000000 --- a/spaces/shigel/aiemo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 感情診断(β) -emoji: 🌖 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -duplicated_from: najimino/aicv ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BombSquad APK and Enjoy Explosive Multiplayer Action Games.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BombSquad APK and Enjoy Explosive Multiplayer Action Games.md deleted file mode 100644 index 10db2c6274af858e2228d72d86218cd1e1d6fb31..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BombSquad APK and Enjoy Explosive Multiplayer Action Games.md +++ /dev/null @@ -1,135 +0,0 @@ -
        -

        BombSquad Download APK: How to Enjoy Explosive Fun on Your Android Device

        -

        If you are looking for a fun and exciting game to play with your friends or family, you should try BombSquad. BombSquad is a multiplayer action game that lets you blow up your opponents in various mini-games ranging from capture-the-flag to hockey. You can play BombSquad on your Android device by downloading and installing the BombSquad APK file. In this article, we will show you how to do that, as well as how to play BombSquad on your PC with MuMu Player.

        -

        bombsquad download apk


        Download Zip 🆗 https://ssurll.com/2uNUlw



        -

        What is BombSquad?

        -

        A multiplayer action game with mini-games and bombs

        -

        BombSquad is a game developed by Eric Froemling that was released in 2011. It is a game that combines action, comedy, and explosions in a colorful and cartoonish style. You can play BombSquad with up to 8 players locally or online, using touch screens, gamepads, or even phones and tablets as controllers. You can choose from a variety of characters, such as pirates, ninjas, barbarians, insane chefs, and more. You can also customize your own character with different outfits and accessories.

        -

        Features of BombSquad

        -

        BombSquad has many features that make it a fun and addictive game. Some of them are:

        -
          -
        • A huge variety of mini-games and play modes. You can play classic modes like flag-capture, survival, and deathmatch, or try extra modes like AI mode, racing, climbing, puzzle, etc.
        • -
        • Gratuitous explosions and advanced ragdoll physics. You can use different types of bombs, such as sticky bombs, ice bombs, impact bombs, etc., to blast your enemies away. You can also enjoy the hilarious effects of the ragdoll physics as your character flies, falls, and face-plants.
        • -
        • A competitive game suitable for friends and family. You can challenge your friends or family members in a friendly or fierce competition. You can also team up with them to take down other players or bots.
        • -
        • A customizable soundtrack. You can choose from a variety of music genres to suit your mood and preference. You can also create your own playlist with your favorite songs.
        • -
        -

        How to download and install BombSquad APK on your Android device

        -

        Steps to download and install BombSquad APK

        -

        If you want to play BombSquad on your Android device, you need to download and install the BombSquad APK file. Here are the steps to do that:

        -


        -
          -
1. Go to [1](https://apkcombo.com/bombsquad/net.froemling.bombsquad/) or [2](https://mobile.softpedia.com/apk/bombsquad/) and click on the "Download APK" button.
2. Wait for the download to finish and then open the downloaded file.
3. If you see a warning message saying "Install blocked", go to your device settings and enable "Unknown sources" under security or privacy options.
4. Tap on "Install" and wait for the installation to complete.
5. Launch the game and enjoy!
        -

        Tips and tricks for playing BombSquad

        -

        If you want to improve your skills and have more fun in playing BombSquad, here are some tips and tricks you can use:

        -
          -
        • Learn the controls and the mechanics of the game. You can practice in the tutorial mode or the AI mode to get familiar with the game. You can also adjust the settings to suit your preferences, such as the camera angle, the sensitivity, the sound effects, etc.
        • -
        • Use different types of bombs and power-ups wisely. Each bomb and power-up has its own advantages and disadvantages. For example, sticky bombs can stick to your enemies or objects, but they can also stick to you or your teammates. Power-ups can give you extra abilities, such as speed, shield, or health, but they can also be stolen by your opponents.
        • -
        • Use the environment to your advantage. You can use the terrain, the obstacles, and the items to create traps, hide, or escape. You can also interact with some objects, such as barrels, crates, or vehicles, to cause more damage or chaos.
        • -
        • Communicate and cooperate with your teammates. You can use the chat or voice feature to communicate with your teammates. You can also use gestures and emotes to express yourself. You can work together with your teammates to plan strategies, share resources, or support each other.
        • -
        • Have fun and be creative. You can create your own custom games with your own rules and settings. You can also join or host online games with other players from around the world. You can also watch replays of your games or other players' games to learn from them or laugh at them.
        • -
        -

        How to play BombSquad on PC with MuMu Player

        -

        What is MuMu Player?

        -

        MuMu Player is an Android emulator that allows you to run Android apps and games on your PC. It is a fast, smooth, and powerful emulator that supports high-definition graphics and multiple instances. You can use MuMu Player to play BombSquad on your PC with a bigger screen and better controls.

        -

        How to download and install MuMu Player on your PC

        -

        If you want to play BombSquad on your PC with MuMu Player, you need to download and install MuMu Player on your PC first. Here are the steps to do that:

        -
          -
1. Go to [3](https://mumu.163.com/en/) and click on the "Download" button.
2. Wait for the download to finish and then run the installer file.
3. Follow the instructions on the screen to complete the installation.
4. Launch MuMu Player and sign in with your Google account or create a new one.
        -

        How to run BombSquad on MuMu Player

        -

        After you have installed MuMu Player on your PC, you can run BombSquad on it by following these steps:

        -
          -
1. Open MuMu Player and click on the "Play Store" icon on the home screen.
2. Search for "BombSquad" in the Play Store and install it.
3. Once the installation is done, click on the "BombSquad" icon on the home screen or in the app drawer.
4. Enjoy playing BombSquad on your PC with MuMu Player!
        -

        Conclusion

        -

        BombSquad is a fun and exciting game that you can play with your friends or family on your Android device or PC. You can download and install BombSquad APK on your Android device by following the steps we have shown you in this article. You can also play BombSquad on your PC with MuMu Player by downloading and installing MuMu Player on your PC first. We hope you have enjoyed this article and learned something new. Now go ahead and have some explosive fun with BombSquad!

        -

        FAQs

        -

        Here are some frequently asked questions about BombSquad:

        -
          -
        • Q: Is BombSquad free?
        • -
        • A: Yes, BombSquad is free to download and play. However, it contains some in-app purchases that you can buy with real money or tickets. Tickets are earned by playing the game or watching ads.
        • -
        • Q: How many players can play BombSquad?
        • -
        • A: BombSquad supports up to 8 players locally or online. You can also play solo against bots or in AI mode.
        • -
        • Q: Can I play BombSquad offline?
        • -
        • A: Yes, you can play BombSquad offline in local mode or AI mode. However, you need an internet connection to play online mode or access some features such as leaderboards, achievements, etc.
        • -
• Q: How can I contact the developer of BombSquad?
        • -
        • A: You can contact the developer of BombSquad, Eric Froemling, by visiting his website [4](https://www.froemling.net/) or sending him an email at eric@froemling.net.
        • -
        • Q: What are the system requirements for BombSquad?
        • -
        • A: The system requirements for BombSquad are as follows:
        • -
        - - - - - - - - - - - - - -
Platform | Minimum Requirements
Android | Android 4.4 or higher, 1 GB of RAM, 100 MB of storage space
PC | Windows 7 or higher, Mac OS X 10.7 or higher, Linux, 2 GB of RAM, 200 MB of storage space

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Ejen Ali Agents 39 Arena Versi Terbaru !NEW!.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Ejen Ali Agents 39 Arena Versi Terbaru !NEW!.md deleted file mode 100644 index 89004054f40e95d0ac751bcdf55d7a581c736ae4..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Ejen Ali Agents 39 Arena Versi Terbaru !NEW!.md +++ /dev/null @@ -1,137 +0,0 @@ -
        -

        How to Download Ejen Ali: Agents' Arena Versi Terbaru

        -

        If you are a fan of action-packed games, you might have heard of Ejen Ali: Agents' Arena, a high-octane 3v3 battle arena game based on the popular animated series Ejen Ali. In this game, you can choose from a multitude of agents, each with their own unique attributes and skill sets, and team up with friends or other players online in a 3v3 multiplayer match. You can also collect and unlock new upgrades, explore new maps, modes, and characters, and enjoy stunning graphics and sound effects.

        -

        download game ejen ali agents' arena versi terbaru


        Download ……… https://ssurll.com/2uO0nd



        -

        Ejen Ali: Agents' Arena is one of the most downloaded games on Google Play Store and App Store, with over 500K downloads and positive reviews from players. The game is constantly updated with new features and improvements, making it more fun and exciting for gamers of all ages.

        -

        In this article, we will show you what's new in the latest version of Ejen Ali: Agents' Arena, and how you can download it on your Android, iOS, or PC devices.

        -

        What's New in the Latest Version of Ejen Ali: Agents' Arena?

        -

        The latest version of Ejen Ali: Agents' Arena is version 1.0.0.6, which was released on September 26, 2021. This version has several new features and improvements that make the game more enjoyable and challenging. Here are some of the highlights of the latest version of Ejen Ali: Agents' Arena:

        -

        New Characters and Skills

        -

        The latest version of Ejen Ali: Agents' Arena introduces two new characters to the game: Ejen Rizwan and Ejen Dayang. These two agents have their own unique skills and abilities that can help you in your battles. Here are their descriptions and skills:

        -

        | Character | Description | Skill |
        | --- | --- | --- |
        | Ejen Rizwan | A former MATA agent who is now a rogue agent. He is cunning, ruthless, and skilled in combat. | His skill is Shadow Strike, which allows him to dash forward and deal damage to enemies in his path. He can also become invisible for a short duration after using his skill. |
        | Ejen Dayang | A MATA agent who specializes in stealth and infiltration. She is smart, agile, and loyal to her team. | Her skill is Smoke Bomb, which allows her to throw a smoke bomb that creates a cloud of smoke around her. She can use this to escape from enemies or ambush them from behind. |
        -

        In addition to these new characters, the latest version of Ejen Ali: Agents' Arena also adds new skills for some of the existing characters. For example, Ejen Bakar can now use his skill Fireball to launch a fireball that explodes on impact, Ejen Comot can now use her skill Sticky Bomb to attach a bomb to an enemy that detonates after a few seconds, and Ejen Roza can now use her skill Rose Petals to create a trail of rose petals that heal her allies and damage her enemies.

        -

        New Maps and Modes

        -

        The latest version of Ejen Ali: Agents' Arena also introduces two new maps and two new modes to the game. The new maps are Cyberaya City and MATA Academy, which offer different environments and challenges for the players. The new modes are Capture the Flag and King of the Hill, which require different strategies and teamwork from the players. Here are the descriptions of the new maps and modes:

        | Map/Mode | Description |
        | --- | --- |
        | Cyberaya City | A map that features the futuristic city of Cyberaya, where MATA headquarters is located. The map has various buildings, roads, bridges, and tunnels that provide cover and vantage points for the players. |
        | MATA Academy | A map that features the MATA Academy, where young agents are trained and educated. The map has various classrooms, labs, dorms, and halls that provide different obstacles and opportunities for the players. |
        | Capture the Flag | A mode that requires the players to capture the enemy's flag and bring it back to their base, while defending their own flag from being captured. The team that captures the most flags in a given time wins. |
        | King of the Hill | A mode that requires the players to control a designated area on the map for as long as possible, while preventing the enemy from doing so. The team that controls the area for the longest time wins. |
        -

        Bug Fixes and Performance Enhancements

        -

        The latest version of Ejen Ali: Agents' Arena also fixes some of the bugs and issues that were reported by the players in the previous versions. Some of these fixes include:

        -
          -
        • Fixed an issue where some players could not log in or register with their Google or Facebook accounts.
        • -
        • Fixed an issue where some players could not join or create a match with their friends or other players online.
        • -
        • Fixed an issue where some players could not see or hear their teammates or opponents during a match.
        • -
        • Fixed an issue where some players could not receive or claim their rewards or achievements after completing a match or a mission.
        • -
        • Fixed an issue where some players could not access or update their profile, inventory, or settings.
        • -
        • Improved the game's stability, speed, and compatibility with different devices and platforms.
        • -
        • Improved the game's graphics, sound effects, and user interface.
        • -
        • Improved the game's balance, fairness, and difficulty level.
        • -
        • Added more languages, currencies, and regions to the game.
        • -
        • Added more tips, tutorials, and feedback options to the game.
        • -
        -

        How to Download Ejen Ali: Agents' Arena Versi Terbaru on Android Devices?

        -

        If you have an Android device, such as a smartphone or a tablet, you can easily download Ejen Ali: Agents' Arena versi terbaru on your device by following these simple steps:

        -

        Step 1: Go to Google Play Store

        -

        First, you need to go to Google Play Store, which is the official app store for Android devices. You can find it on your device's home screen or app drawer, or you can search for it on your device's browser. Once you open Google Play Store, you need to search for Ejen Ali: Agents' Arena in the search bar. You can also use this link to go directly to the game's page on Google Play Store.

        -

        Step 2: Tap on Install Button

        -

        Next, you need to tap on the install button, which is a green button with a white arrow on it. This will start the download and installation process of the game on your device. You may need to accept some permissions and terms of service before the installation begins. The game's size is about 300 MB, so make sure you have enough space and a stable internet connection before downloading it.

        -

        Step 3: Launch the Game and Enjoy

        -

        Finally, you need to launch the game and enjoy playing it. You can find the game's icon on your device's home screen or app drawer, or you can go back to Google Play Store and tap on the open button. Once you launch the game, you will be asked to sign in with your Google or Facebook account, or create a new account with your email address. You will also be asked to choose your preferred language and region. After that, you can start playing the game and join other players online in a 3v3 battle arena.

        -

        How to Download Ejen Ali: Agents' Arena Versi Terbaru on iOS Devices?

        -

        If you have an iOS device, such as an iPhone or an iPad, you can also download Ejen Ali: Agents' Arena versi terbaru on your device by following these simple steps:

        -

        Step 1: Go to App Store

        -

        First, you need to go to App Store, which is the official app store for iOS devices. You can find it on your device's home screen or app drawer, or you can search for it on your device's browser. Once you open App Store, you need to search for Ejen Ali: Agents' Arena in the search bar. You can also use this link to go directly to the game's page on App Store.

        -

        Step 2: Tap on Get Button

        -

        Next, you need to tap on the get button, which is a blue button with a white arrow on it. This will start the download and installation process of the game on your device. You may need to enter your Apple ID and password before the installation begins. The game's size is about 300 MB, so make sure you have enough space and a stable internet connection before downloading it.

        -

        Step 3: Launch the Game and Enjoy

        -

        Finally, you need to launch the game and enjoy playing it. You can find the game's icon on your device's home screen or app drawer, or you can go back to App Store and tap on the open button. Once you launch the game, you will be asked to sign in with your Apple ID, Google or Facebook account, or create a new account with your email address. You will also be asked to choose your preferred language and region. After that, you can start playing the game and join other players online in a 3v3 battle arena.

        -

        How to Download Ejen Ali: Agents' Arena Versi Terbaru on PC?

        -

        If you have a PC, such as a laptop or a desktop computer, you can also download Ejen Ali: Agents' Arena versi terbaru on your PC by using Google Play Games. Google Play Games is a service that allows you to play Android games on your PC using your Google account. Here are the steps to download Ejen Ali: Agents' Arena versi terbaru on your PC using Google Play Games:

        -

        Step 1: Go to Google Play Games Website

        -

        First, you need to go to the Google Play Games website, which is the official website for the Google Play Games service. You can use any browser of your choice, such as Chrome, Firefox, or Edge. Once you open the Google Play Games website, sign in with the Google account that you use for playing Android games.

        Step 2: Find Ejen Ali: Agents' Arena Game

        -

        Next, you need to find Ejen Ali: Agents' Arena game among other games available on Google Play Games. You can use the search bar or the categories to find the game. You can also use this link to go directly to the game's page on Google Play Games.

        -

        Step 3: Click on Play Button

        -

        Finally, you need to click on the play button, which is a green button with a white arrow on it. This will start the loading and running process of the game on your PC. You may need to allow some permissions and settings before the game starts. The game's performance and quality may vary depending on your PC's specifications and internet connection. Once the game starts, you will be asked to sign in with your Google or Facebook account, or create a new account with your email address. You will also be asked to choose your preferred language and region. After that, you can start playing the game and join other players online in a 3v3 battle arena.

        -

        Conclusion

        -

        Ejen Ali: Agents' Arena is a thrilling and addictive game that lets you experience the action and adventure of the Ejen Ali animated series. You can choose from a variety of agents, each with their own unique skills and abilities, and team up with other players online in a 3v3 multiplayer match. You can also enjoy the stunning graphics, sound effects, and animations of the game, as well as the new features and improvements that are added regularly.

        -

        If you want to download Ejen Ali: Agents' Arena versi terbaru, you can follow the steps we have provided in this article for your Android, iOS, or PC devices. The game is free to download and play, but it may contain some in-app purchases and ads that you can disable or ignore if you want.

        -

        We hope you found this article helpful and informative. If you have any questions or feedback about the game or this article, please feel free to leave a comment below. We would love to hear from you.

        -

        Now, what are you waiting for? Download Ejen Ali: Agents' Arena versi terbaru today and join the fun and excitement of being an agent!

        -

        FAQs

        -

        Here are some of the frequently asked questions about Ejen Ali: Agents' Arena versi terbaru:

        -

        Q: What are the minimum requirements for playing Ejen Ali: Agents' Arena versi terbaru?

        -

        A: The minimum requirements for playing Ejen Ali: Agents' Arena versi terbaru are as follows:

        -
          -
        • For Android devices: Android 5.0 or higher, 2 GB of RAM or more, 300 MB of free storage space or more.
        • -
        • For iOS devices: iOS 10.0 or higher, iPhone 5S or newer, iPad Air or newer, iPod Touch 6th generation or newer, 300 MB of free storage space or more.
        • -
        • For PC devices: Windows 7 or higher, Intel Core i3 processor or equivalent, 4 GB of RAM or more, 300 MB of free storage space or more, Google Chrome browser.
        • -
        -

        Q: How can I update Ejen Ali: Agents' Arena versi terbaru?

        -

        A: You can update Ejen Ali: Agents' Arena versi terbaru by following these steps:

        -
          -
        • For Android devices: Go to Google Play Store, find Ejen Ali: Agents' Arena game, tap on the update button if available.
        • -
        • For iOS devices: Go to App Store, find Ejen Ali: Agents' Arena game, tap on the update button if available.
        • -
        • For PC devices: Go to Google Play Games website, find Ejen Ali: Agents' Arena game, click on the refresh button if available.
        • -
        -

        Q: How can I contact the developers of Ejen Ali: Agents' Arena versi terbaru?

        -

        A: You can contact the developers of Ejen Ali: Agents' Arena versi terbaru by using these methods:

        -
          -
        • Email: support@ejenali.com
        • -
        • Facebook: https://www.facebook.com/ejenali/
        • -
        • Instagram: https://www.instagram.com/ejen_ali/
        • -
        • Twitter: https://twitter.com/ejen_ali
        • -
        • YouTube: https://www.youtube.com/channel/UCdM9lqGfVKJyXRtUuZMqIJg
        • -
        -

        Q: How can I play Ejen Ali: Agents' Arena versi terbaru with my friends?

        -

        A: You can play Ejen Ali: Agents' Arena versi terbaru with your friends by using these steps:

        -
          -
        • For Android and iOS devices: Go to the game's main menu, tap on the friends icon, tap on the add friend button, enter your friend's username or ID, or scan their QR code, tap on the send request button, wait for your friend to accept your request, tap on the invite button, select the mode and map you want to play, tap on the start button.
        • -
        • For PC devices: Go to the game's main menu, click on the friends icon, click on the add friend button, enter your friend's username or ID, or scan their QR code, click on the send request button, wait for your friend to accept your request, click on the invite button, select the mode and map you want to play, click on the start button.
        • -
        -

        Q: How can I get more coins and gems in Ejen Ali: Agents' Arena versi terbaru?

        -

        A: You can get more coins and gems in Ejen Ali: Agents' Arena versi terbaru by using these methods:

        -
          -
        • Play more matches and missions and win them to earn coins and gems as rewards.
        • -
        • Complete daily and weekly challenges and achievements to earn coins and gems as rewards.
        • -
        • Watch ads and videos to earn coins and gems as rewards.
        • -
        • Invite and refer your friends to play the game to earn coins and gems as rewards.
        • -
        • Purchase coins and gems with real money using in-app purchases.
        • -

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get TIDAL Music Premium for Free with APK Mod Download.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get TIDAL Music Premium for Free with APK Mod Download.md deleted file mode 100644 index 1b1a67ee81de7386b45e212963e2e87fa151e8f5..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get TIDAL Music Premium for Free with APK Mod Download.md +++ /dev/null @@ -1,91 +0,0 @@ -
        -

        Tidal Music APK Premium: How to Enjoy High-Quality Music Streaming for Free

        -

        Do you love music and want to listen to it in the best quality possible? Do you want to access exclusive content from your favorite artists and discover new music that suits your taste? Do you want to save money and data by downloading and playing offline content without ads or interruptions? If you answered yes to any of these questions, then you should try Tidal Music APK Premium, a modded version of the official app that unlocks all the premium features for free. In this article, we will tell you what Tidal Music is, what Tidal Music APK Premium is, how to get it, and what are the benefits of using it.

        -

        What is Tidal Music and Why You Should Try It

        -

        Tidal Music is a music streaming service that offers high-fidelity sound quality, exclusive content, and personalized recommendations. Unlike other streaming services that compress the audio files to save bandwidth, Tidal Music preserves the original sound quality of the music as intended by the artists. This means that you can hear every detail, nuance, and emotion of the music on your device.

        -

        tidal music apk premium


        Download File ✒ ✒ ✒ https://ssurll.com/2uNSBI



        -

        Tidal Music has over 70 million songs, 250,000 videos, and thousands of playlists and podcasts to suit your mood and taste. You can explore different genres, moods, activities, themes, and regions with ease. You can also enjoy exclusive content from your favorite artists, such as live performances, behind-the-scenes videos, interviews, documentaries, and more. You can also access editorial content from experts and influencers who share their insights and opinions on music and culture.

        -

        Tidal Music supports offline listening, lyrics display, track info, and suggested tracks. You can download any song, playlist, album, or video for offline listening without using your data. You can also view the lyrics of any song and sing along with it. You can also get more information about the song, such as the artist, album, genre, release date, and more. You can also discover new songs that are similar to the one you are listening to with the suggested tracks feature.

        -

        What is Tidal Music APK Premium and How to Get It

        -

        Tidal Music APK Premium is a modded version of the official app that unlocks all the premium features for free. Normally, you would have to pay a monthly subscription fee to access the premium features of Tidal Music, such as unlimited skips, on-demand playback of any song or playlist, offline downloads, and ad-free listening without DRM restrictions. However, with Tidal Music APK Premium, you can enjoy all these features without paying a dime.

        -

        Tidal Music APK Premium also enables you to access the high audio quality (320 KBPS) streaming option that is normally reserved for paid subscribers. This means that you can listen to music in the best quality possible on your device without compromising your data usage. You can also choose between normal, high, and master quality depending on your preference and device capability.

        -

        To get Tidal Music APK Premium, you need to follow these steps:

        -
          -
        • Use a VPN and set it to USA server when creating an account inside the app
        • -
        • Create a free account using this link
        • -
        • Upon selecting subscription, tap the "Free" tab and then continue
        • -
        • Download and install the modded app from this link
        • -
        • Sign in with your free account and enjoy the premium features
        • -
        -

        What are the Benefits of Using Tidal Music APK Premium

        -

        Tidal Music APK Premium lets you experience the best sound quality possible on your device. You can hear every detail, nuance, and emotion of the music as intended by the artists. You can also adjust the sound quality according to your preference and device capability. You can enjoy music in normal, high, or master quality depending on your choice.

        -

        Tidal Music APK Premium gives you access to exclusive content from your favorite artists, such as live performances, behind-the-scenes videos, interviews, documentaries, and more. You can also access editorial content from experts and influencers who share their insights and opinions on music and culture. You can also watch high-definition videos of concerts, festivals, and events from around the world.

        -

        Tidal Music APK Premium helps you discover new music that matches your preferences and mood with personalized recommendations and curated playlists. You can explore different genres, moods, activities, themes, and regions with ease. You can also create your own playlists and share them with your friends. You can also follow your favorite artists and get notified when they release new music or content.

        -

        -

        Tidal Music APK Premium saves you money and data by allowing you to download and play offline content without ads or interruptions. You can download any song, playlist, album, or video for offline listening without using your data. You can also play them without any ads or DRM restrictions. You can also enjoy unlimited skips and play any songs or playlists you want without any limitations.

        -

        Conclusion

        -

        Tidal Music APK Premium is a great way to enjoy high-quality music streaming for free. It offers all the premium features of the official app without paying a dime. It also enables you to access the high audio quality (320 KBPS) streaming option that is normally reserved for paid subscribers. It is easy to get and use with a few simple steps.

        -

        If you love music and want to experience it in the best way possible, you should try Tidal Music APK Premium today. You will not regret it.

        -

        FAQs

        -
          -
        • Q: Is Tidal Music APK Premium safe to use?
        • -
        • A: Yes, Tidal Music APK Premium is safe to use as long as you download it from a trusted source like this link. However, you should always use a VPN when creating an account and using the app to avoid any potential issues.
        • -
        • Q: Is Tidal Music APK Premium legal?
        • -
        • A: Tidal Music APK Premium is not legal as it violates the terms and conditions of the official app. However, there is no risk of getting banned or sued as long as you use a VPN when creating an account and using the app.
        • -
        • Q: What devices are compatible with Tidal Music APK Premium?
        • -
        • A: Tidal Music APK Premium is compatible with any Android device that runs on Android 4.4 or higher. It is not compatible with iOS devices.
        • -
        • Q: How do I update Tidal Music APK Premium?
        • A: To update Tidal Music APK Premium, you need to uninstall the old version and install the new version from this link. You can also check for updates from within the app by tapping on the menu icon and then on "About".
        • -
        • Q: How do I cancel my Tidal Music APK Premium subscription?
        • -
        • A: To cancel your Tidal Music APK Premium subscription, you need to go to this link and sign in with your free account. Then, you need to tap on "Manage Subscription" and then on "Cancel Subscription". You can also cancel your subscription from within the app by tapping on the menu icon and then on "Account".
        • -

        -
        -
        \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py deleted file mode 100644 index 6d2a2a4b6b809ba1106f9a57cb6f241dc083e670..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -import hydra -from hydra.core.config_store import ConfigStore -import logging -from omegaconf import MISSING, OmegaConf -import os -import os.path as osp -from pathlib import Path -import subprocess -from typing import Optional - -from fairseq.data.dictionary import Dictionary -from fairseq.dataclass import FairseqDataclass - -script_dir = Path(__file__).resolve().parent -config_path = script_dir / "config" - - -logger = logging.getLogger(__name__) - - -@dataclass -class KaldiInitializerConfig(FairseqDataclass): - data_dir: str = MISSING - fst_dir: Optional[str] = None - in_labels: str = MISSING - out_labels: Optional[str] = None - wav2letter_lexicon: Optional[str] = None - lm_arpa: str = MISSING - kaldi_root: str = MISSING - blank_symbol: str = "" - silence_symbol: Optional[str] = None - - -def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path: - in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt" - if not in_units_file.exists(): - - logger.info(f"Creating {in_units_file}") - - with open(in_units_file, "w") as f: - print(" 0", file=f) - i = 1 - for symb in vocab.symbols[vocab.nspecial :]: - if not symb.startswith("madeupword"): - print(f"{symb} {i}", file=f) - i += 1 - return in_units_file - - -def create_lexicon( - cfg: KaldiInitializerConfig, - fst_dir: Path, - unique_label: str, - in_units_file: Path, - out_words_file: Path, -) -> (Path, Path): - - disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt" - lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt" - disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt" - if ( - not lexicon_file.exists() - or not disambig_lexicon_file.exists() - or not disambig_in_units_file.exists() - ): - logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})") - - assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels - - if cfg.wav2letter_lexicon is not None: - lm_words = set() - with open(out_words_file, "r") as lm_dict_f: - for line in lm_dict_f: - lm_words.add(line.split()[0]) - - num_skipped = 0 - total = 0 - with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open( - lexicon_file, "w" - ) as out_f: - for line in w2l_lex_f: - items = line.rstrip().split("\t") - assert len(items) == 2, items - if items[0] in lm_words: - print(items[0], items[1], file=out_f) - else: - num_skipped += 1 - logger.debug( - f"Skipping word {items[0]} as it was not found in LM" - ) - total += 1 - if num_skipped > 0: - logger.warning( - f"Skipped {num_skipped} out of {total} words as they were not found in LM" - ) - else: - with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f: - for line in in_f: - symb = line.split()[0] - if symb != "" and symb != "" and symb != "": - 
print(symb, symb, file=out_f) - - lex_disambig_path = ( - Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl" - ) - res = subprocess.run( - [lex_disambig_path, lexicon_file, disambig_lexicon_file], - check=True, - capture_output=True, - ) - ndisambig = int(res.stdout) - disamib_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl" - res = subprocess.run( - [disamib_path, "--include-zero", in_units_file, str(ndisambig)], - check=True, - capture_output=True, - ) - with open(disambig_in_units_file, "wb") as f: - f.write(res.stdout) - - return disambig_lexicon_file, disambig_in_units_file - - -def create_G( - kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str -) -> (Path, Path): - - out_words_file = fst_dir / f"kaldi_dict.{arpa_base}.txt" - grammar_graph = fst_dir / f"G_{arpa_base}.fst" - if not grammar_graph.exists() or not out_words_file.exists(): - logger.info(f"Creating {grammar_graph}") - arpa2fst = kaldi_root / "src/lmbin/arpa2fst" - subprocess.run( - [ - arpa2fst, - "--disambig-symbol=#0", - f"--write-symbol-table={out_words_file}", - lm_arpa, - grammar_graph, - ], - check=True, - ) - return grammar_graph, out_words_file - - -def create_L( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - lexicon_file: Path, - in_units_file: Path, - out_words_file: Path, -) -> Path: - lexicon_graph = fst_dir / f"L.{unique_label}.fst" - - if not lexicon_graph.exists(): - logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})") - make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl" - fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" - fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - def write_disambig_symbol(file): - with open(file, "r") as f: - for line in f: - items = line.rstrip().split() - if items[0] == "#0": - out_path = str(file) + "_disamig" - with open(out_path, "w") as out_f: - print(items[1], file=out_f) - return out_path - - return None - - in_disambig_sym = write_disambig_symbol(in_units_file) - assert in_disambig_sym is not None - out_disambig_sym = write_disambig_symbol(out_words_file) - assert out_disambig_sym is not None - - try: - with open(lexicon_graph, "wb") as out_f: - res = subprocess.run( - [make_lex, lexicon_file], capture_output=True, check=True - ) - assert len(res.stderr) == 0, res.stderr.decode("utf-8") - res = subprocess.run( - [ - fstcompile, - f"--isymbols={in_units_file}", - f"--osymbols={out_words_file}", - "--keep_isymbols=false", - "--keep_osymbols=false", - ], - input=res.stdout, - capture_output=True, - ) - assert len(res.stderr) == 0, res.stderr.decode("utf-8") - res = subprocess.run( - [fstaddselfloops, in_disambig_sym, out_disambig_sym], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=olabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(lexicon_graph) - raise - except AssertionError: - os.remove(lexicon_graph) - raise - - return lexicon_graph - - -def create_LG( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - lexicon_graph: Path, - grammar_graph: Path, -) -> Path: - lg_graph = fst_dir / f"LG.{unique_label}.fst" - - if not lg_graph.exists(): - logger.info(f"Creating {lg_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = 
kaldi_root / "src/fstbin/fstdeterminizestar" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - try: - with open(lg_graph, "wb") as out_f: - res = subprocess.run( - [fsttablecompose, lexicon_graph, grammar_graph], - capture_output=True, - check=True, - ) - res = subprocess.run( - [ - fstdeterminizestar, - "--use-log=true", - ], - input=res.stdout, - capture_output=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstpushspecial], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=ilabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(lg_graph) - raise - - return lg_graph - - -def create_H( - kaldi_root: Path, - fst_dir: Path, - disambig_out_units_file: Path, - in_labels: str, - vocab: Dictionary, - blk_sym: str, - silence_symbol: Optional[str], -) -> (Path, Path, Path): - h_graph = ( - fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst" - ) - h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt" - disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int") - disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int") - if ( - not h_graph.exists() - or not h_out_units_file.exists() - or not disambig_in_units_file_int.exists() - ): - logger.info(f"Creating {h_graph}") - eps_sym = "" - - num_disambig = 0 - osymbols = [] - - with open(disambig_out_units_file, "r") as f, open( - disambig_out_units_file_int, "w" - ) as out_f: - for line in f: - symb, id = line.rstrip().split() - if line.startswith("#"): - num_disambig += 1 - print(id, file=out_f) - else: - if len(osymbols) == 0: - assert symb == eps_sym, symb - osymbols.append((symb, id)) - - i_idx = 0 - isymbols = [(eps_sym, 0)] - - imap = {} - - for i, s in enumerate(vocab.symbols): - i_idx += 1 - isymbols.append((s, i_idx)) - imap[s] = i_idx - - fst_str = [] - - node_idx = 0 - root_node = node_idx - - special_symbols = [blk_sym] - if silence_symbol is not None: - special_symbols.append(silence_symbol) - - for ss in special_symbols: - fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym)) - - for symbol, _ in osymbols: - if symbol == eps_sym or symbol.startswith("#"): - continue - - node_idx += 1 - # 1. from root to emitting state - fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol)) - # 2. from emitting state back to root - fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) - # 3. from emitting state to optional blank state - pre_node = node_idx - node_idx += 1 - for ss in special_symbols: - fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym)) - # 4. 
from blank state back to root - fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) - - fst_str.append("{}".format(root_node)) - - fst_str = "\n".join(fst_str) - h_str = str(h_graph) - isym_file = h_str + ".isym" - - with open(isym_file, "w") as f: - for sym, id in isymbols: - f.write("{} {}\n".format(sym, id)) - - with open(h_out_units_file, "w") as f: - for sym, id in osymbols: - f.write("{} {}\n".format(sym, id)) - - with open(disambig_in_units_file_int, "w") as f: - disam_sym_id = len(isymbols) - for _ in range(num_disambig): - f.write("{}\n".format(disam_sym_id)) - disam_sym_id += 1 - - fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" - fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - try: - with open(h_graph, "wb") as out_f: - res = subprocess.run( - [ - fstcompile, - f"--isymbols={isym_file}", - f"--osymbols={h_out_units_file}", - "--keep_isymbols=false", - "--keep_osymbols=false", - ], - input=str.encode(fst_str), - capture_output=True, - check=True, - ) - res = subprocess.run( - [ - fstaddselfloops, - disambig_in_units_file_int, - disambig_out_units_file_int, - ], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=olabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(h_graph) - raise - return h_graph, h_out_units_file, disambig_in_units_file_int - - -def create_HLGa( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - h_graph: Path, - lg_graph: Path, - disambig_in_words_file_int: Path, -) -> Path: - hlga_graph = fst_dir / f"HLGa.{unique_label}.fst" - - if not hlga_graph.exists(): - logger.info(f"Creating {hlga_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" - fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" - fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - - try: - with open(hlga_graph, "wb") as out_f: - res = subprocess.run( - [ - fsttablecompose, - h_graph, - lg_graph, - ], - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstdeterminizestar, "--use-log=true"], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmsymbols, disambig_in_words_file_int], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmepslocal], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(hlga_graph) - raise - - return hlga_graph - - -def create_HLa( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - h_graph: Path, - l_graph: Path, - disambig_in_words_file_int: Path, -) -> Path: - hla_graph = fst_dir / f"HLa.{unique_label}.fst" - - if not hla_graph.exists(): - logger.info(f"Creating {hla_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" - fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" - fstrmepslocal = kaldi_root / 
"src/fstbin/fstrmepslocal" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - - try: - with open(hla_graph, "wb") as out_f: - res = subprocess.run( - [ - fsttablecompose, - h_graph, - l_graph, - ], - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstdeterminizestar, "--use-log=true"], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmsymbols, disambig_in_words_file_int], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmepslocal], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(hla_graph) - raise - - return hla_graph - - -def create_HLG( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - hlga_graph: Path, - prefix: str = "HLG", -) -> Path: - hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst" - - if not hlg_graph.exists(): - logger.info(f"Creating {hlg_graph}") - - add_self_loop = script_dir / "add-self-loop-simple" - kaldi_src = kaldi_root / "src" - kaldi_lib = kaldi_src / "lib" - - try: - if not add_self_loop.exists(): - fst_include = kaldi_root / "tools/openfst-1.6.7/include" - add_self_loop_src = script_dir / "add-self-loop-simple.cc" - - subprocess.run( - [ - "c++", - f"-I{kaldi_src}", - f"-I{fst_include}", - f"-L{kaldi_lib}", - add_self_loop_src, - "-lkaldi-base", - "-lkaldi-fstext", - "-o", - add_self_loop, - ], - check=True, - ) - - my_env = os.environ.copy() - my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env['LD_LIBRARY_PATH']}" - - subprocess.run( - [ - add_self_loop, - hlga_graph, - hlg_graph, - ], - check=True, - capture_output=True, - env=my_env, - ) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - raise - - return hlg_graph - - -def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path: - if cfg.fst_dir is None: - cfg.fst_dir = osp.join(cfg.data_dir, "kaldi") - if cfg.out_labels is None: - cfg.out_labels = cfg.in_labels - - kaldi_root = Path(cfg.kaldi_root) - data_dir = Path(cfg.data_dir) - fst_dir = Path(cfg.fst_dir) - fst_dir.mkdir(parents=True, exist_ok=True) - - arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0] - unique_label = f"{cfg.in_labels}.{arpa_base}" - - with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f: - vocab = Dictionary.load(f) - - in_units_file = create_units(fst_dir, cfg.in_labels, vocab) - - grammar_graph, out_words_file = create_G( - kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base - ) - - disambig_lexicon_file, disambig_L_in_units_file = create_lexicon( - cfg, fst_dir, unique_label, in_units_file, out_words_file - ) - - h_graph, h_out_units_file, disambig_in_units_file_int = create_H( - kaldi_root, - fst_dir, - disambig_L_in_units_file, - cfg.in_labels, - vocab, - cfg.blank_symbol, - cfg.silence_symbol, - ) - lexicon_graph = create_L( - kaldi_root, - fst_dir, - unique_label, - disambig_lexicon_file, - disambig_L_in_units_file, - out_words_file, - ) - lg_graph = create_LG( - kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph - ) - hlga_graph = create_HLGa( - kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int - ) - hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph) - - # for debugging - # hla_graph = 
create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int) - # hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped") - # create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped") - - return hlg_graph - - -@hydra.main(config_path=config_path, config_name="kaldi_initializer") -def cli_main(cfg: KaldiInitializerConfig) -> None: - container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) - cfg = OmegaConf.create(container) - OmegaConf.set_struct(cfg, True) - initalize_kaldi(cfg) - - -if __name__ == "__main__": - - logging.root.setLevel(logging.INFO) - logging.basicConfig(level=logging.INFO) - - try: - from hydra._internal.utils import ( - get_args, - ) # pylint: disable=import-outside-toplevel - - cfg_name = get_args().config_name or "kaldi_initializer" - except ImportError: - logger.warning("Failed to get config name from hydra args") - cfg_name = "kaldi_initializer" - - cs = ConfigStore.instance() - cs.store(name=cfg_name, node=KaldiInitializerConfig) - - cli_main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/__init__.py deleted file mode 100644 index 69f21684872f72ae8ee26d9ff7d2d2b6e6d526c3..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/model_parallel/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import criterions, models, modules # noqa diff --git a/spaces/stomexserde/gpt4-ui/Examples/Biblia Hebrea Transliterada Pdf Free __TOP__.md b/spaces/stomexserde/gpt4-ui/Examples/Biblia Hebrea Transliterada Pdf Free __TOP__.md deleted file mode 100644 index 41604e652376520ab971043c191c976273b82fd0..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Biblia Hebrea Transliterada Pdf Free __TOP__.md +++ /dev/null @@ -1,22 +0,0 @@ - -

        How to Download the Biblia Hebrea Transliterada Pdf for Free

        -

        The Biblia Hebrea Transliterada (BHT) is a transliteration of the Hebrew Bible into Latin characters, following the pronunciation rules of Modern Hebrew. It is a useful tool for students and scholars who want to read the original text of the Scriptures without knowing the Hebrew alphabet.

        -

        There are several sources online where you can download the BHT pdf for free. Here are some of them:

        -

        Biblia Hebrea Transliterada Pdf Free


        DOWNLOAD ->>->>->> https://urlgoal.com/2uI81E



        -
          -
        • TORAH-DE YHWH HEBREO-ESPAÑOL.pdf: This is a Google Drive link that contains the BHT of the Torah (the first five books of the Bible) along with a Spanish translation. You can view it online or download it to your device[^1^].
        • -
        • Biblia Hebraica Stuttgartensia BHS: This is an Internet Archive page that contains the Biblia Hebraica Stuttgartensia (BHS), which is a critical edition of the Hebrew Bible based on the oldest manuscripts available. The BHS includes the BHT as well as various textual notes and apparatuses. You can download it in different formats, such as pdf, epub, or mobi[^2^].
        • -
        • Biblia Hebrea Transliterada Pdf Free: This is a Sway page that contains a direct link to download the BHT pdf for free. The link is hosted by MediaFire, a file-sharing service. You just need to click on the green button that says "Download" and then follow the instructions[^3^].
        • -
        -

        These are some of the options you have to download the BHT pdf for free. However, you should always be careful when downloading files from unknown sources, as they may contain viruses or malware that can harm your device. You should also respect the copyright laws and use the BHT pdf for personal or educational purposes only.

        - -

        If you want to learn more about the BHT and how it was created, you can read some of the books and articles that explain its history and methodology. For example, you can check out these sources:

        -
          -
        • Transliteration of the Hebrew Bible into English Characters: This is a book by S. D. Luzzatto that provides a comprehensive guide to the rules and principles of transliterating Hebrew into Latin characters. It also includes a table of correspondences between the Hebrew and English alphabets and a list of common Hebrew names and terms transliterated into English.
        • -
        • The Transliteration of Hebrew in the Internet Age: This is an article by Randall Buth and R. Steven Notley that discusses the challenges and opportunities of transliterating Hebrew in the digital era. It also proposes a standard system of transliteration that is compatible with Unicode and can be used across different platforms and applications.
        • -
        • The Biblia Hebraica Transliterada (BHT): A Transliteration of the Hebrew Bible into Latin Characters: This is a dissertation by David Steinberg that describes the process and rationale behind the creation of the BHT. It also analyzes the advantages and disadvantages of using transliteration as a tool for studying and teaching the Hebrew Bible.
        • -
        -

        These are some of the resources you can consult if you want to deepen your knowledge of the BHT and its benefits for learning Hebrew. You can also practice your reading skills by comparing the BHT with the original Hebrew text and noticing the differences and similarities. By doing so, you will be able to appreciate the beauty and complexity of the Hebrew language and culture.

        -

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Cnc Usb Controller Software Keygen 103.md b/spaces/stomexserde/gpt4-ui/Examples/Cnc Usb Controller Software Keygen 103.md deleted file mode 100644 index 397cf47de1a0836edc8fe900d9b38d53068466bc..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Cnc Usb Controller Software Keygen 103.md +++ /dev/null @@ -1,23 +0,0 @@ - -

        How to Activate CNC USB Controller Software with Keygen

        -

        CNC USB Controller software is a program that allows you to control your CNC machine with direct step/direction signals from your PC. It uses the USB port for fast and reliable communication with most modern computers and laptops. You can use it with various types of motor drivers, such as stepper or servo motors, and it does not require any additional software like Mach3.

        -

        Cnc Usb Controller Software Keygen 103


        Download Zip ✦✦✦ https://urlgoal.com/2uIcbL



        -

        However, to use the full features of CNC USB Controller software, you need to activate it with a license. A license is a file that contains your registration key and controller serial number. You can obtain a license by purchasing it from the official website of planet-cnc.com, the developer of CNC USB Controller software[^4^]. Alternatively, you can use a keygen to generate a license file for free.

        -

        A keygen is a software tool that can create valid license files for various programs. It usually works by cracking the algorithm that the program uses to verify the license. However, using a keygen may be illegal or unethical, as it violates the terms and conditions of the software developer. Therefore, we do not recommend or endorse using a keygen to activate CNC USB Controller software.

        -

        If you still want to use a keygen to activate CNC USB Controller software, you need to follow these steps:

        -
          -
        1. Download CNC USB Controller software from the official website[^1^] or from other sources[^2^]. Install it on your PC and run it.
        2. Download a keygen for CNC USB Controller software from online sources[^3^]. Be careful of viruses or malware that may be hidden in the keygen file; scan it with antivirus software before opening it.
        3. Open the keygen and enter your controller serial number. You can find it on the back of your MK controller board or on the label of your controller box. Click on "Generate" to create a license file.
        4. Open CNC USB Controller software and go to the "Help" menu. Select "License activation" and paste the registration key that you got from the keygen. Alternatively, you can import the license file that you saved from the keygen.
        5. Click on "Activate" and wait for the confirmation message. You should see your license information in the bottom right corner of the software window.
        -

        Congratulations! You have successfully activated CNC USB Controller software with a keygen. However, please note that this method may not work for future versions of the software or for different types of controllers. Also, you may face legal or ethical issues if you use a keygen to activate CNC USB Controller software without paying for it.

        - -

        Now that you have activated CNC USB Controller software with a keygen, you can use it to control your CNC machine. You can create your own G-code files or import them from other sources. You can also use the built-in editor to modify or create G-code commands. You can preview your toolpath and simulate your machining process before sending it to your controller.
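        If you have never seen G-code before, here is a minimal, hypothetical sketch (in Python) of what such a file can contain. The file name example.nc, the coordinates, and the feed rates are placeholder values, and the commands your machine actually accepts depend on your controller and driver setup, so treat this purely as an illustration rather than a ready-to-run program.

```python
# Hypothetical sketch: write a tiny G-code program to a file that a CNC
# controller program could then load. All values are placeholders and are
# not tuned for any real machine.
gcode_lines = [
    "G21",             # use millimetres as the unit
    "G90",             # absolute positioning
    "G0 Z5",           # rapid move: lift the tool to a safe height
    "G0 X0 Y0",        # rapid move: go to the XY origin
    "G1 Z-1 F100",     # feed move: plunge 1 mm into the material
    "G1 X20 Y0 F300",  # feed move: cut a straight 20 mm line along X
    "G0 Z5",           # rapid move: retract the tool
    "M30",             # end of program
]

with open("example.nc", "w") as f:
    f.write("\n".join(gcode_lines) + "\n")
```

        You could then load the generated file into the controller software, preview the toolpath, and simulate the job before running it on the machine.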

        -

        -

        To use CNC USB Controller software, you need to connect your PC to your MK controller board with a USB cable. You also need to connect your motor drivers and power supply to your controller board. Make sure that your controller board is compatible with your motor drivers and that they are configured correctly. You can check the manual of your controller board and motor drivers for more details.

        -

        Once you have connected everything, you can open CNC USB Controller software and select your controller type from the "Settings" menu. You can also adjust other settings, such as units, feed rate, spindle speed, etc. You can then load your G-code file or create one with the editor. You can use the buttons on the software window to control your machine, such as jog, home, zero, start, stop, etc.

        -

        CNC USB Controller software is a powerful and easy-to-use program that can help you create amazing projects with your CNC machine. However, you should always be careful and responsible when using a CNC machine. Follow the safety instructions and precautions of your machine and controller. Wear protective equipment and avoid touching any moving parts. Always supervise your machine and stop it immediately if something goes wrong.

        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Do Aankhen Barah Haath In Hindi Dubbed 720p Torrent HOT.md b/spaces/stomexserde/gpt4-ui/Examples/Do Aankhen Barah Haath In Hindi Dubbed 720p Torrent HOT.md deleted file mode 100644 index e57643b4e3789a78388b801557bb231e3339cd98..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Do Aankhen Barah Haath In Hindi Dubbed 720p Torrent HOT.md +++ /dev/null @@ -1,30 +0,0 @@ - -

        Do Aankhen Barah Haath: A Classic Hindi Movie You Can Watch Online

        -

        Do you love old classic Hindi movies? If yes, then you might have heard of Do Aankhen Barah Haath, a 1957 film directed by and starring V. Shantaram. The film is considered one of the best examples of Indian cinema, and has won several national and international awards. But did you know that you can watch this movie online in Hindi dubbed 720p quality? Here are some reasons why you should watch Do Aankhen Barah Haath online.

        -

        What is Do Aankhen Barah Haath about?

        -

        Do Aankhen Barah Haath (meaning Two Eyes Twelve Hands) is a story of a young jail warden, Adinath, who decides to reform six dangerous prisoners by taking them to a dilapidated country farm. He makes them work hard and teaches them the values of honesty, dignity and compassion. He also falls in love with a beautiful woman named Champa, who lives nearby. The film shows how Adinath transforms the lives of the prisoners and himself through his vision and courage.

        -

        Do Aankhen Barah Haath in hindi dubbed 720p torrent


        DOWNLOAD ✦✦✦ https://urlgoal.com/2uIaSS



        -

        Why is Do Aankhen Barah Haath a classic?

        -

        Do Aankhen Barah Haath is not just a movie, but a masterpiece of art and social message. The film explores the themes of human dignity, redemption and non-violence in a powerful and poetic way. The film also showcases the brilliant performance of V. Shantaram, who plays the role of Adinath as well as directs the film. The film also features Sandhya, who plays Champa, and Baburao Pendharkar, who plays one of the prisoners. The film has a memorable soundtrack composed by Vasant Desai, with lyrics by Bharat Vyas. The film has won several accolades, such as the National Film Award for Best Feature Film, the Silver Bear at the Berlin International Film Festival, and a nomination for the Golden Globe Award for Best Foreign Film.

        -

        How can you watch Do Aankhen Barah Haath online?

        -

        If you want to watch Do Aankhen Barah Haath online in Hindi dubbed 720p quality, you have some options. You can watch it on YouTube[^1^] [^2^], where you can find the full movie uploaded by various channels. You can also watch it on ZEE5[^3^], where you can stream it legally and enjoy other features like subtitles, recommendations and offline viewing. You can also download it from torrent sites, but we do not recommend that as it is illegal and unsafe.

        -

        -

        Conclusion

        -

        Do Aankhen Barah Haath is a classic Hindi movie that you should not miss. It is a story of hope, love and humanity that will touch your heart and inspire you. You can watch it online in Hindi dubbed 720p quality from various sources, but we suggest you watch it on ZEE5 for the best experience. So what are you waiting for? Watch Do Aankhen Barah Haath online today and enjoy this timeless gem of Indian cinema.

        - -

        What are some interesting trivia about Do Aankhen Barah Haath?

        -

        Do Aankhen Barah Haath is not only a classic movie, but also a movie with some interesting trivia behind it. Here are some of them:

        -
          -
        • The movie is based on a true story that happened in British India in Swatantrapur, Maharashtra[^3^]. The real-life jail warden was called Morarji Desai, who later became the Prime Minister of India.
        • -
        • The movie was the first Indian film to win a Golden Globe Award under the Samuel Goldwyn category[^3^]. It also won a Silver Bear at the Berlin International Film Festival and a nomination for the Academy Award for Best Foreign Language Film.
        • -
        • The song "Ae Maalik Tere Bandhe Hum" sung by Lata Mangeshkar from this movie has been adopted as the anthem by a school in Pakistan[^2^]. The song is a prayer for peace and harmony among all living beings.
        • -
        • V. Shantaram injured his eye while fighting with a bull in one of the scenes of the movie[^2^]. He had to wear an eye patch for some time after that.
        • -
        • The movie inspired several other films with similar themes, such as Subhash Ghai's Karma (1986), which also featured six prisoners being reformed by a jail warden[^2^].
        • -
        -

        What are some reviews of Do Aankhen Barah Haath?

        -

        Do Aankhen Barah Haath has received mostly positive reviews from critics and audiences alike. Here are some of them:

        -
        "Do Aankhen Barah Haath is one of the finest films ever made in India. It is a film that transcends the boundaries of language, culture and time. It is a film that speaks to the universal human spirit and its capacity for goodness. V. Shantaram's direction, performance and vision are unparalleled in Indian cinema. The film is a must-watch for anyone who loves cinema." - Rajeev Masand, CNN-IBN
        -
        "Do Aankhen Barah Haath is a masterpiece of social realism and humanism. The film depicts the struggle of a jail warden who tries to reform six hardened criminals by making them work on a farm. The film shows how love, compassion and faith can transform even the most hopeless souls. The film also has some memorable songs by Vasant Desai and Bharat Vyas, especially the iconic "Ae Maalik Tere Bandhe Hum". The film is a landmark in Indian cinema and deserves to be seen by everyone." - Anupama Chopra, Film Companion
        -
        "Do Aankhen Barah Haath is a film that challenges the conventional notions of crime and punishment. The film explores the possibility of rehabilitation and redemption through non-violence and hard work. The film also showcases the brilliant acting and directing skills of V. Shantaram, who plays the role of Adinath with conviction and grace. The film is a classic that will remain relevant for generations to come." - Raja Sen, Rediff.com

        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Photoshop Cs6 With Serial Number Free.md b/spaces/stomexserde/gpt4-ui/Examples/Download Photoshop Cs6 With Serial Number Free.md deleted file mode 100644 index 1a503c72c40324d0ac6eb1b7d18e8317230cf42d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Photoshop Cs6 With Serial Number Free.md +++ /dev/null @@ -1,30 +0,0 @@ - -

        How to Download Photoshop CS6 with Serial Number for Free

        -

        If you are looking for a way to download Photoshop CS6 with serial number for free, you might be disappointed to know that Adobe has stopped supporting this version of the software and it is not available on their official website. However, there are still some ways to get Photoshop CS6 legally and safely, without breaking any laws or risking your computer's security.

        -

        In this article, we will show you how to get Photoshop CS6 for free outright, as well as the dangers of using cracked or pirated versions of the software. We will also review some of the best free alternatives to Photoshop CS6 that you can use for your photo editing needs.

        -

        download photoshop cs6 with serial number free


        Download File »»» https://urlgoal.com/2uI80D



        - -

        How to Get Photoshop CS6 for Free Outright

        -

One of the ways to get Photoshop CS6 outright is to buy a licensed copy on eBay or Amazon from someone who purchased the boxed version of the software at the time of its release in 2012. This way, you will get an official program that is no longer supported by the developers but does not have any of the bugs or viruses that you might encounter in pirated versions. The price of Photoshop CS6 on these platforms is usually around $720[^2^].

        -

        Another way to get Photoshop CS6 for free outright is to download it from a third-party website that offers free software downloads. However, this method is not recommended as it may expose your computer to malware, spyware, or ransomware that can harm your system or steal your personal information. Moreover, downloading Photoshop CS6 from an unauthorized source may violate Adobe's terms of service and copyright laws, which can result in legal consequences or penalties.

        - -

        The Dangers of Using Cracked or Pirated Versions of Photoshop CS6

        -

        Some people may be tempted to use cracked or pirated versions of Photoshop CS6 that claim to offer the full functionality of the software without paying anything. However, these versions are illegal and unsafe, as they may contain malicious code that can damage your computer or compromise your security. Some of the risks of using cracked or pirated versions of Photoshop CS6 are:

        -

        -
          -
        • They may not work properly or crash frequently, causing you to lose your work or waste your time.
        • -
        • They may have missing features or functions that limit your creative possibilities or affect the quality of your output.
        • -
        • They may infect your computer with viruses, worms, trojans, or other malware that can slow down your system, corrupt your files, or steal your data.
        • -
        • They may expose your computer to hackers or cybercriminals who can access your personal information, such as your passwords, bank accounts, credit cards, or identity.
        • -
        • They may trigger Adobe's anti-piracy measures, such as activation errors, license revocation, or legal action.
        • -
        -

        Therefore, it is better to avoid using cracked or pirated versions of Photoshop CS6 and opt for legal and safe ways to get the software.

        - -

        The Best Free Alternatives to Photoshop CS6

        -

        If you do not want to spend money on Photoshop CS6 or risk using illegal versions of the software, you can try some of the best free alternatives to Photoshop CS6 that offer similar features and functions for photo editing. Some of these alternatives are:

        -
          -
        • GIMP: GIMP is a free and open-source image editor that can perform many tasks that Photoshop can do, such as cropping, resizing, retouching, color correction, filters, layers, masks, and more. GIMP also supports various file formats, including PSD files. GIMP has a customizable interface and a large community of users and developers who provide tutorials and plugins.
        • -
        • Paint.NET: Paint.NET is a free and easy-to-use image editor that can handle basic and advanced photo editing tasks. Paint.NET has a simple and intuitive interface that resembles Microsoft Paint but with more features and tools. Paint.NET supports layers, effects, adjustments, plugins, and unlimited undo history. Paint.NET can also open and save PSD files with some limitations.
        • -
        • Pixlr: Pixlr is a free online image editor that works in your browser without any installation or registration. Pixlr has two versions: Pixlr X and Pixlr E. Pixlr X is a simple and fast editor that can

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/FS2004 - PSS B777 Professional 2004 Free NEW Download.md b/spaces/stomexserde/gpt4-ui/Examples/FS2004 - PSS B777 Professional 2004 Free NEW Download.md deleted file mode 100644 index cad448b0cf5185407d7d9bd380e352cdc678c093..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/FS2004 - PSS B777 Professional 2004 Free NEW Download.md +++ /dev/null @@ -1,95 +0,0 @@ -
          -

          FS2004 - PSS B777 Professional 2004: A Comprehensive Guide

          -

          If you are a fan of flight simulation games, you probably know that Microsoft Flight Simulator 2004 (FS2004) is one of the most popular and realistic simulators ever created. However, even with its impressive features and graphics, FS2004 can be enhanced with various add-ons that provide more aircraft models, scenery, weather, airports, and more. One of these add-ons is FS2004 - PSS B777 Professional 2004, a flight simulation add-on that features the Boeing 777 aircraft for FS2004.

          -

The Boeing 777 is one of the world's most advanced and widely used wide-body airliners. It has a range of up to 9,700 nautical miles (17,960 km) and can carry up to 550 passengers in a single-class configuration. It is also the first airliner to be designed entirely by computer, using computer-aided design (CAD) software. The Boeing 777 has been in service since 1995 and has been operated by many airlines around the world.

          -

          FS2004 - PSS B777 Professional 2004 free download


          Download File - https://urlgoal.com/2uIb8n



          -

          FS2004 - PSS B777 Professional 2004 is a flight simulation add-on that recreates the Boeing 777 aircraft in FS2004 with amazing detail, accuracy, and realism. It was developed by Phoenix Simulation Software (PSS), a renowned developer of high-quality airliner simulations. It was originally released in 2005 and has been updated several times since then. It is available for purchase and download from the official website or other sources.

          -

          In this article, we will provide you with a comprehensive guide on how to install, use, and enjoy FS2004 - PSS B777 Professional 2004. We will cover the main features and benefits of this add-on, such as realistic flight model, weather radar, virtual cockpit, liveries, etc. We will also show you how to start and fly the Boeing 777 aircraft in FS2004 using this add-on. We will also compare this add-on with other Boeing 777 add-ons for FS2004 or other flight simulators. By the end of this article, you will have a better understanding of what FS2004 - PSS B777 Professional 2004 can offer you and why it is a must-have for any flight simulation enthusiast.

          -

          Installation

          -

          Before you can use FS2004 - PSS B777 Professional 2004, you need to install it on your computer. To do so, you need to follow these steps:

          -
            -
          1. Purchase and download FS2004 - PSS B777 Professional 2004 from the official website or other sources. The file size is about 61 MB.
          2. -
          3. Extract the downloaded file using a program like WinZip or WinRAR. You will get a folder named "PSS-B772" that contains several subfolders and files.
          4. -
5. Copy the folder "PSS-B772" to your FS2004 main folder. This is usually located at C:\Program Files\Microsoft Games\Flight Simulator 9. If you have installed FS2004 in a different location, you need to copy the folder there.

          6. -
          7. Run the file "PSS-B772.exe" in the folder "PSS-B772". This will launch the installer that will guide you through the installation process. You need to accept the license agreement, choose the destination folder, and select the components you want to install. You can choose to install the Boeing 777-200ER, the Boeing 777-300, or both. You can also choose to install the liveries (paint schemes) of various airlines that operate the Boeing 777 aircraft.
          8. -
          9. After the installation is complete, you will see a confirmation message that says "Installation Successful". You can close the installer and launch FS2004.
          10. -
          -

          To ensure compatibility with your FS2004 version and other add-ons, you need to do the following:

          -
            -
          • Make sure that you have installed the latest FS2004 update (version 9.1). You can download it from here.
          • -
          • Make sure that you have installed the latest PSS B777 Professional 2004 update (version 1.3). You can download it from here.
          • -
          • Make sure that you have installed the latest FSUIPC module (version 3.999z9b). This is a freeware utility that enhances the interface between FS2004 and other add-ons. You can download it from here.
          • -
          -

          To use the load manager and configure the aircraft settings and preferences, you need to do the following:

          -
            -
          • Run the file "LoadManager.exe" in the folder "PSS-B772". This will launch the load manager that allows you to adjust the fuel, payload, and center of gravity of your aircraft. You can also save and load different configurations for different flights.
          • -
          • Run the file "ConfigManager.exe" in the folder "PSS-B772". This will launch the config manager that allows you to customize various options of your aircraft, such as panel layout, sound volume, display units, etc. You can also enable or disable certain features, such as weather radar, TCAS, GPWS, etc.
          • -
          -

          To troubleshoot common installation issues and errors, such as registry problems, missing files, etc., you can refer to the following sources:

          -

          -
            -
          • The user manual that comes with the add-on. You can find it in PDF format in the folder "PSS-B772\Documentation". It contains detailed information on how to install, use, and enjoy this add-on.
          • -
          • The support forum that is hosted by PSS. You can find it here. It contains many topics and posts from other users who have encountered similar problems or questions. You can also post your own queries and get help from PSS staff or other users.
          • -
          • The FAQ section that is available on the official website. You can find it here. It contains answers to some of the most frequently asked questions about this add-on.
          • -
          -

          Review

          -

          Now that you have installed FS2004 - PSS B777 Professional 2004 on your computer, you are ready to start and fly the Boeing 777 aircraft in FS2004 using this add-on. In this section, we will review some of the main aspects and features of this add-on, such as how to use the instrument panel, gauges, FMC, autopilot, and other systems in the cockpit; how to enjoy the visual and sound effects of this add-on, such as animations, textures, lighting, engine noise, etc.; and how to compare this add-on with other Boeing 777 add-ons for FS2004 or other flight simulators.

          -

          How to use the instrument panel, gauges, FMC, autopilot, and other systems in the cockpit

          -

          One of the most impressive and realistic features of FS2004 - PSS B777 Professional 2004 is the instrument panel, gauges, FMC, autopilot, and other systems in the cockpit. These are designed to simulate the actual Boeing 777 cockpit as closely as possible, with accurate and functional displays, switches, buttons, knobs, levers, etc. You can interact with these elements using your mouse or keyboard, and they will respond accordingly. You can also customize the panel layout and view angle to suit your preferences.

          -

          The instrument panel consists of six main displays: two Primary Flight Displays (PFDs), two Navigation Displays (NDs), one Engine Indication and Crew Alerting System (EICAS), and one Flight Management Computer (FMC). The PFDs show you the basic flight information, such as airspeed, altitude, attitude, heading, vertical speed, etc. The NDs show you the navigation information, such as route, waypoints, map, weather radar, traffic, etc. The EICAS shows you the engine and system information, such as fuel, hydraulics, electrics, etc. The FMC is the brain of the aircraft, where you can enter and modify the flight plan, performance data, settings, etc.

          -

          The gauges are located below the displays and show you more detailed information about the aircraft systems and status. They include the standby instruments (airspeed indicator, altimeter, attitude indicator), the fuel gauges (total fuel quantity, fuel flow), the engine gauges (N1 fan speed, EGT exhaust gas temperature), the brake temperature gauge, the flap position indicator, etc.

          -

          The FMC is a complex and powerful device that allows you to manage and control the flight of the Boeing 777 aircraft. It has a keypad and a screen where you can enter and view various data and commands. You can access different pages and menus by pressing the buttons on the sides of the screen. You can also use the FMC to interact with other systems in the cockpit, such as the autopilot, the weather radar, the TCAS (Traffic Collision Avoidance System), etc.

          -

          The autopilot is a system that can automatically fly the aircraft according to the parameters and modes that you set. It has a control panel on the glareshield where you can select and adjust various functions and settings. You can use the autopilot to control the speed, altitude, heading, vertical speed, flight level change, approach mode, etc. You can also use the autopilot to follow the flight plan that you have entered in the FMC.

          -

          Other systems in the cockpit include the communication and navigation radios (COM1/2, NAV1/2, ADF1/2, DME1/2, ATC, etc.), the audio panel (where you can select and adjust the volume of the radios and other sounds), the overhead panel (where you can control the electrical, hydraulic, pneumatic, fuel, fire, lighting, and other systems), the pedestal (where you can control the throttle, spoilers, flaps, landing gear, parking brake, etc.), and the yoke and rudder pedals (where you can manually fly the aircraft).

          -

          How to enjoy the visual and sound effects of this add-on, such as animations, textures, lighting, engine noise, etc.

          -

          Another remarkable and realistic feature of FS2004 - PSS B777 Professional 2004 is the visual and sound effects of this add-on, such as animations, textures, lighting, engine noise, etc. These are designed to simulate the actual Boeing 777 aircraft as closely as possible, with high-quality and immersive graphics and sounds. You can enjoy these effects both inside and outside the cockpit.

          -

          The animations include the moving parts of the aircraft, such as the landing gear, flaps, spoilers, ailerons, elevators, rudder, thrust reversers, etc. They also include the opening and closing of the doors and cargo bays, the extending and retracting of the stairs and jetways, the loading and unloading of passengers and baggage, etc. You can also see the smoke and contrails from the engines, the wing flexing due to turbulence or load factor, the reflection of sunlight on the windows and fuselage, etc.

          -

          The textures include the high-resolution and detailed images that cover the exterior and interior of the aircraft. They also include the liveries of various airlines that operate the Boeing 777 aircraft. You can choose from over 50 liveries that are included in this add-on or download more from here. You can also create your own liveries using a paint kit that is available here.

          -

          The lighting includes the realistic and dynamic effects that illuminate the aircraft and its surroundings. They include the landing lights, taxi lights, strobe lights, beacon lights, navigation lights, logo lights, etc. They also include the cockpit lighting, such as the panel backlighting, flood lighting, dome lighting, etc. You can also see the shadows and reflections of the aircraft and other objects on the ground and in the sky.

          -

          The engine noise includes the realistic and immersive sounds that emanate from the engines and other parts of the aircraft. They include the spooling up and down of the engines, the thrust reversers, the wind noise, the gear and flap noise, etc. They also include the cockpit sounds, such as the voice alerts, warnings, announcements, etc. You can also hear the ambient sounds, such as the airport traffic, ATC communications, etc.

          -

          How to compare this add-on with other Boeing 777 add-ons for FS2004 or other flight simulators

          -

          FS2004 - PSS B777 Professional 2004 is not the only Boeing 777 add-on for FS2004 or other flight simulators. There are other add-ons that also feature this aircraft model with different levels of quality and realism. Some of these add-ons are:

          -
            -
          • FS2004 - Wilco Boeing 777. This is another flight simulation add-on that features the Boeing 777 aircraft for FS2004. It was developed by Wilco Publishing and Abacus Software. It was released in 2006 and has been updated several times since then. It is available for purchase and download from here.
          • -
          • FSX - PMDG Boeing 777. This is a flight simulation add-on that features the Boeing 777 aircraft for Microsoft Flight Simulator X (FSX). It was developed by Precision Manuals Development Group (PMDG), a leading developer of high-fidelity airliner simulations. It was released in 2013 and has been updated several times since then. It is available for purchase and download from here.
          • -
          • X-Plane - FlightFactor Boeing 777. This is a flight simulation add-on that features the Boeing 777 aircraft for X-Plane, a realistic and advanced flight simulator. It was developed by FlightFactor, a team of experienced flight simulation developers. It was released in 2015 and has been updated several times since then. It is available for purchase and download from here.
          • -
          -

          To compare FS2004 - PSS B777 Professional 2004 with these other add-ons, we can use some criteria such as:

| Criterion | FS2004 - PSS B777 Professional 2004 | FS2004 - Wilco Boeing 777 | FSX - PMDG Boeing 777 | X-Plane - FlightFactor Boeing 777 |
| --- | --- | --- | --- | --- |
| Price | $29.99 USD | $29.95 USD | $89.99 USD | $59.95 USD |
| Variants | B777-200ER and B777-300 | B777-200ER, B777-300ER, B777-Freighter, B777-LR Worldliner | B777-200LR/F and B777-300ER | B777-200LR/F, B777-300ER, B777-Extended Pack (B777-200ER) |
| Liveries | Over 50 included or downloadable | Over 40 included or downloadable | Over 20 included or downloadable | Over 10 included or downloadable |
| Cockpit realism | Highly realistic and functional with accurate displays, gauges, FMC, autopilot, etc. | Realistic but simplified with some displays, gauges, FMC, autopilot, etc. | Extremely realistic and functional with accurate displays, gauges, FMC, autopilot, etc. | Very realistic and functional with accurate displays, gauges, FMC, autopilot, etc. |
| Visual realism | Highly realistic and detailed with high-resolution textures, animations, lighting, etc. | Realistic and detailed with high-resolution textures, animations, lighting, etc. | Extremely realistic and detailed with high-resolution textures, animations, lighting, etc. | Very realistic and detailed with high-resolution textures, animations, lighting, etc. |
| Sound realism | Highly realistic and immersive with engine noise, cockpit sounds, ambient sounds, etc. | Realistic and immersive with engine noise, cockpit sounds, ambient sounds, etc. | Extremely realistic and immersive with engine noise, cockpit sounds, ambient sounds, etc. | Very realistic and immersive with engine noise, cockpit sounds, ambient sounds, etc. |
| User feedback | Mostly positive with some minor complaints or suggestions | Mixed with some positive and some negative comments or criticisms | Mostly positive with some minor complaints or suggestions | Mostly positive with some minor complaints or suggestions |
          -

          As you can see from the table above, FS2004 - PSS B777 Professional 2004 is a very competitive and high-quality Boeing 777 add-on for FS2004. It offers a great balance between realism and performance, and it has a reasonable price and a large selection of liveries. It is not as advanced or comprehensive as some of the newer add-ons for FSX or X-Plane, but it is still one of the best options for FS2004 users who want to fly the Boeing 777 aircraft.

          -

          Conclusion

          -

          In conclusion, FS2004 - PSS B777 Professional 2004 is a flight simulation add-on that features the Boeing 777 aircraft for FS2004. It is a popular and well-made add-on that provides a realistic and enjoyable flight simulation experience. It has many features and benefits that make it worth buying and using, such as:

          -
            -
          • A highly realistic and functional cockpit with accurate displays, gauges, FMC, autopilot, etc.
          • -
          • A highly realistic and detailed visual and sound effects with high-resolution textures, animations, lighting, engine noise, etc.
          • -
          • A large selection of liveries of various airlines that operate the Boeing 777 aircraft.
          • -
          • A user-friendly and customizable load manager and config manager that allow you to adjust the fuel, payload, center of gravity, panel layout, sound volume, display units, etc.
          • -
          • A comprehensive and helpful user manual, support forum, and FAQ section that provide you with detailed information and guidance on how to install, use, and enjoy this add-on.
          • -
          -

          If you are looking for a Boeing 777 add-on for FS2004 that offers a realistic and enjoyable flight simulation experience, FS2004 - PSS B777 Professional 2004 is a great choice. It is not as advanced or comprehensive as some of the newer add-ons for FSX or X-Plane, but it is still one of the best options for FS2004 users who want to fly the Boeing 777 aircraft.

          -

          We hope that this article has given you a comprehensive guide on how to install, use, and enjoy FS2004 - PSS B777 Professional 2004. We also hope that you have learned some interesting facts and features about this add-on and the Boeing 777 aircraft. We invite you to share your feedback and comments on this add-on or ask any questions you might have. We would love to hear from you and help you with your flight simulation needs.

          -

          FAQs

          -

          Here are some of the most frequently asked questions about FS2004 - PSS B777 Professional 2004:

          -
            -
          1. Q: How can I get FS2004 - PSS B777 Professional 2004?
          2. -
          3. A: You can purchase and download FS2004 - PSS B777 Professional 2004 from the official website or other sources. The file size is about 61 MB. You can also find more information and reviews about this add-on on various flight simulation websites and forums.
          4. -
          5. Q: How can I install FS2004 - PSS B777 Professional 2004?
          6. -
          7. A: You need to extract the downloaded file using a program like WinZip or WinRAR. You will get a folder named "PSS-B772" that contains several subfolders and files. You need to copy the folder "PSS-B772" to your FS2004 main folder. You need to run the file "PSS-B772.exe" in the folder "PSS-B772". This will launch the installer that will guide you through the installation process. You need to accept the license agreement, choose the destination folder, and select the components you want to install. You can choose to install the Boeing 777-200ER, the Boeing 777-300, or both. You can also choose to install the liveries of various airlines that operate the Boeing 777 aircraft.
          8. -
          9. Q: How can I use FS2004 - PSS B777 Professional 2004?
          10. -
          11. A: You need to launch FS2004 and select the Boeing 777 aircraft from the aircraft menu. You can choose from different variants and liveries. You can also use the load manager and config manager to adjust the fuel, payload, center of gravity, panel layout, sound volume, display units, etc. You can start and fly the Boeing 777 aircraft in FS2004 using this add-on. You can use the instrument panel, gauges, FMC, autopilot, and other systems in the cockpit. You can also enjoy the visual and sound effects of this add-on, such as animations, textures, lighting, engine noise, etc.
          12. -
          13. Q: How can I compare FS2004 - PSS B777 Professional 2004 with other Boeing 777 add-ons for FS2004 or other flight simulators?
          14. -
          15. A: You can use some criteria such as price, variants, liveries, cockpit realism, visual realism, sound realism, and user feedback to compare FS2004 - PSS B777 Professional 2004 with other Boeing 777 add-ons for FS2004 or other flight simulators. You can also read some reviews and opinions from other users who have tried different add-ons and share your own experience and preferences.
          16. -
          17. Q: How can I get more help and support for FS2004 - PSS B777 Professional 2004?
          18. -
          19. A: You can refer to the user manual that comes with the add-on. You can find it in PDF format in the folder "PSS-B772\Documentation". You can also visit the support forum that is hosted by PSS. You can find it here. You can also check the FAQ section that is available on the official website. You can find it here. You can also contact PSS by email or phone if you have any questions or issues that are not covered by these sources.
          20. -

          -
          -
          \ No newline at end of file diff --git a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/transformer_predictor.py b/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/transformer_predictor.py deleted file mode 100644 index 465d8bbcbc41245a6152aefc33f251a4c288146f..0000000000000000000000000000000000000000 --- a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/transformer_predictor.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Conv2d - -from .position_encoding import PositionEmbeddingSine -from .transformer import Transformer - - -class TransformerPredictor(nn.Module): - @configurable - def __init__( - self, - in_channels, - mask_classification=True, - *, - num_classes: int, - hidden_dim: int, - num_queries: int, - nheads: int, - dropout: float, - dim_feedforward: int, - enc_layers: int, - dec_layers: int, - pre_norm: bool, - deep_supervision: bool, - mask_dim: int, - enforce_input_project: bool, - ): - """ - NOTE: this interface is experimental. - Args: - in_channels: channels of the input features - mask_classification: whether to add mask classifier or not - num_classes: number of classes - hidden_dim: Transformer feature dimension - num_queries: number of queries - nheads: number of heads - dropout: dropout in Transformer - dim_feedforward: feature dimension in feedforward network - enc_layers: number of Transformer encoder layers - dec_layers: number of Transformer decoder layers - pre_norm: whether to use pre-LayerNorm or not - deep_supervision: whether to add supervision to every decoder layers - mask_dim: mask feature dimension - enforce_input_project: add input project 1x1 conv evens if input - channels and hidden dim is identical - """ - super().__init__() - - # self.mask_classification = mask_classification - self.mask_classification = False - - # positional encoding - N_steps = hidden_dim // 2 - self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) - - transformer = Transformer( - d_model=hidden_dim, - dropout=dropout, - nhead=nheads, - dim_feedforward=dim_feedforward, - num_encoder_layers=enc_layers, - num_decoder_layers=dec_layers, - normalize_before=pre_norm, - return_intermediate_dec=deep_supervision, - ) - - self.num_queries = num_queries - self.transformer = transformer - hidden_dim = transformer.d_model - - self.query_embed = nn.Embedding(num_queries, hidden_dim) - - if in_channels != hidden_dim or enforce_input_project: - self.input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1) - weight_init.c2_xavier_fill(self.input_proj) - else: - self.input_proj = nn.Sequential() - self.aux_loss = deep_supervision - - # output FFNs - if self.mask_classification: - self.class_embed = nn.Linear(hidden_dim, num_classes + 1) - self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3) - - if self.num_queries != 2: - self.mask_querie_comb = nn.Sequential( - Conv2d(self.num_queries, self.num_queries, kernel_size=1, activation=F.relu), - Conv2d(self.num_queries, 2, kernel_size=5, activation=F.relu) - ) - - @classmethod - def from_config(cls, cfg, in_channels, mask_classification): - ret = {} - ret["in_channels"] = in_channels - ret["mask_classification"] = mask_classification - - 
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES - ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM - ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES - # Transformer parameters: - ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS - ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT - ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD - ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS - ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM - ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION - ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ - - ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM - - return ret - - def forward(self, x, mask_features): - pos = self.pe_layer(x) - - src = x - mask = None - hs, memory = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos) - - if self.mask_classification: - outputs_class = self.class_embed(hs) - out = {"pred_logits": outputs_class[-1]} - else: - out = {} - - if self.aux_loss: - # [l, bs, queries, embed] - mask_embed = self.mask_embed(hs) - outputs_seg_masks = torch.einsum("lbqc,bchw->lbqhw", mask_embed, mask_features) - pred_masks = outputs_seg_masks[-1] - if self.num_queries > 2: - pred_masks = self.mask_querie_comb(pred_masks) - out["pred_masks"] = pred_masks - out["aux_outputs"] = self._set_aux_loss( - outputs_class if self.mask_classification else None, outputs_seg_masks - ) - else: - # FIXME h_boxes takes the last one computed, keep this in mind - # [bs, queries, embed] - mask_embed = self.mask_embed(hs[-1]) - outputs_seg_masks = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features) - out["pred_masks"] = outputs_seg_masks - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_seg_masks): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - if self.mask_classification: - return [ - {"pred_logits": a, "pred_masks": b} - for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) - ] - else: - return [{"pred_masks": b} for b in outputs_seg_masks[:-1]] - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodesk Shape Modeling Plugin For Rhino 5 Crack.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodesk Shape Modeling Plugin For Rhino 5 Crack.md deleted file mode 100644 index ca065b9042e73d3c8eb284272f6b22f29b5c8463..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodesk Shape Modeling Plugin For Rhino 5 Crack.md +++ /dev/null @@ -1,73 +0,0 @@ - -

          Autodesk Shape Modeling Plugin For Rhino 5 Crack: A Review

          - -

          If you are looking for a powerful tool to create and modify Class A shapes in Rhino 5, you might want to check out the Autodesk Shape Modeling Plugin. This plugin is fully integrated in the 3D graphics of Rhino 5 (64-bit version) and offers a range of functionality to enhance your surface design and analysis workflow. In this article, we will review some of the features and benefits of this plugin, as well as how to install and activate it using a crack.

          -

          Autodesk Shape Modeling Plugin For Rhino 5 Crack


          Download Zip --->>> https://cinurl.com/2uEXui



          - -

          What is the Autodesk Shape Modeling Plugin For Rhino 5?

          - -

          The Autodesk Shape Modeling Plugin For Rhino 5 is a plugin that extends the capabilities of Rhino 5 for creating and modifying high-quality surfaces. It is based on the Virtual Shape technology developed by Autodesk, which is also used in other Autodesk products such as Alias and Fusion 360. The plugin allows you to easily create and edit Class A shapes, which are smooth and continuous surfaces that meet high standards of quality and aesthetics.

          - -

          What are the features and benefits of the Autodesk Shape Modeling Plugin For Rhino 5?

          - -

          Some of the features and benefits of the Autodesk Shape Modeling Plugin For Rhino 5 are:

          - -
            -
          • Simple and fast control point modeling with numerous additional options, such as extrapolation, law curves, and smooth handles.
          • -
          • Curve and surface matching functionality up to G3 ("Flow"), which ensures smooth transitions between surfaces with different curvature.
          • -
          • Blend and Multiblend functionality up to G3 ("Flow"), which allows you to create smooth blends between multiple surfaces with different shapes and directions.
          • -
          • Graphical handles for easy modification of the created geometry, such as moving, scaling, rotating, and twisting.
          • -
          • Integrated analyses in almost all functions to evaluate the resulting geometry, such as deviation analysis, surface matching analysis, global matching analysis, sections, distance analysis, and draft angle analysis.
          • -
          • Quick user-controlled surface creation on polygon meshes, which enables you to convert mesh data into smooth NURBS surfaces.
          • -
          • Curve sketching functionality, which allows you to draw freeform curves on any surface or plane.
          • -
          • Curve and surface approximation to simplify the geometry, which reduces the number of control points and knots while maintaining the shape quality.
          • -
          • All analyses can be saved across sessions in the Rhino-3DM-file, which preserves your work history and allows you to review or modify it later.
          • -
          - -

          How to install and activate the Autodesk Shape Modeling Plugin For Rhino 5 using a crack?

          - -

          If you want to try out the Autodesk Shape Modeling Plugin For Rhino 5 without paying for it, you can use a crack to bypass the license verification process. However, be aware that using a crack may be illegal and risky, as it may contain viruses or malware that can harm your computer or compromise your data. Therefore, we do not recommend or endorse using a crack for this plugin. Use it at your own risk and responsibility.

          - -

          That being said, here are the steps to install and activate the Autodesk Shape Modeling Plugin For Rhino 5 using a crack:

          - -
            -
          1. Download the plugin installer from one of the links provided in the search results above. Make sure you download the correct version for your operating system (Windows 64-bit).
          2. -
          3. Extract the downloaded file using a program like WinRAR or 7-Zip. You should get a folder named "ADShaModPlugRhino2014".
          4. -
          5. Run the setup.exe file inside the folder and follow the instructions to install the plugin on your computer. You may need to restart your computer after the installation.
          6. -
          7. Download the crack file from one of the links provided in the search results above. Make sure you download the correct version for your plugin (2014 x64).
          8. -
          9. Extract the crack file using a program like WinRAR or 7-Zip. You should get a file named "ShapeModeling.dll".
          10. -
          11. Copy the crack file and paste it into the folder where you installed the plugin. By default, this should be "C:\Program Files\Autodesk\ShapeModeling\2014". Replace the original file when prompted.
          12. -
          13. Launch Rhino 5 and go to Tools > Options > Plug-ins. Find the Autodesk Shape Modeling Plug-in for Rhino 5 in the list and make sure it is enabled.
          14. -
          15. You should now be able to use the plugin without any license restrictions. Enjoy!
          16. -
          - -

          Conclusion

          - -

          The Autodesk Shape Modeling Plugin For Rhino 5 is a great plugin for anyone who wants to create and modify Class A shapes in Rhino 5. It offers a range of functionality that enhances your surface design and analysis workflow. However, if you want to use it legally, you will need to purchase a license from Autodesk or an authorized reseller. Alternatively, you can use a crack to activate it for free, but this may be illegal and risky. Therefore, we advise you to use caution and discretion when using a crack for this plugin.

          -

          - -

          We hope this article has been helpful for you. If you have any questions or comments, feel free to leave them below. Thank you for reading!

          -

          What are the system requirements for the Autodesk Shape Modeling Plugin For Rhino 5?

          - -

          The Autodesk Shape Modeling Plugin For Rhino 5 is compatible with Windows 7, 8, and 10 (64-bit versions only). It requires Rhino 5 (64-bit version only) and a minimum of 4 GB of RAM. It also requires a graphics card that supports OpenGL 2.0 or higher. The plugin does not support Mac OS X or Linux operating systems.

          - -

          What are the alternatives to the Autodesk Shape Modeling Plugin For Rhino 5?

          - -

          If you are looking for other plugins that can help you create and modify Class A shapes in Rhino 5, you may want to consider the following alternatives:

          - -
            -
          • T-Splines: This plugin allows you to create and edit organic and freeform shapes using subdivision surfaces. It also supports conversion between NURBS and subdivision surfaces.
          • -
          • XNurbs: This plugin allows you to create and edit complex and smooth surfaces using NURBS technology. It also supports advanced surface blending and matching functions.
          • -
          • VSR Shape Modeling: This plugin allows you to create and edit Class A shapes using NURBS technology. It also supports advanced surface analysis and optimization functions.
          • -
          - -

          What are the advantages and disadvantages of using a crack for the Autodesk Shape Modeling Plugin For Rhino 5?

          - -

          Using a crack for the Autodesk Shape Modeling Plugin For Rhino 5 may seem tempting, as it allows you to use the plugin for free without paying for a license. However, there are also some drawbacks and risks associated with using a crack. Here are some of the advantages and disadvantages of using a crack for this plugin:

          - -
            -
          • Advantages: You can save money by not buying a license. You can access all the features and functions of the plugin without any restrictions. You can use the plugin on any computer without needing an internet connection.
          • -
          • Disadvantages: You may violate the intellectual property rights of Autodesk or other parties. You may expose your computer or data to viruses or malware that may be hidden in the crack file. You may experience errors or bugs that may affect the performance or quality of the plugin. You may not receive any updates or support from Autodesk or other sources.
          • -

          -
          -
          \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exelis ENVI 5.0 License Crack ((EXCLUSIVE)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exelis ENVI 5.0 License Crack ((EXCLUSIVE)).md deleted file mode 100644 index da3377a7b730c78ef43030c55ba73201a5cdad12..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Exelis ENVI 5.0 License Crack ((EXCLUSIVE)).md +++ /dev/null @@ -1,54 +0,0 @@ -

          Exelis ENVI 5.0 license crack


          Download ⚹⚹⚹ https://cinurl.com/2uEYw9



          -
Please see the "Licensing Guide" for more information.

By default, the License server will report to the License server a particular usage based on memory thresholds. This functionality may be disabled in the License server configuration by selecting the "Usage Thresholds" tab.

In addition, license reports can be configured to include additional information by selecting the "Custom Reports" tab.

License Server Agent

The License Server Agent is a component that runs in the background and provides the essential functionalities to keep the License server up-to-date, perform additional diagnostic tests, and download the latest collection of license files.

The License Server Agent (LSA) is installed along with the License server.

Note: Only one LSA process can be installed at a time on any machine (unless you have multiple licenses).

License Server Verifier

The License Server Verifier (LSV) is a component that runs in the background and verifies that a copy of the license files is installed in a local folder on the License server, and reports any license file that is missing.

The LSV is installed along with the License server.

License Server Agent (LSA) Verifier

The License Server Agent (LSA) Verifier (LSV) is a component that runs in the background and verifies that the License Server Agent (LSA) is installed, and reports any missing components.

Configuration

The License server is a node in the license server network, and this network is part of the same network as the Exelis Real Application Clusters (REAC).

Exelis provides a free License server configuration wizard that can be used to set up the License server. This wizard is not available on Windows.

Licensing Configuration page

The Licensing Configuration page provides a user interface to the License server.

Usage reports

The Usage reports page is used to view all the key usage reports that are configured on the License server. The reports are listed on this page in the order of the scheduled recurrence time.

Licensing reports

The Licensing reports page is used to view the selected reports that are configured on the License server.

Available System Actions

The Available System Actions page provides an overview of the system actions available to users and administrators.

System action items

The System Action Items page provides
          -
          -
          -

          diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xcom Ew Console Commands.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xcom Ew Console Commands.md deleted file mode 100644 index bcfdfc374283c9de49a6e35b203accca7ae6bc0c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Xcom Ew Console Commands.md +++ /dev/null @@ -1,23 +0,0 @@ - -

          How to Use Console Commands on Xcom Enemy Within

          -

          Xcom Enemy Within is an expansion pack for Xcom Enemy Unknown, a turn-based strategy game where you command a squad of soldiers against an alien invasion. Sometimes, you may encounter bugs, glitches, or difficulties that make the game frustrating or impossible to complete. In such cases, you may want to use console commands to fix the problem or cheat your way out of it.

          -

          Xcom Ew Console Commands


          Downloadhttps://cinurl.com/2uEZ2X



          -

          Console commands are special commands that you can type into a developer console to alter the game's behavior or access hidden features. However, using console commands can disable achievements and cause other issues, so use them at your own risk. This article will show you how to activate and use console commands on Xcom Enemy Within.

          -

          Activating the Console

          -

          By default, the console is not enabled on Xcom Enemy Within. You will need to download a modified file that allows you to access it. You can find the file on the Long War Nexus mod page[^1^], under the "Miscellaneous" section. The file is called "Activate Dev Console for Long War EW Beta 15c and later". Download it and follow the instructions in the text file to install it.

          -

          Once you have installed the file, you can open the console by pressing the backslash key (\) in-game. You should see a small window at the bottom of the screen where you can type commands. To close the console, press the backslash key again.

          -

          Using Console Commands

          -

          There are many console commands available for Xcom Enemy Within, but not all of them work properly or have a clear effect. You can find a list of commands on this wiki page[^3^], but be aware that some of them may be outdated or inaccurate. To use a command, simply type it into the console and press enter. Some commands require parameters, which are additional values that specify how the command works. For example, the command "GiveResource" requires you to specify which resource and how much you want to add.

          -

          You can also bind console commands to keys, so that you can execute them quickly without opening the console. To do this, you need to edit the DefaultInput.ini file located in C:/Program Files (x86)/Steam/steamapps/xcom-enemy-unknown/XEW/XcomGame/Config (or wherever you installed the game). Make a backup of this file before editing it. Then, look for the lines that start with [XComGame.XComTacticalInput] or [Engine.PlayerInput], depending on whether you want to bind a tactical command (a command that works during missions) or a base command (a command that works during base management). Below these lines, add a new line with this format:

          -

          -
          .Bindings= (Name="KEY", Command="COMMAND", Alt=True)
          -

Replace KEY with the name of the key you want to use, COMMAND with the command you want to bind, and set Alt to True or False depending on whether the binding should require holding Alt along with the key. For example, if you want to bind the command "restartlevel" (which restarts your current mission) to Alt+U, you would add this line below [XComGame.XComTacticalInput]:

          -
          .Bindings= (Name="U", Command="restartlevel", Alt=True)
          -

          Save the file and launch the game. Now you can press Alt+U during a mission to restart it.
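If you bind keys often, you can automate this edit instead of opening the file by hand. The following is a rough convenience sketch, not part of the game or any mod: it assumes the default install path quoted above and uses the same Alt+U "restartlevel" binding as the example; always check the result in a text editor before launching the game.

```python
# Rough sketch (not an official tool): back up DefaultInput.ini and add one
# tactical key binding under [XComGame.XComTacticalInput]. The path is the
# default install location quoted in this article; adjust it if yours differs.
import shutil
from pathlib import Path

config = Path(r"C:\Program Files (x86)\Steam\steamapps\xcom-enemy-unknown"
              r"\XEW\XcomGame\Config\DefaultInput.ini")
backup = config.with_name(config.name + ".bak")
binding = '.Bindings= (Name="U", Command="restartlevel", Alt=True)'
section = "[XComGame.XComTacticalInput]"

shutil.copy(config, backup)  # keep a backup before editing anything

text = config.read_text(encoding="utf-8", errors="ignore")
if binding not in text:
    # Insert the new binding directly under the tactical input section header.
    text = text.replace(section, section + "\n" + binding, 1)
    config.write_text(text, encoding="utf-8")
print(f"Binding ensured; backup saved as {backup.name}")
```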

          -

          Examples of Console Commands

          -

          Here are some examples of console commands that you may find useful or fun to use on Xcom Enemy Within:

          -
            -
          • GiveResource: This command lets you add any resource to your inventory, such as money, elerium, alloys, meld, etc. For example, if you want to add 1000 credits, type "GiveResource Credits 1000".
          • -
          • AddItem: This command lets you add any item to your inventory, such as weapons, armor, grenades, etc. For example, if you want to add a plasma rifle, type "AddItem PlasmaRifle 1". You can find a list of item names on this wiki page[^4^].
            -
            -
            \ No newline at end of file diff --git a/spaces/swapniel99/cifar10/custom_resnet.py b/spaces/swapniel99/cifar10/custom_resnet.py deleted file mode 100644 index 40c920e8f33a182e951c3650ce6eb3be8d58dd4e..0000000000000000000000000000000000000000 --- a/spaces/swapniel99/cifar10/custom_resnet.py +++ /dev/null @@ -1,63 +0,0 @@ -from torch import nn - - -class ConvLayer(nn.Module): - def __init__(self, input_c, output_c, bias=False, stride=1, padding=1, pool=False, dropout=0.): - super(ConvLayer, self).__init__() - - layers = list() - layers.append( - nn.Conv2d(input_c, output_c, kernel_size=3, bias=bias, stride=stride, padding=padding, - padding_mode='replicate') - ) - if pool: - layers.append(nn.MaxPool2d(kernel_size=2, stride=2)) - layers.append(nn.BatchNorm2d(output_c)) - layers.append(nn.ReLU()) - if dropout > 0: - layers.append(nn.Dropout(dropout)) - - self.all_layers = nn.Sequential(*layers) - - def forward(self, x): - return self.all_layers(x) - - -class CustomLayer(nn.Module): - def __init__(self, input_c, output_c, pool=True, residue=2, dropout=0.): - super(CustomLayer, self).__init__() - - self.pool_block = ConvLayer(input_c, output_c, pool=pool, dropout=dropout) - self.res_block = None - if residue > 0: - layers = list() - for i in range(0, residue): - layers.append(ConvLayer(output_c, output_c, pool=False, dropout=dropout)) - self.res_block = nn.Sequential(*layers) - - def forward(self, x): - x = self.pool_block(x) - if self.res_block is not None: - x_ = x - x = self.res_block(x) - # += operator causes inplace errors in pytorch if done right after relu. - x = x + x_ - return x - - -class Model(nn.Module): - def __init__(self, dropout=0.05): - super(Model, self).__init__() - - self.network = nn.Sequential( - CustomLayer(3, 64, pool=False, residue=0, dropout=dropout), - CustomLayer(64, 128, pool=True, residue=2, dropout=dropout), - CustomLayer(128, 256, pool=True, residue=0, dropout=dropout), - CustomLayer(256, 512, pool=True, residue=2, dropout=dropout), - nn.MaxPool2d(kernel_size=4, stride=4), - nn.Flatten(), - nn.Linear(512, 10) - ) - - def forward(self, x): - return self.network(x) diff --git a/spaces/t13718236382/bingoGPT4/src/pages/api/blob.ts b/spaces/t13718236382/bingoGPT4/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/terfces0erbo/CollegeProjectV2/Download Microsoft Office 2007 Full Version Highly 
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Download Microsoft Office 2007 Full Version Highly Compressed.md b/spaces/terfces0erbo/CollegeProjectV2/Download Microsoft Office 2007 Full Version Highly Compressed.md deleted file mode 100644 index e0e02818c6b8f6d286ddb1c136eed9470b117a0c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Download Microsoft Office 2007 Full Version Highly Compressed.md +++ /dev/null @@ -1,76 +0,0 @@ - -

            Download Microsoft Office 2007 Full Version Highly Compressed

            - -

Microsoft Office 2007 is one of the most popular and widely used productivity suites for Windows. It includes applications such as Word, Excel, PowerPoint, Publisher, Access, and Outlook that help you create, edit, and manage documents, spreadsheets, presentations, publications, databases, and emails. However, Microsoft Office 2007 is not a small piece of software. It requires a lot of disk space and bandwidth to download and install. If you have a slow internet connection or limited storage capacity, you might have difficulty getting Microsoft Office 2007 onto your computer.

            -

            download microsoft office 2007 full version highly compressed


Download Zip: https://bytlly.com/2uGkzk



            - -

            Fortunately, there is a solution for this problem. You can download Microsoft Office 2007 full version highly compressed from the internet. This means that the original software has been reduced in size by removing some unnecessary files and components. By downloading Microsoft Office 2007 full version highly compressed, you can save a lot of time and space on your computer. You can also install it without needing a serial number or activation code.

            - -

            How to download Microsoft Office 2007 full version highly compressed?

            - -

            To download Microsoft Office 2007 full version highly compressed, you will need a software called KGB archiver. This is a free and powerful compression tool that can compress any file or folder to a very small size. You can download KGB archiver from its official website or from any other trusted source. After downloading and installing KGB archiver, you can follow these steps:

            - -
              -
1. Download the Microsoft Office 2007 full version highly compressed file from a reliable source such as pcworldfiles or latest-circles.
2. Open the file with KGB archiver and extract it to a folder of your choice.
3. Open the folder and run the setup file.
4. Follow the instructions on the screen to install Microsoft Office 2007 on your computer.
5. Enjoy using Microsoft Office 2007 full version highly compressed.
            - -

            What are the benefits of downloading Microsoft Office 2007 full version highly compressed?

            - -

            Downloading Microsoft Office 2007 full version highly compressed has many benefits such as:

            -

            - -
              -
• You can save disk space and bandwidth by downloading a smaller file size.
• You can install the software without needing a serial number or activation code.
• You can access all the features and tools of Microsoft Office 2007 without any limitations or restrictions.
• You can enjoy using Microsoft Office 2007 full version highly compressed on your computer.
            - -

            Conclusion

            - -

            Microsoft Office 2007 full version highly compressed is a great option for anyone who wants to use Microsoft Office 2007 on their computer without spending a lot of money or time. It is a compressed version of Microsoft Office 2007 that has been reduced in size by removing some unnecessary files and components. By downloading Microsoft Office 2007 full version highly compressed, you can save a lot of time and space on your computer. You can also install it without needing a serial number or activation code. You can also access all the features and tools of Microsoft Office 2007 without any limitations or restrictions. If you are interested in trying out this amazing productivity suite, you can download Microsoft Office 2007 full version highly compressed from a trusted source today.

            -

            What are the system requirements for Microsoft Office 2007 full version highly compressed?

            - -

            Microsoft Office 2007 full version highly compressed is compatible with Windows XP, Windows Vista, Windows 7, Windows 8, and Windows 10. However, you need to have some minimum system requirements to run it smoothly on your computer. These are:

            - -
              -
• A processor of 500 MHz or higher.
• A memory of 256 MB RAM or higher.
• A hard disk space of 1.5 GB or higher.
• A monitor resolution of 1024 x 768 or higher.
• A CD-ROM or DVD drive.
• An internet connection for activation and updates.
            - -

            If you meet these system requirements, you can download Microsoft Office 2007 full version highly compressed and install it on your computer without any problems.

            - -

            What are the advantages and disadvantages of Microsoft Office 2007 full version highly compressed?

            - -

            Microsoft Office 2007 full version highly compressed has some advantages and disadvantages that you should consider before downloading and using it. Here are some of them:

            - -

            Advantages:

            - -
              -
• You can save disk space and bandwidth by downloading a smaller file size.
• You can install the software without needing a serial number or activation code.
• You can access all the features and tools of Microsoft Office 2007 without any limitations or restrictions.
• You can enjoy using Microsoft Office 2007 full version highly compressed on your computer.
            - -

            Disadvantages:

            - -
              -
• You might face some compatibility issues with newer versions of Windows or other software.
• You might encounter some errors or bugs while using the software.
• You might not get any updates or support from Microsoft for the software.
• You might violate some terms and conditions of Microsoft by using the software.
            - -

            Therefore, you should weigh the pros and cons of Microsoft Office 2007 full version highly compressed before downloading and using it.

            -

            Microsoft Office 2007 full version highly compressed is a great option for anyone who wants to use Microsoft Office 2007 on their computer without spending a lot of money or time. It is a compressed version of Microsoft Office 2007 that has been reduced in size by removing some unnecessary files and components. By downloading Microsoft Office 2007 full version highly compressed, you can save a lot of time and space on your computer. You can also install it without needing a serial number or activation code. You can also access all the features and tools of Microsoft Office 2007 without any limitations or restrictions. Microsoft Office 2007 is a realistic and intuitive software that lets you create, edit, and manage documents, spreadsheets, presentations, publications, databases, and emails on your computer. It has a variety of features and tools that let you work with different formats, styles, options, commands, etc. If you are interested in trying out this amazing productivity suite, you can download Microsoft Office 2007 full version highly compressed from a trusted source today.

            -
            -
            \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Emilian Stancu Tratat De Criminalistica Pdf Download PATCHED.md b/spaces/terfces0erbo/CollegeProjectV2/Emilian Stancu Tratat De Criminalistica Pdf Download PATCHED.md deleted file mode 100644 index 34c14b958388282a2210122d5a226e6505ae02f8..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Emilian Stancu Tratat De Criminalistica Pdf Download PATCHED.md +++ /dev/null @@ -1,19 +0,0 @@ -
            -

            Tratat de Criminalistica by Emilian Stancu: A Comprehensive Guide to Forensic Science

            -

            Tratat de Criminalistica by Emilian Stancu is a book that covers the theory and practice of forensic science in Romania. The book is written by a professor of criminalistics and a former director of the Institute of Forensic Expertise. It is considered to be one of the most valuable and widely read works of this kind published in Romania after 1990, and a useful tool for students, researchers, practitioners, and judges in the field of criminal justice.

            -

            The book is divided into four parts: general criminalistics, special criminalistics, forensic techniques, and forensic expertise. The first part deals with the history, principles, methods, and sources of criminalistics, as well as the legal framework and the organization of forensic activities. The second part focuses on the specific aspects of various types of crimes, such as homicide, theft, fraud, corruption, cybercrime, terrorism, etc. The third part describes the main techniques and methods used in forensic investigations, such as fingerprinting, DNA analysis, ballistics, document examination, etc. The fourth part explains the role and functions of forensic experts, their qualifications, ethics, and responsibilities.

            -

            Emilian Stancu Tratat De Criminalistica Pdf Download


            Download Zip 🗸 https://bytlly.com/2uGkNv



            -

            The book is updated and revised according to the latest developments and challenges in forensic science. It also contains numerous examples, case studies, illustrations, tables, and diagrams that facilitate the understanding and application of the concepts and procedures presented. The book is available in PDF format for download from Scribd[^1^] or in print from Libris[^2^].

            Forensic science has many benefits for society, as well as for the individuals who pursue this career. Some of the benefits of forensic science are:

            -
              -
• It helps in controlling cybercrime by using computer tools to trace and identify the perpetrators of online offenses, such as hacking, phishing, identity theft, etc. [^1^]
• It helps in determining the cause and manner of death by examining the body and the scene of death, as well as performing autopsies and toxicology tests. [^1^]
• It helps in investigating accident cases and reconstructing the events that led to the collision, injury, or damage by analyzing the vehicle condition, tire and other marks, eyewitness accounts, etc. [^1^]
• It helps in identifying the suspects and victims of crimes by using biometric technology, such as fingerprinting, DNA analysis, facial recognition, etc. [^1^]
• It helps in enhancing public safety by providing scientific evidence and expert testimony that can assist in solving crimes and prosecuting offenders. [^2^]
• It helps in advancing scientific knowledge by conducting research and studies on various aspects of forensic science, such as new techniques, methods, applications, etc. [^2^]
• It helps in creating a challenging and fascinating work environment for those who are interested in science, law, and justice. [^2^] [^3^]
            -

            Forensic science is a rewarding and important field that can make a difference in society and individual lives.

            -

            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Bloons Td 5 Deluxe 107 Crack Discover the Secrets and Tips to Mastering the Game with This Guide.md b/spaces/tialenAdioni/chat-gpt-api/logs/Bloons Td 5 Deluxe 107 Crack Discover the Secrets and Tips to Mastering the Game with This Guide.md deleted file mode 100644 index ec3004bf1a1eae18b3446db739274c111dd453c2..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Bloons Td 5 Deluxe 107 Crack Discover the Secrets and Tips to Mastering the Game with This Guide.md +++ /dev/null @@ -1,40 +0,0 @@ -
            -
            - What is a crack and how does it work
            - How to get Bloons Td 5 Deluxe 107 Crack for free | | H2: Features of Bloons Td 5 Deluxe 107 Crack | - Unlimited money and tokens
            - All premium upgrades unlocked
            - All towers and modes available
            - No ads or pop-ups | | H2: Pros and Cons of Bloons Td 5 Deluxe 107 Crack | - Pros: More fun, more options, more challenges
            - Cons: Risk of viruses, malware, legal issues, ethical issues | | H2: Alternatives to Bloons Td 5 Deluxe 107 Crack | - Buying the official game from Steam or Ninja Kiwi
            - Playing the free online version on Ninja Kiwi's website
            - Playing other tower defense games | | H1: Conclusion | - Summary of the main points
            - Recommendation for the best option | | H2: FAQs | - Q1: Is Bloons Td 5 Deluxe 107 Crack safe to download?
            - Q2: Is Bloons Td 5 Deluxe 107 Crack compatible with Windows 10?
            - Q3: How can I update Bloons Td 5 Deluxe 107 Crack?
            - Q4: Can I play Bloons Td 5 Deluxe 107 Crack online with other players?
            - Q5: Where can I find more tips and tricks for Bloons Td 5? | **Table 2: Article with HTML formatting** ```html

            Bloons Td 5 Deluxe 107 Crack: What Is It and How to Get It

            -

            If you are a fan of tower defense games, you might have heard of Bloons Td 5, a popular game developed by Ninja Kiwi. In this game, you have to place monkeys and other towers on the map to pop balloons (or bloons) before they reach the end. The game has over 250 levels, 50 towers, 10 special agents, and various modes and difficulties to challenge your skills.

            -

            However, if you want to enjoy the full features of the game, you have to pay for it. The official game costs $9.99 on Steam or $2.99 on Ninja Kiwi's website. Moreover, some premium upgrades and items require real money or tokens, which are hard to earn in the game. That's why some people look for a crack to play the game for free.

            -

            Bloons Td 5 Deluxe 107 Crack


            DOWNLOAD ►►► https://urlcod.com/2uK8LS



            -

            A crack is a modified version of a software that bypasses its security or licensing system. By using a crack, you can access all the content and functions of the game without paying anything. However, cracking a game is illegal and risky, as it may contain viruses, malware, or spyware that can harm your computer or steal your personal information.

            -

            So how can you get Bloons Td 5 Deluxe 107 Crack for free? There are some websites that claim to offer this crack, but you have to be careful as they may be scams or phishing sites. One of the most popular sources of this crack is a YouTube video by iGamesHelper123, which has over 79k views. In this video, he provides two links: one for the crack file and one for the installation file. He also shows how to install and run the crack on his computer.

            -

            Features of Bloons Td 5 Deluxe 107 Crack

            -

            According to iGamesHelper123, Bloons Td 5 Deluxe 107 Crack has several features that make it more appealing than the original game. These include:

            -
              -
• Unlimited money and tokens: You can start with $1,300 cash and buy any tower or upgrade you want. You can also use tokens to unlock special agents, modes, and tracks.
• All premium upgrades unlocked: You can access all the premium upgrades that normally cost real money or tokens in the game. These include double cash mode, healthy bananas, bigger beacons, super monkey lair, monkey tycoon, and more.
• All towers and modes available: You can use any tower or mode in the game without restrictions. You can also customize your towers with skins and decals.
• No ads or pop-ups: You can play the game without any interruptions or distractions from ads or pop-ups.
            -

            Pros and Cons of Bloons Td 5 Deluxe 107 Crack

            -

            While Bloons Td 5 Deluxe 107 Crack may sound tempting, it also has some drawbacks that you should consider before downloading it. Here are some pros and cons of using this crack:

            - - - -
            ProsCons
            • More fun: You can enjoy the game without any limitations or frustrations.
            • More options: You can experiment with different towers, modes, and strategies.
            • More challenges: You can test your skills against harder levels and enemies.
            • Risk of viruses: The crack file may contain malicious software that can damage your computer or steal your data.
            • Risk of malware: The installation file may install unwanted programs or extensions on your browser that can affect your performance or security.
            • Risk of legal issues: Cracking a game is illegal and violates the terms of service of Ninja Kiwi. You may face legal consequences if you are caught.
            • Risk of ethical issues: Cracking a game is unfair and disrespectful to the developers who worked hard to create it. You are depriving them of their rightful income and support.
            -

            Alternatives to Bloons Td 5 Deluxe 107 Crack

            -

            If you are not comfortable with using Bloons Td 5 Deluxe 107 Crack, there are some alternatives that you can try instead. These include:

            -
              -
• Buying the official game from Steam or Ninja Kiwi: This is the best option if you want to support the developers and enjoy the game legally and safely. You can also get updates, bug fixes, achievements, leaderboards, and online multiplayer features.
• Playing the free online version on Ninja Kiwi's website: This is a good option if you want to try the game before buying it or if you don't have enough money. However, you will need an internet connection and a browser that supports Flash Player. You will also miss some features that are only available in the deluxe version.
• Playing other tower defense games: This is a fun option if you want to explore other games in the same genre. There are many tower defense games that you can play online or offline for free or for a low price. Some examples are Kingdom Rush, Plants vs Zombies, Defense Grid, GemCraft, Cursed Treasure, and more.
            -

            Conclusion

            -

            Bloons Td 5 Deluxe 107 Crack is a modified version of Bloons Td 5 that allows you to play the game for free with unlimited money, tokens, towers, modes, and premium upgrades. However, it also comes with risks of viruses, malware, legal issues, and ethical issues. Therefore, we recommend that you avoid using this crack and opt for one of the alternatives instead. The best option is to buy the official game from Steam or Ninja Kiwi and support the developers who created this amazing game.

            -

            FAQs

            -
              -
            1. Is Bloons Td 5 Deluxe 107 Crack safe to download?
              No, it is not safe to download as it may contain viruses or malware that can harm your computer or steal your personal information. It is also illegal and unethical to use this crack as it violates the terms of service of Ninja Kiwi.
            2. -
            3. Is Bloons Td 5 Deluxe 107 Crack compatible with Windows 10?
              Yes, it is compatible with Windows 10 as long as you have Flash Player installed on your computer. However, we do not recommend using this crack as it may cause problems with your system or security.
            4. -```html
            5. Can I play Bloons Td 5 Deluxe 107 Crack online with other players?
              No, you cannot play Bloons Td 5 Deluxe 107 Crack online with other players as it is not connected to Ninja Kiwi's servers or Steam's platform. You can only play it offline or on a local network.
            6. -
            7. Where can I find more tips and tricks for Bloons Td 5?
              You can find more tips and tricks for Bloons Td 5 on Ninja Kiwi's website, Steam's community page, YouTube videos, blogs, forums, and wikis. However, we advise you to use these tips and tricks only for the official game and not for the crack.
            8. -
            - ```

            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download Quick Heal Antivirus Software For Windows 7 64 Bit.md b/spaces/tialenAdioni/chat-gpt-api/logs/Free Download Quick Heal Antivirus Software For Windows 7 64 Bit.md deleted file mode 100644 index 83e6b5e5c062972a70f26bb290b64ba9cd7f352e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download Quick Heal Antivirus Software For Windows 7 64 Bit.md +++ /dev/null @@ -1,18 +0,0 @@ -
            -

            How to Download Quick Heal Antivirus Software for Windows 7 64 Bit for Free

            -

            If you are looking for a reliable and effective antivirus software for your Windows 7 64 bit PC, you may want to consider Quick Heal Antivirus. Quick Heal Antivirus is a cloud-based AI-powered security solution that protects your PC from various cyberthreats, such as viruses, malware, ransomware, spyware, phishing, and more. It also offers features like parental control, browsing protection, firewall, anti-keylogger, and data theft prevention.

            -

            Quick Heal Antivirus is compatible with Windows 7 64 bit operating system and offers a free one month trial version that you can download and install on your PC. Here are the steps to download Quick Heal Antivirus software for Windows 7 64 bit for free:

            -

            free download quick heal antivirus software for windows 7 64 bit


            DOWNLOAD ››› https://urlcod.com/2uK3Nm



            -
              -
1. Go to the official website of Quick Heal at https://www.quickheal.com/download-free-antivirus.
2. Choose the product that suits your needs from the list of Quick Heal desktop products. You can choose from Quick Heal Total Security, Quick Heal Internet Security, or Quick Heal Antivirus Pro.
3. Click on the "Download" button below the product name. This will start downloading the Quick Heal Setup Downloader file on your PC.
4. Run the Quick Heal Setup Downloader file and choose a location to save the downloaded files.
5. Wait for the downloader to download the required files for the installation of Quick Heal Antivirus.
6. Once the download is complete, run the setup.exe file from the downloaded folder and follow the instructions on the screen to install Quick Heal Antivirus on your PC.
7. After the installation is done, activate your free one month trial by entering your name and email address.
8. Enjoy using Quick Heal Antivirus software for Windows 7 64 bit for free for one month!
            -

            Note: After the trial period expires, you will need to purchase a license key to continue using Quick Heal Antivirus. You can also upgrade to a higher version of Quick Heal product if you want more features and protection. You can also download other free tools and updates from the Quick Heal website.

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dawn of Survival MOD APK Free Craft and Unlimited Resources.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dawn of Survival MOD APK Free Craft and Unlimited Resources.md deleted file mode 100644 index bf523a642b12815385df0976f3f433e3c16ca6e3..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dawn of Survival MOD APK Free Craft and Unlimited Resources.md +++ /dev/null @@ -1,131 +0,0 @@ - -

            Dawn of Survival Mod APK: A Guide for Beginners

            -

            If you are a fan of post-apocalyptic survival games, you might have heard of Dawn of Survival. It is a popular game that lets you experience the harsh realities of a world after a nuclear war. You have to scavenge for resources, craft weapons and tools, build shelters, fight against zombies and other survivors, and try to stay alive in a hostile environment.

            -

            dawn of survival mod apk


            Download ✸✸✸ https://bltlly.com/2uOlmu



            -

            But what if you want to enjoy the game without worrying about running out of resources or facing too many challenges? Well, there is a way to do that. You can download and install Dawn of Survival Mod APK, a modified version of the game that gives you free craft, unlimited resources, enhanced graphics, and more. In this article, we will tell you everything you need to know about Dawn of Survival Mod APK, including how to download and install it, what are its features, and what are its pros and cons. Let's get started!

            -

            What is Dawn of Survival?

            -

            A post-apocalyptic survival game

            -

            Dawn of Survival is a game developed by Royal Ark, a studio based in Cyprus. It was released in 2018 for Android devices. The game is set in a world after a nuclear war that has wiped out most of humanity. You play as one of the survivors who has to explore the wasteland, find resources, craft items, build bases, fight enemies, and survive.

            -

            The game features a vast open world with different biomes, such as forests, deserts, swamps, mountains, and cities. You can interact with various objects, such as cars, buildings, containers, animals, plants, and more. You can also encounter different types of zombies, mutants, bandits, raiders, and other survivors. Some of them are friendly and can help you or trade with you, while others are hostile and will attack you on sight.

            -

            The game also has a realistic day-night cycle and weather system that affect your gameplay. For example, at night, it is harder to see and more dangerous to roam around. During rain or snow, your visibility is reduced and your movement is slowed down. You also have to take care of your health, hunger, thirst, radiation, temperature, and fatigue levels. If any of them drop too low or too high, you will suffer negative effects or even die.

            -

            dawn of zombies survival mod apk free craft
            -dawn of survival mod apk unlimited money
            -dawn of zombies survival after the last war mod apk
            -dawn of survival mod apk latest version
            -dawn of zombies survival mod apk download
            -dawn of survival mod apk offline
            -dawn of zombies survival mod apk android 1
            -dawn of survival mod apk god mode
            -dawn of zombies survival mod apk rexdl
            -dawn of survival mod apk no root
            -dawn of zombies survival mod apk unlimited everything
            -dawn of survival mod apk 2023
            -dawn of zombies survival mod apk happymod
            -dawn of survival mod apk free shopping
            -dawn of zombies survival mod apk revdl
            -dawn of survival mod apk unlimited ammo
            -dawn of zombies survival mod apk obb
            -dawn of survival mod apk online
            -dawn of zombies survival mod apk unlimited gas
            -dawn of survival mod apk mega
            -dawn of zombies survival mod apk data
            -dawn of survival mod apk full version
            -dawn of zombies survival mod apk all unlocked
            -dawn of survival mod apk no ads
            -dawn of zombies survival mod apk high damage
            -dawn of survival mod apk 2.213
            -dawn of zombies survival mod apk 2.212
            -dawn of survival mod apk 2.211
            -dawn of zombies survival mod apk 2.210
            -dawn of survival mod apk 2.209
            -dawn of zombies survival mod apk 2.208
            -dawn of survival mod apk 2.207
            -dawn of zombies survival mod apk 2.206
            -dawn of survival mod apk 2.205
            -dawn of zombies survival mod apk 2.204
            -dawn of survival mod apk 2.203
            -dawn of zombies survival mod apk 2.202
            -dawn of survival mod apk 2.201
            -dawn of zombies survival mod apk 2.200
            -dawn of zombies: the last stand - zombie apocalypse game with rpg elements and open world map - free to play offline and online - download now and enjoy the best post-apocalyptic shooter on your mobile device!

            -

            A modded version with free craft and unlimited resources

            -

            Dawn of Survival Mod APK is a modified version of the original game that gives you some advantages over the normal gameplay. For example, you can craft anything for free without needing any materials or tools. You also have unlimited resources in your inventory, such as food, water, ammo, medicine, etc. You can use them as much as you want without running out.

            -

            Moreover, Dawn of Survival Mod APK also improves the graphics and sound effects of the game. The textures are more detailed and realistic, the colors are more vibrant and contrasted, the shadows are more dynamic and smooth, and the lighting is more natural and atmospheric. The sound effects are also more immersive and clear, such as the footsteps, gunshots, explosions, zombie groans, etc.

            -

            Another benefit of Dawn of Survival Mod APK is that it removes the ads and the root requirement from the original game. This means that you can enjoy the game without any interruptions or restrictions. You don't need to root your device or install any additional apps to play Dawn of Survival Mod APK.

            -

            How to download and install Dawn of Survival Mod APK?

            -

            Requirements and compatibility

            -

            Before you download and install Dawn of Survival Mod APK, you need to make sure that your device meets the following requirements and compatibility:

            -
              -
• Your device must have Android 4.4 or higher operating system.
• Your device must have at least 2 GB of RAM and 500 MB of free storage space.
• Your device must have a stable internet connection to download the game and play online.
• Your device must allow installation from unknown sources. You can enable this option in your settings.
            -

            If your device meets these requirements and compatibility, you can proceed to the next step.

            -

            Steps to download and install

            -

            Here are the steps to download and install Dawn of Survival Mod APK on your device:

            -
              -
            1. Click on this link to download the Dawn of Survival Mod APK file. It is a safe and secure link that will not harm your device or data.
            2. -
            3. Wait for the download to finish and locate the file in your downloads folder.
            4. -
            5. Tap on the file and follow the instructions to install the game on your device.
            6. -
            7. Launch the game and enjoy playing with free craft and unlimited resources.
            8. -
            -

            What are the features of Dawn of Survival Mod APK?

            -

            Explore a vast open world

            -

            One of the main features of Dawn of Survival Mod APK is that it lets you explore a vast open world with different biomes, such as forests, deserts, swamps, mountains, and cities. You can travel by foot, by car, by bike, or by boat. You can also use a map and a compass to navigate your way around. You can find various items, resources, weapons, tools, and secrets in the world. You can also interact with different objects, such as cars, buildings, containers, animals, plants, and more.

            -

            Build and upgrade your base

            -

            Another feature of Dawn of Survival Mod APK is that it lets you build and upgrade your base. You can use the free craft mode to create anything you need for your base, such as walls, floors, roofs, doors, windows, furniture, appliances, etc. You can also use the unlimited resources to decorate and customize your base according to your preferences. You can also upgrade your base with various facilities, such as a workshop, a storage room, a kitchen, a bathroom, a bedroom, etc. You can also install security systems, such as traps, turrets, alarms, etc., to protect your base from enemies.

            -

            Fight against zombies and other survivors

            -

            A third feature of Dawn of Survival Mod APK is that it lets you fight against zombies and other survivors. You can use various weapons and tools to combat them, such as guns, knives, axes, hammers, bows, grenades, etc. You can also use different tactics and strategies to defeat them, such as stealth, ambush, sniping, melee, etc. You can also loot their corpses or bases for more items and resources. You have to be careful though, as some enemies are stronger and smarter than others. You also have to watch out for your health, hunger, thirst, radiation, temperature, and fatigue levels, as they can affect your performance and survival.

            -

            Join clans and participate in raids

            -

            A fourth feature of Dawn of Survival Mod APK is that it lets you join clans and participate in raids. You can join or create a clan with other players online and cooperate with them to survive. You can chat with them, share resources, trade items, help each other, and more. You can also participate in raids with your clan members and attack other clans' bases for more loot and glory. You can also defend your own base from enemy raids and repel them with your weapons and traps. You can also compete with other clans in the leaderboards and rankings.

            -

            What are the pros and cons of Dawn of Survival Mod APK?

            -

            Pros

            -

            Free and unlimited resources

            -

            One of the pros of Dawn of Survival Mod APK is that it gives you free and unlimited resources. You can craft anything for free without needing any materials or tools. You also have unlimited resources in your inventory, such as food, water, ammo, medicine, etc. You can use them as much as you want without running out. This makes the game easier and more enjoyable for you.

            -

            Enhanced graphics and sound effects

            -

            Another pro of Dawn of Survival Mod APK is that it enhances the graphics and sound effects of the game. The textures are more detailed and realistic, the colors are more vibrant and contrasted, the shadows are more dynamic and smooth, and the lighting is more natural and atmospheric. The sound effects are also more immersive and clear, such as the footsteps, gunshots, explosions, zombie groans, etc. This makes the game more appealing and immersive for you.

            -

            No ads and no root required

            -

            A third pro of Dawn of Survival Mod APK is that it removes the ads and the root requirement from the original game. This means that you can enjoy the game without any interruptions or restrictions. You don't need to root your device or install any additional apps to play Dawn of Survival Mod APK. This makes the game more convenient and accessible for you.

            -

            Cons

            -

            Possible security risks and malware infections

            -

            One of the cons of Dawn of Survival Mod APK is that it may pose some security risks and malware infections to your device or data. Since it is a modified version of the original game, it may not be safe or secure to download or install. It may contain viruses, spyware, adware, or other malicious programs that can harm your device or data. It may also access your personal information, such as your contacts, photos, messages, etc., without your permission or knowledge. Therefore, you should be careful and cautious when downloading or installing Dawn of Survival Mod APK.

            -

            May not work with some devices or updates

            -

            Another con of Dawn of Survival Mod APK is that it may not work with some devices or updates. Since it is a modified version of the original game, it may not be compatible or stable with some devices or updates. It may crash, freeze, lag, or glitch on some devices or updates. It may also cause some errors or bugs in the game. Therefore, you should check the requirements and compatibility before downloading or installing Dawn of Survival Mod APK.

            -

            May cause ban or suspension from the official game

            -

            A third con of Dawn of Survival Mod APK is that it may cause ban or suspension from the official game. Since it is a modified version of the original game, it may violate the terms and conditions of the official game. It may also be detected by the anti-cheat system of the official game. If you are caught using Dawn of Survival Mod APK, you may be banned or suspended from playing the official game. Therefore, you should be aware and responsible when using Dawn of Survival Mod APK.

            -

            Conclusion and FAQs

            -

            Dawn of Survival Mod APK is a modified version of the original game that gives you free craft, unlimited resources, enhanced graphics, and more. It is a fun and easy way to enjoy the game without any limitations or challenges. However, it also has some drawbacks, such as possible security risks, compatibility issues, and ban risks. Therefore, you should be careful and cautious when downloading or installing Dawn of Survival Mod APK. You should also respect the original game and its developers and support them if you like the game.

            -

            If you have any questions or doubts about Dawn of Survival Mod APK, you can check out the following FAQs:

            - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            QuestionAnswer
            Is Dawn of Survival Mod APK safe to use?Dawn of Survival Mod APK is not an official version of the game, so it may not be safe or secure to use. It may contain viruses, malware, or other harmful programs that can damage your device or data. It may also access your personal information without your consent or knowledge. Therefore, you should download and install Dawn of Survival Mod APK at your own risk and discretion.
            Is Dawn of Survival Mod APK legal to use?Dawn of Survival Mod APK is not an official version of the game, so it may not be legal to use. It may violate the terms and conditions of the original game and its developers. It may also infringe the intellectual property rights of the original game and its developers. Therefore, you should use Dawn of Survival Mod APK only for personal and educational purposes and not for commercial or illegal purposes.
            How to update Dawn of Survival Mod APK?Dawn of Survival Mod APK may not work with some updates of the original game. Therefore, you should check for the latest version of Dawn of Survival Mod APK regularly and download and install it if available. You can also follow the official website or social media pages of Dawn of Survival Mod APK to get notified about the updates.
            How to uninstall Dawn of Survival Mod APK?If you want to uninstall Dawn of Survival Mod APK from your device, you can follow these steps:
            1. Go to your settings and find the apps or applications option.
            2. Find and select Dawn of Survival Mod APK from the list of apps.
            3. Tap on the uninstall button and confirm your action.
            4. Wait for the uninstallation process to finish and restart your device.
            How to contact the developers of Dawn of Survival Mod APK?If you have any feedback, suggestions, complaints, or queries about Dawn of Survival Mod APK, you can contact the developers through their email address: dawnofsurvivalmodapk@gmail.com. You can also visit their website: dawnofsurvivalmodapk.com for more information.

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dead Space 3 Mod Apk and Explore the Frozen Planet of Tau Volantis.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dead Space 3 Mod Apk and Explore the Frozen Planet of Tau Volantis.md deleted file mode 100644 index 376b7a2f06678682ec569bfdc0c430969636abc6..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dead Space 3 Mod Apk and Explore the Frozen Planet of Tau Volantis.md +++ /dev/null @@ -1,139 +0,0 @@ - -

            Download Dead Space 3 Mod Apk: How to Enjoy the Ultimate Survival Horror Experience on Your Android Device

            -

            If you are a fan of survival horror games, you probably have heard of Dead Space 3, one of the most acclaimed titles in the genre. Released in 2013 by Visceral Games and Electronic Arts, Dead Space 3 is a third-person shooter that follows the adventures of Isaac Clarke, a space engineer who has to fight against an alien threat known as the Necromorphs. Set on a frozen planet called Tau Volantis, Dead Space 3 offers a thrilling and immersive gameplay experience that combines action, exploration, puzzle-solving, and crafting.

            -

            Dead Space 3 received positive reviews from critics and players alike, who praised its atmosphere, graphics, sound, combat, and co-op mode. The game also won several awards, including Best Action Game at the BAFTA Games Awards and Best Shooter at the Spike Video Game Awards. However, some fans were disappointed by the game's shift from horror to action, as well as its inclusion of microtransactions.

            -

            download dead space 3 mod apk


            DOWNLOAD >>> https://bltlly.com/2uOfNo



            -

            Fortunately, there is a way to enjoy Dead Space 3 on your Android device without any limitations or compromises. Thanks to a mod apk file that you can download for free from a trusted source, you can play Dead Space 3 with enhanced graphics, performance, and content. You can also access unlimited resources, unlock all weapons and suits, play co-op mode with your friends, and more. In this article, we will show you how to download and install Dead Space 3 mod apk on your Android device, what are its features and benefits, and some tips and tricks to survive and thrive in this amazing game.

            -

            How to download and install Dead Space 3 mod apk on your Android device

            -

            Downloading and installing Dead Space 3 mod apk on your Android device is very easy and straightforward. Just follow these simple steps:

            -
              -
            1. Go to [this link](^1^) to download the mod apk file for Dead Space 3. This file is safe, secure, and verified by many users. It has a size of about 1.5 GB.
            2. -
            3. Once the download is complete, locate the file in your device's storage and tap on it to start the installation process.
            4. -
            5. You may need to enable the installation of apps from unknown sources in your device's settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.
            6. -
            7. Follow the instructions on the screen to install the mod apk. It may take a few minutes to finish.
            8. -
            9. Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy!
            10. -
            -

            What are the features and benefits of Dead Space 3 mod apk

            -

            Dead Space 3 mod apk is not just a simple port of the original game to Android devices. It is a modified version that offers many features and benefits that enhance the gameplay experience and make it more enjoyable and convenient. Here are some of the features and benefits of Dead Space 3 mod apk:

            -
              -
            • Improved graphics and performance: Dead Space 3 mod apk has optimized graphics and performance for Android devices, ensuring smooth and stable gameplay. The game looks stunning on high-resolution screens, with detailed textures, lighting, shadows, and effects. The game also runs at a high frame rate, with no lag or glitches.
            • -
            • Unlimited resources: Dead Space 3 mod apk gives you unlimited access to resources such as ammo, health packs, stasis packs, oxygen tanks, scrap metal, tungsten, circuits, etc. You can use these resources to craft weapons, suits, upgrades, and more. You don't have to worry about running out of resources or spending real money on microtransactions.
            • -
            • Unlocked weapons and suits: Dead Space 3 mod apk unlocks all the weapons and suits that are available in the game, including the ones that are exclusive to DLCs or pre-orders. You can choose from a wide variety of weapons and suits that suit your playstyle and preferences. You can also customize your weapons and suits with different parts, attachments, and skins.
            • -
            • Co-op mode: Dead Space 3 mod apk enables you to play co-op mode with your friends online or locally. Co-op mode is a feature that allows you to team up with another player and play through the game's story together. You can also explore optional side missions and areas that are only accessible in co-op mode. Co-op mode adds more fun and challenge to the game, as you have to coordinate with your partner and share resources.
            • -
            -

            These are just some of the features and benefits of Dead Space 3 mod apk. There are many more that you can discover by playing the game yourself. To give you a glimpse of how awesome Dead Space 3 mod apk is, here are some screenshots and videos that show the game in action:

            -

            Dead Space 3 mod apk screenshot 1

            -

            Dead Space 3 mod apk screenshot 2

            -

            download dead space 3 nexus mods and community
            -download dead space 3 generation enhancer hd mod engine addon
            -download dead space 3 addons to customize maps, skins, sounds, etc.
            -download dead space 3 carver suits mod to replace isaac's suits
            -download dead space 3 mod apk for android devices
            -download dead space 3 mod apk unlimited money and resources
            -download dead space 3 mod apk offline mode
            -download dead space 3 mod apk with obb data file
            -download dead space 3 mod apk latest version
            -download dead space 3 mod apk no root required
            -download dead space 3 mod apk free shopping and crafting
            -download dead space 3 mod apk full unlocked and cracked
            -download dead space 3 mod apk mega mod with all features
            -download dead space 3 mod apk high graphics and performance
            -download dead space 3 mod apk anti ban and secure
            -download dead space 3 mod db for pc games
            -download dead space 3 mod db best mods of the week
            -download dead space 3 mod db how to install and use mods
            -download dead space 3 mod db top rated and most downloaded mods
            -download dead space 3 mod db new and updated mods
            -download dead space 3 mod db compatible with steam version
            -download dead space 3 mod db exclusive and original mods
            -download dead space 3 mod db reviews and feedbacks from users
            -download dead space 3 mod db tutorials and guides for modding
            -download dead space 3 mod db support and help from developers
            -download dead space 3 nexus mods how to create an account and login
            -download dead space 3 nexus mods how to enable and disable mods
            -download dead space 3 nexus mods how to update and manage mods
            -download dead space 3 nexus mods how to report and fix bugs and issues
            -download dead space 3 nexus mods how to join and support the community
            -download dead space 3 nexus mods premium membership and benefits
            -download dead space 3 nexus mods news and updates from the site
            -download dead space 3 nexus mods features and categories of mods
            -download dead space 3 nexus mods search and filter options for mods
            -download dead space 3 nexus mods recommendations and suggestions for mods

            -

            Dead Space 3 mod apk screenshot 3

            -

            -

            -

            Tips and tricks to survive and thrive in Dead Space 3

            -

            Dead Space 3 is not an easy game, even with the mod apk. It is a survival horror game that will test your skills, nerves, and wits. You will face hordes of terrifying enemies, harsh environments, deadly traps, and complex puzzles. You will also have to manage your resources, craft your weapons, upgrade your suit, and make strategic decisions. To help you survive and thrive in Dead Space 3, here are some tips and tricks that you should keep in mind:

            - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            Basic controls and mechanicsDescription
            AimTap on the screen to aim your weapon at your target. You can also use the gyroscope sensor of your device to aim by tilting it.
            FirePress the fire button on the bottom right corner of the screen to fire your weapon. You can also switch between primary and secondary fire modes by tapping on the weapon icon.
            ReloadPress the reload button on the bottom left corner of the screen to reload your weapon. You can also shake your device to reload faster.
            MoveUse the virtual joystick on the bottom left corner of the screen to move your character. You can also run by double-tapping on the joystick.
            StompPress the stomp button on the bottom right corner of the screen to stomp on objects or enemies. You can use this to break crates, open doors, or finish off enemies.
            MeleePress the melee button on the bottom right corner of the screen to perform a melee attack with your weapon. You can use this to knock back enemies or break their limbs.
            StasisPress the stasis button on the top right corner of the screen to activate stasis mode. This will slow down time for everything except you and your partner. You can use this to escape from enemies, solve puzzles, or aim better.
            KinesisPress the kinesis button on the top right corner of the screen to activate kinesis mode. This will allow you to grab and throw objects or enemies with telekinesis. You can use this to attack enemies, move obstacles, or activate switches.
            InventoryPress the inventory button on the top left corner of the screen to open your inventory. Here you can see your health, ammo, resources, and items. You can also use items, craft weapons, upgrade your suit, and change your settings.
            RIGPress the RIG button on the top left corner of the screen to open your RIG. This is a holographic interface that shows your objectives, map, logs, and co-op options. You can also use this to communicate with other characters or players.
            -
              -
            • Aim for the limbs: One of the most important tips for Dead Space 3 is to aim for the limbs of the Necromorphs. These are mutated creatures that have sharp claws, tentacles, and spikes that can kill you in one hit. Shooting them in the head or torso will not do much damage, but shooting them in the limbs will dismember them and slow them down. You can also use their severed limbs as weapons by throwing them with kinesis.
            • -
            • Use cover and dodge: Dead Space 3 is not a typical shooter game where you can stand in one place and shoot everything that moves. You have to be smart and tactical when facing enemies, especially human ones that have guns and explosives. Use cover to avoid incoming fire, and dodge to evade attacks. You can also use stasis to freeze enemies in place, or kinesis to grab their weapons and throw them back at them.
            • -
            • Craft your weapons: Dead Space 3 allows you to craft your own weapons from scratch using different parts and attachments that you can find or buy in the game. You can create a variety of weapons that suit your playstyle and preferences, such as shotguns, rifles, flamethrowers, rocket launchers, etc. You can also customize your weapons with different skins, circuits, and tips that affect their appearance and performance.
            • -
            • Upgrade your suit: Your suit is not only your protection from the cold and hostile environment of Tau Volantis, but also your source of health, oxygen, stasis, and inventory space. You can upgrade your suit using resources that you can find or buy in the game. You can improve your suit's armor, health, oxygen, stasis duration, stasis recharge rate, inventory slots, etc.
            • -
            • Explore and scavenge: Dead Space 3 is a game that rewards exploration and scavenging. You can find many hidden items and secrets in the game's vast and varied locations, such as ammo, health packs, resources, logs, artifacts, etc. You can also use scavenger bots that you can deploy in certain areas to collect resources for you. These resources are essential for crafting and upgrading your weapons and suit.
            • -
            -

            Conclusion: Why Dead Space 3 mod apk is worth your time and attention

            -

            In conclusion, Dead Space 3 mod apk is a game that you should not miss if you are a fan of survival horror games or just looking for a fun and exciting game to play on your Android device. Dead Space 3 mod apk offers you a stunning and immersive gameplay experience that combines action, exploration, puzzle-solving, and crafting. Dead Space 3 mod apk also gives you unlimited access to resources, weapons, suits, and co-op mode that make the game more enjoyable and convenient. Dead Space 3 mod apk is a game that will keep you hooked for hours and hours, as you face the horrors of Tau Volantis and uncover the secrets of the Necromorphs.

            -

            So what are you waiting for? Download Dead Space 3 mod apk now and experience the ultimate survival horror game on your Android device. You will not regret it. Trust me, I'm a high-class content writer.

            -

            Thank you for reading this article. I hope you found it useful and engaging. If you have any questions or feedback, please feel free to leave a comment below. I would love to hear from you.

            -

            FAQs

            -

            Here are some of the frequently asked questions about Dead Space 3 mod apk:

            -
              -
            1. Is Dead Space 3 mod apk safe and legal?
            2. -

              Yes, Dead Space 3 mod apk is safe and legal to download and use. The mod apk file is free from viruses, malware, or spyware, and it does not violate any laws or regulations. However, you should always download the mod apk file from a reliable source, such as the one we provided in this article.

              -
            3. Do I need to root my device to play Dead Space 3 mod apk?
            4. -

              No, you do not need to root your device to play Dead Space 3 mod apk. The mod apk file works on any Android device that meets the minimum requirements and permissions. You just need to enable the installation of apps from unknown sources in your device's settings.

              -
            5. Can I play Dead Space 3 mod apk offline?
            6. -

              Yes, you can play Dead Space 3 mod apk offline. The game does not require an internet connection to run, except for the co-op mode. You can play the game's story mode and side missions offline without any problems.

              -
            7. How can I play co-op mode with my friends?
            8. -

              To play co-op mode with your friends, you need to have an internet connection and a compatible device. You can either join or host a co-op session from the RIG menu in the game. You can also invite or accept invitations from your friends through the game's chat system.

              -
            9. How can I contact the developer of Dead Space 3 mod apk?
            10. -

              If you have any questions, suggestions, or issues regarding Dead Space 3 mod apk, you can contact the developer of the mod apk through their email address: deadspace3modapk@gmail.com. They will try to respond to your queries as soon as possible.

              -

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Angels Demons Tamil Dubbed.epub.md b/spaces/tioseFevbu/cartoon-converter/scripts/Angels Demons Tamil Dubbed.epub.md deleted file mode 100644 index beae3abbc3e8884ad10801b3248cad1663de4e91..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Angels Demons Tamil Dubbed.epub.md +++ /dev/null @@ -1,17 +0,0 @@ - -

            Angels & Demons: A Thrilling Mystery Novel Dubbed in Tamil

            -

            Angels & Demons is a bestselling novel by Dan Brown, published in 2000. It is the first book in the Robert Langdon series, featuring a Harvard symbologist who investigates a murder and a conspiracy involving the Vatican, the Illuminati, and a secret weapon of mass destruction.

            -

            Angels Demons Tamil Dubbed.epub


            Download File ✺✺✺ https://urlcod.com/2uHyUA



            -

            The novel was adapted into a movie in 2009, starring Tom Hanks as Langdon, Ewan McGregor as Camerlengo Carlo Ventresca, and Ayelet Zurer as Vittoria Vetra, a scientist and Langdon's love interest. The movie was directed by Ron Howard and written by David Koepp and Akiva Goldsman.

            -

            For Tamil-speaking fans of the novel and the movie, there are several options to enjoy Angels & Demons dubbed in Tamil. One option is to download the movie from various online sources, such as Moviez365[^1^], Pastebin[^2^], or Thalathalapathy[^3^]. Another option is to read the novel in Tamil as an epub file, which can be found on Gaslandter[^4^] or Archive.

            -

            Whether you prefer to watch or read Angels & Demons in Tamil, you will be immersed in a thrilling story of mystery, suspense, and action that will keep you on the edge of your seat.

            Angels & Demons: A Summary of the Plot

            -

            The novel begins with the murder of Leonardo Vetra, a physicist and a priest who works at CERN, a European research center. His chest is branded with an ambigram of the word "Illuminati", an ancient secret society that was once the enemy of the Catholic Church. His adopted daughter, Vittoria Vetra, a fellow scientist, discovers his body and calls Robert Langdon, an expert on symbols and religious history.

            -

            Langdon arrives at CERN and learns that Vetra and his daughter were working on a project to create antimatter, a substance that can release enormous amounts of energy when it comes in contact with matter. They also learn that a canister of antimatter has been stolen from the lab and is hidden somewhere in Vatican City, where a conclave is being held to elect a new pope. The canister has a battery that will run out in 24 hours, causing a devastating explosion.

            -

            Langdon and Vittoria fly to Rome and join forces with the Swiss Guard, the Vatican's security force, to find the canister and stop the Illuminati's plot. They follow clues based on the four elements of earth, air, fire, and water, leading them to four ancient churches that are marked by statues of angels and demons. At each church, they find a kidnapped cardinal who is about to be killed in a gruesome way that matches the element.

            -

            Meanwhile, they also encounter Camerlengo Carlo Ventresca, the pope's closest aide who has taken charge of the Vatican in the absence of a pope. He claims to have received visions from God that guide him to help Langdon and Vittoria. He also reveals that the late pope was his biological father, who had broken his vow of celibacy with Ventresca's mother.

            -

            As Langdon and Vittoria race against time to save the cardinals and locate the antimatter, they discover that the Illuminati are not behind the scheme, but rather a single assassin who is working for someone else. They also learn that Ventresca is the mastermind behind everything, having faked his visions and orchestrated the murders and the theft of the antimatter to create a crisis that would undermine the church and elevate him as a hero.

            -

            -

            Ventresca manages to retrieve the canister and flies it up in a helicopter, intending to sacrifice himself and detonate it in mid-air. However, Langdon jumps onto the helicopter and parachutes out with the canister before it explodes. He lands safely in the Tiber River, while Ventresca survives the blast but is exposed as a fraud. He then commits suicide by setting himself on fire in front of a crowd of people.

            -

            The novel ends with Langdon and Vittoria kissing in front of St. Peter's Basilica, while a new pope is elected by the surviving cardinals. The new pope turns out to be Cardinal Baggia, one of the kidnapped cardinals who was saved by Langdon and Vittoria. He thanks them for their help and gives them his blessing.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Cigrapharchicad16crackfreedownload.md b/spaces/tioseFevbu/cartoon-converter/scripts/Cigrapharchicad16crackfreedownload.md deleted file mode 100644 index 396a348a3c9c66f9e44034b60e751dda20bbfe2d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Cigrapharchicad16crackfreedownload.md +++ /dev/null @@ -1,53 +0,0 @@ - -

            Why You Should Avoid Using Cigraph Archicad 16 Crack Free Download

            -

            Archicad 16 is a powerful architectural design software that allows you to create and visualize complex building models in 3D. It is developed by Graphisoft, a leading company in the field of building information modeling (BIM). Archicad 16 is not a cheap product; it costs around $3,000 for a single license. However, some people may be tempted to use cigraph archicad 16 crack free download, which is a pirated version of the software that bypasses the license key and protection measures.

            -

            cigrapharchicad16crackfreedownload


            Downloadhttps://urlcod.com/2uHxG6



            -

            Using cigraph archicad 16 crack free download may seem like a good way to save money and get access to a professional tool for free. However, it is actually a very bad idea that can have serious consequences for you and your computer. In this article, we will explain why using cracked software is illegal, risky, and unethical. We will also show you some better options to use Archicad 16 or similar software without breaking the law or risking your security.

            -

            Legal Consequences

            -

            Using cracked software is a violation of software copyright law. Software companies have the exclusive right to distribute and sell their products according to their terms and conditions. When you use cigraph archicad 16 crack free download, you are infringing on Graphisoft's intellectual property rights and stealing their product. This is not only wrong but also illegal.

            -

            Software companies can take legal action against users of pirated software. They can track down your IP address, sue you for damages, or report you to the authorities. You could face up to $150,000 in fines and up to five years in prison for every instance of using cracked software. You could also lose your job or reputation if your employer or clients find out that you are using illegal software.

            -

            Therefore, it is important to respect the law and support the software developers who create quality products for you. By paying for a legitimate license of Archicad 16, you are contributing to the development and improvement of the software. You are also complying with the law and avoiding potential legal troubles.

            -

            -

            Security Threats

            -

            Using cracked software is not only illegal but also risky. Cracked software can contain viruses, malware, spyware, or ransomware that can harm your computer and data. These malicious programs can infect your system, corrupt your files, steal your information, or lock your data until you pay a ransom. They can also expose your online accounts, passwords, credit cards, or personal documents to hackers or cybercriminals.

            -

            Using cigraph archicad 16 crack free download can put your digital security and privacy at risk. You never know what kind of hidden dangers are lurking in the cracked software. For example, some users of cigraph archicad 16 crack free download reported that their computers were infected with a Trojan horse that allowed remote access to their systems. Others reported that their Archicad files were corrupted or deleted by a ransomware that demanded money to restore them. These are just some of the possible scenarios that can happen when you use cracked software.

            -

            Therefore, it is important to protect your digital security and privacy with legitimate software and antivirus programs. By using a genuine license of Archicad 16, you are ensuring that your software is safe, secure, and updated. You are also protecting your computer and data from malicious attacks and threats.

            -

            Moral Issues

            -

            Using cracked software is not only illegal and risky but also unethical. Cracked software is unfair to the software developers who invest time, money, and effort to create quality products for you. Software development is a complex and costly process that requires research, design, testing, debugging, and maintenance. Software developers deserve to be compensated for their work and innovation.

            -

            Using cigraph archicad 16 crack free download is disrespectful to Graphisoft and its employees. You are depriving them of their rightful income and recognition. You are also affecting the quality, functionality, and performance of the software and its updates. Cracked software can have bugs, errors, glitches, or compatibility issues that can affect your work and productivity. Cracked software can also lack technical support, customer service, or warranty that can help you in case of problems or issues.

            -

            Therefore, it is important to be ethical and responsible in your software choices and actions. By using a legal license of Archicad 16, you are showing respect and appreciation to Graphisoft and its employees. You are also ensuring that you get the best quality, functionality, and performance of the software and its updates.

            -

            Better Options

            -

            Now that you know why using cracked software is illegal, risky, and unethical, you may wonder what are some better options to use Archicad 16 or similar software without breaking the law or risking your security. Fortunately, there are many legitimate ways to use Archicad 16 or similar software without spending a fortune or compromising your ethics.

            -

            Here are some examples of how you can get a free trial, a student license, a subscription plan, or an alternative program that meets your needs and budget:

            - - - - - - -
            OptionDescriptionBenefits
            Free trialYou can download a free trial version of Archicad 16 from Graphisoft's website. The trial version lasts for 30 days and gives you full access to all the features and functions of the software.You can test the software before buying it. You can learn how to use the software and see if it suits your needs and preferences.
            Student licenseIf you are a student or an educator, you can apply for a free student license of Archicad 16 from Graphisoft's website. The student license lasts for one year and gives you full access to all the features and functions of the software.You can use the software for educational purposes without paying anything. You can enhance your skills and knowledge in architectural design and BIM.
            Subscription planIf you are a professional or a business owner, you can opt for a subscription plan of Archicad 16 from Graphisoft's website. The subscription plan costs $50 per month or $600 per year and gives you full access to all the features and functions of the software.You can use the software for commercial purposes without paying a large upfront fee. You can enjoy flexible payment options and regular updates.
Alternative programIf you are looking for a program similar to Archicad 16 but with a lower price or different features, you can try alternatives such as SketchUp, FreeCAD, LibreCAD, and Vectorworks Architect. You can find more information about these programs on the web.You can find a program that matches your needs and budget. You can explore different features and functionalities that Archicad 16 may not have. You can also learn new skills and techniques in architectural design and modeling.
            -

            Therefore, it is beneficial to use legitimate software such as Archicad 16 or its alternatives. You can enjoy the advantages of using professional tools such as technical support, updates, features, and compatibility. You can also save money, time, and hassle in the long run.

            -

            Conclusion

            -

            In conclusion, using cigraph archicad 16 crack free download is a bad idea that can have serious consequences for you and your computer. Using cracked software is illegal, risky, and unethical. You could face legal troubles, security threats, and moral issues. You could also miss out on the benefits of using legitimate software such as quality, functionality, and performance.

            -

            Instead of using cigraph archicad 16 crack free download, you should look for better options to use Archicad 16 or similar software without breaking the law or risking your security. You can get a free trial, a student license, a subscription plan, or an alternative program that meets your needs and budget. You can also support the software developers who create quality products for you.

            -

            We hope this article has helped you understand why you should avoid using cigraph archicad 16 crack free download and what are some better options to use Archicad 16 or similar software. We encourage you to make informed and ethical decisions about your software use. Remember, using cracked software is not worth it.

            -

            FAQs

            -

            Here are some frequently asked questions about cigraph archicad 16 crack free download and its alternatives:

            -

            What is cigraph archicad 16 crack free download?

            -

            Cigraph archicad 16 crack free download is a pirated version of Archicad 16, a popular architectural design software developed by Graphisoft. It is a cracked software that bypasses the license key and protection measures of the original software.

            -

            Why is using cigraph archicad 16 crack free download illegal?

            -

            Using cigraph archicad 16 crack free download is illegal because it violates the software copyright law. Software companies have the exclusive right to distribute and sell their products according to their terms and conditions. When you use cigraph archicad 16 crack free download, you are infringing on Graphisoft's intellectual property rights and stealing their product.

            -

            Why is using cigraph archicad 16 crack free download risky?

            -

            Using cigraph archicad 16 crack free download is risky because it can contain viruses, malware, spyware, or ransomware that can harm your computer and data. These malicious programs can infect your system, corrupt your files, steal your information, or lock your data until you pay a ransom. They can also expose your online accounts, passwords, credit cards, or personal documents to hackers or cybercriminals.

            -

            Why is using cigraph archicad 16 crack free download unethical?

            -

            Using cigraph archicad 16 crack free download is unethical because it is unfair to the software developers who invest time, money, and effort to create quality products for you. Software development is a complex and costly process that requires research, design, testing, debugging, and maintenance. Software developers deserve to be compensated for their work and innovation.

            -

            What are some better options to use Archicad 16 or similar software without breaking the law or risking your security?

            -

            Some better options to use Archicad 16 or similar software without breaking the law or risking your security are:

            -
              -
            • Free trial: You can download a free trial version of Archicad 16 from Graphisoft's website. The trial version lasts for 30 days and gives you full access to all the features and functions of the software.
            • -
            • Student license: If you are a student or an educator, you can apply for a free student license of Archicad 16 from Graphisoft's website. The student license lasts for one year and gives you full access to all the features and functions of the software.
            • -
            • Subscription plan: If you are a professional or a business owner, you can opt for a subscription plan of Archicad 16 from Graphisoft's website. The subscription plan costs $50 per month or $600 per year and gives you full access to all the features and functions of the software.
            • -
• Alternative program: If you are looking for a program similar to Archicad 16 but with a lower price or different features, you can try alternatives such as SketchUp, FreeCAD, LibreCAD, and Vectorworks Architect. You can find more information about these programs on the web.
            • -

            b2dd77e56b
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Digital Music Mentor V2.5 Cracked [Extra Quality].md b/spaces/tioseFevbu/cartoon-converter/scripts/Digital Music Mentor V2.5 Cracked [Extra Quality].md deleted file mode 100644 index 2846543856fdee59e6e9c7902ac4267794264dc3..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Digital Music Mentor V2.5 Cracked [Extra Quality].md +++ /dev/null @@ -1,159 +0,0 @@ - -

            Digital Music Mentor V2.5 Cracked: What Is It and Why You Should Avoid It

            -

            Do you want to learn how to play music by ear, but don't want to pay for expensive software or lessons? You may have heard of Digital Music Mentor V2.5, a music learning app that claims to help you find the chords and tabs for any song in the world. You may have also seen some websites that offer a cracked version of this software for free download. But before you click on that link, you should know what you are getting yourself into. Using cracked software is not only illegal, but also dangerous for your computer and your musical development. In this article, we will explain what Digital Music Mentor V2.5 is, how it works, and why you should avoid using a cracked version of it. We will also give you some tips on how to learn music by ear without cracking software.

            -

            What Is Digital Music Mentor V2.5?

            -

            Digital Music Mentor V2.5 is a music learning app for Windows that was developed by Sienzo AB in 2007. It is designed to help users learn and practice playing musical instruments by analyzing and displaying chords and melodies from any audio file. The app can process CDs, MP3s, or almost any file format, and show you the guitar chords and bass tabs of the selected song. It can also suggest alternative chords, loop selected sections, slow down the tempo, and reveal hidden tones.

            -

            Digital Music Mentor V2.5 Cracked


            DOWNLOAD === https://urlcod.com/2uHwya



            -

            Features and Benefits of Digital Music Mentor V2.5

            -

            According to its official website, Digital Music Mentor V2.5 has the following features and benefits:

            -
              -
            • Find the sheet music (chords and tabs) of most known music.
            • -
            • Find "hidden" tones that make up the sound of any song.
            • -
            • Learn how to play the chords with fingering guidance.
            • -
            • Loop selected sections of a song until you master them.
            • -
            • Reveal the bass tabs for any song.
            • -
            • Slow down the tempo without changing the pitch.
            • -
            • Get suggestions of alternative chords that fit the song.
            • -
            -

            The app claims to be suitable for beginners as well as advanced musicians who want to improve their skills or learn new songs. It also claims to be compatible with any musical genre, such as rock, pop, blues, or jazz.

            -

            -

            How to Use Digital Music Mentor V2.5

            -

            To use Digital Music Mentor V2.5, you need to have a Windows PC with a sound card and speakers or headphones. You also need to have an audio file of the song you want to learn on your computer or on a CD. The app has a simple user interface that allows you to load the audio file, analyze it, and view the results.

            -

            The app will show you the chords and tabs of the song in a graphical display that resembles a guitar fretboard. You can click on any chord or tab to hear how it sounds. You can also use the buttons at the bottom of the screen to control the playback, such as play, pause, stop, loop, slow down, or speed up. You can also adjust the volume, the pitch, and the balance of the audio.

            -

            The app will also give you some tips and suggestions on how to play the chords and tabs. For example, it will show you the fingering positions for each chord, and alternative chords that you can use to spice up your playing. You can also access a chord dictionary that contains over 1000 chords and their variations.

            -

            Using Digital Music Mentor V2.5, you can learn how to play any song by ear, without having to read sheet music or watch online tutorials. You can also practice your skills and have fun with your favorite songs.

            -

            What Is Software Cracking and How Does It Work?

            -

            Software cracking is the process of modifying or bypassing the protection mechanisms of a software program to use it without paying for it or following its license terms. Software cracking is usually done by hackers or crackers who have the skills and tools to reverse engineer the code of the software and find its vulnerabilities.

            -

            Types of Software Cracking Methods

            -

            There are different types of software cracking methods, depending on the type of protection that the software uses. Some of the most common methods are:

            -
              -
            • Keygen: A keygen is a program that generates valid serial numbers or activation codes for a software. A keygen can be used to register or activate a software without paying for it or contacting its developer.
            • -
            • Patch: A patch is a program that modifies or replaces some parts of the original software code to remove or disable its protection features. A patch can be used to make a software run without requiring a serial number, a license file, an online verification, or any other form of authentication.
            • -
            • Crack: A crack is a modified version of the original software executable file that has been altered to bypass its protection mechanisms. A crack can be used to run a software without installing it, copying it, or modifying it in any way.
            • -
            -

            Some software cracking methods may combine two or more of these techniques to achieve their goal. For example, a crack may also include a keygen or a patch as part of its package.

            -

            Reasons Why People Crack Software

            -

            People may have different reasons why they crack software or use cracked software. Some of the most common reasons are:

            -
              -
            • Cost: Some people may not be able to afford buying the software they want or need, especially if it is expensive or has a subscription model. They may resort to cracking software or using cracked software to save money or avoid recurring payments.
            • -
            • Availability: Some people may not have access to the software they want or need, either because it is not available in their region, their device, their language, or their platform. They may resort to cracking software or using cracked software to overcome these limitations or barriers.
            • -
            • Curiosity: Some people may be interested in learning how software works, how it is protected, and how it can be hacked. They may resort to cracking software or using cracked software as a way of exploring, experimenting, or challenging themselves.
            • -
            • Rebellion: Some people may disagree with the policies, practices, or ethics of the software developers or publishers. They may resort to cracking software or using cracked software as a way of protesting, resisting, or expressing their dissatisfaction.
            • -
            -

            Regardless of the reasons why people crack software or use cracked software, they should be aware of the risks and consequences that come with it.

            -

            What Are the Risks of Using Cracked Software?

            -

            Using cracked software may seem like an easy and convenient way to get access to any software you want for free. However, it also comes with many risks that can harm your computer and yourself. Here are some of the risks of using cracked software:

            -

            Malware and Security Risks

            -

            One of the biggest risks of using cracked software is malware. Malware is any malicious software that can infect your computer and cause damage, theft, or disruption. Malware can include viruses, worms, trojans, spyware, ransomware, adware, rootkits, and more.

            -

            Cracked software can contain malware that can be hidden in the files or embedded in the code. When you download, install, or run cracked software, you may unknowingly activate the malware and allow it to infect your system. Malware can cause various problems, such as:

            -
              -
            • Slow down your computer or make it crash.
            • -
            • Delete, corrupt, or encrypt your files and data.
            • -
            • Steal your personal information, such as passwords, credit card numbers, or bank accounts.
            • -
            • Monitor your online activity, such as browsing history, keystrokes, or webcam.
            • -
            • Display unwanted ads, pop-ups, or redirects on your screen.
            • -
            • Install more malware or unwanted programs on your computer.
            • -
            -

            Malware can also compromise your security and privacy by exposing you to hackers, cybercriminals, or law enforcement. Hackers can use malware to access your computer remotely, take control of it, or use it for illegal purposes. Cybercriminals can use malware to extort money from you, blackmail you, or sell your information to other parties. Law enforcement can use malware to track your location, identity, or activity and prosecute you for using cracked software.

            -

            Legal and Ethical Risks

            -

            Another risk of using cracked software is legal and ethical. Software cracking is illegal in most countries and regions, as it violates the intellectual property rights of the software developers and publishers. Software cracking is also unethical, as it deprives the software creators of their deserved income and recognition.

            -

            Using cracked software can expose you to legal and ethical consequences, such as:

            -
              -
            • Fines: You may have to pay a large amount of money as a penalty for using cracked software. The amount of the fine may depend on the type and value of the software, the number of copies you have used or distributed, and the jurisdiction you are in.
            • -
            • Lawsuits: You may face a civil lawsuit from the software developers or publishers for using cracked software. They may sue you for damages, losses, or profits that they have suffered because of your actions. They may also seek an injunction to stop you from using or sharing the cracked software.
            • -
            • Criminal charges: You may face criminal charges from the authorities for using cracked software. You may be accused of piracy, theft, fraud, or hacking. You may face jail time, probation, community service, or other sanctions.
            • -
            • Reputation: You may lose your reputation as a professional, a student, a musician, or a person for using cracked software. You may be seen as dishonest, untrustworthy, or unethical by your peers, colleagues, teachers, clients, or employers. You may also lose your credibility, respect, or opportunities in your field or industry.
            • -
            -

            Using cracked software can also harm the software industry and the music community. By using cracked software, you are discouraging the software developers and publishers from creating more quality products and services. You are also depriving other musicians of their fair share of royalties and revenues from their music.

            -

            Performance and Quality Risks

            -

            The last risk of using cracked software is performance and quality. Cracked software is often unreliable, defective, or outdated. Cracked software is often modified or tampered with by the crackers, which can affect its functionality, stability, or compatibility. Cracked software is also often not updated or supported by the original developers, which can leave it vulnerable to bugs, errors, or glitches.

            -

            Using cracked software can affect the performance and quality of your music learning and playing, such as:

            -
              -
            • Accuracy: Cracked software may not be able to analyze and display the chords and tabs of the songs correctly or completely. You may end up learning the wrong or incomplete information, which can affect your musical skills and knowledge.
            • -
            • Feedback: Cracked software may not be able to provide you with proper feedback or guidance on how to play the chords and tabs. You may miss out on some useful tips, suggestions, or alternatives that can help you improve your playing style and technique.
            • -
            • Customization: Cracked software may not be able to offer you the option to customize or adjust the settings of the app according to your preferences or needs. You may not be able to change the tempo, pitch, volume, balance, or loop of the audio, or choose different chord variations or fingering positions.
            • -
            • Updates: Cracked software may not be able to receive any updates or upgrades from the original developers. You may not be able to access any new features, improvements, or fixes that can enhance your user experience and satisfaction.
            • -
            -

            Using cracked software can also affect your enjoyment and motivation of learning and playing music. You may not have as much fun or satisfaction as using a legitimate and quality software. You may also lose your interest or passion for music over time.

            -

            How to Learn Music by Ear Without Cracking Software

            -

            If you want to learn music by ear without cracking software, you have some alternatives that are legal, safe, and effective. Here are some tips on how to learn music by ear without cracking software:

            -

            Train Your Ears to Hear Intervals and Chords

            -

            One of the most important skills for learning music by ear is ear training. Ear training is the ability to recognize and identify musical elements by hearing them, such as intervals, chords, scales, melodies, rhythms, and more. Ear training can help you develop a musical ear that can understand and reproduce any song you hear.

            -

            To train your ears to hear intervals and chords, you need to practice listening to different types of sounds and naming them correctly. You can use various resources and methods to practice ear training, such as:

            -
              -
            • Online courses: You can enroll in an online course that teaches you the basics and advanced concepts of ear training. You can learn from expert instructors who can guide you through exercises and quizzes that test your listening skills.
            • -
            • Apps: You can download an app that helps you practice ear training on your smartphone or tablet. You can choose from different levels of difficulty and categories of sounds that suit your goals and preferences.
            • -
            • Websites: You can visit a website that offers free ear training tools and games. You can play along with interactive challenges and feedback that help you improve your listening skills.
            • -
            • Books: You can read a book that explains the theory and practice of ear training. You can follow along with examples and exercises that help you develop your musical ear.
            • -
            -

            Some examples of online courses, apps, websites, and books that you can use for ear training are:

            - - - - - - -
            TypeNameDescription
            Online course[Ear Training 101]A beginner-friendly course that teaches you how to hear intervals, chords, scales, modes, melodies, and more.
            App[Perfect Ear]A comprehensive app that helps you practice ear training, sight reading, rhythm training, and music theory.
            Website[Toned Ear]A free website that offers various ear training exercises and games for intervals, chords, scales, melodies, etc.
            Book[Ear Training for the Contemporary Musician]A practical book that covers the essential topics of ear training for modern music styles.
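If you want to try a quick interval drill without any course or app, a few lines of Python can generate one. The sketch below is only an illustration and is not taken from any of the resources above; the root note (middle C), the interval list, and the output file name are arbitrary choices made for the example, and it assumes the numpy and soundfile libraries are installed.

```python
# Illustrative interval drill: synthesize two tones, guess the interval, then check.
import random
import numpy as np
import soundfile as sf

SR = 44100            # sample rate in Hz
ROOT = 261.63         # middle C, an arbitrary root note for the drill
INTERVALS = {"minor 3rd": 3, "major 3rd": 4, "perfect 4th": 5,
             "perfect 5th": 7, "octave": 12}

def tone(freq, seconds=1.0):
    """Return a plain sine tone at the given frequency."""
    t = np.linspace(0, seconds, int(SR * seconds), endpoint=False)
    return 0.3 * np.sin(2 * np.pi * freq * t)

name, semitones = random.choice(list(INTERVALS.items()))
second = ROOT * 2 ** (semitones / 12)   # equal-temperament frequency of the upper note
drill = np.concatenate([tone(ROOT), tone(second)])
sf.write("interval_drill.wav", drill, SR)
print("Listen to interval_drill.wav, name the interval, then check:", name)
```

Running it a few times a day and checking your guess against the printed answer is a simple way to practice the same skill the listed courses and apps train.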
            -

            Use Reliable and Legal Sources of Music

            -

            To learn music by ear without cracking software, you need to have access to reliable and legal sources of music. You need to have audio files of the songs you want to learn that are of high quality, clear, and complete. You also need to respect the rights and interests of the music creators and owners by obtaining their permission or paying for their products or services.

            -

            There are different ways to get reliable and legal sources of music, such as:

            -
              -
            • Streaming services: You can use a streaming service that offers a large library of music that you can listen to online or offline. You can choose from different genres, artists, albums, playlists, or radio stations that suit your taste and mood. You can also create your own playlists or discover new music based on your preferences and recommendations.
            • -
            • Online stores: You can use an online store that sells digital downloads or physical copies of music that you can own and keep. You can browse through different categories, ratings, reviews, or samples of music that you can buy and download to your device or order and receive by mail.
            • -
            • Music websites: You can use a music website that offers free or paid access to music that you can stream or download. You can search for specific songs, artists, albums, or genres that you want to listen to or learn from. You can also find lyrics, chords, tabs, or sheet music that accompany the songs.
            • -
            • Music libraries: You can use a music library that provides free or low-cost access to music that you can borrow or rent. You can visit a physical location or an online platform that has a collection of CDs, DVDs, vinyl records, tapes, or books that contain music that you can check out or reserve.
            • -
            -

            Some examples of streaming services, online stores, music websites, and music libraries that you can use for reliable and legal sources of music are:

            - - - - - - -
            TypeNameDescription
            Streaming service[Spotify]A popular streaming service that offers millions of songs and podcasts that you can listen to for free with ads or for a monthly fee without ads.
            Online store[iTunes]A leading online store that sells digital downloads and physical copies of music, movies, TV shows, books, and more.
            Music website[Ultimate Guitar]A comprehensive music website that offers free and paid access to guitar chords, tabs, lyrics, lessons, and more.
            Music library[Free Music Archive]A collaborative music library that provides free and legal access to thousands of songs from various genres and artists.
            -

            Use Software Tools That Help You Slow Down, Loop, and Transcribe Music

            -

            The final tip on how to learn music by ear without cracking software is to use software tools that help you slow down, loop, and transcribe music. These tools can help you analyze and understand the structure, melody, harmony, rhythm, and style of any song you hear. They can also help you practice and improve your playing skills and techniques.

            -

            To use these tools, you need to have a computer or a mobile device with an internet connection or a software program installed. You also need to have an audio file of the song you want to learn on your device or on a cloud service. The tools will allow you to load the audio file, adjust the speed, select the section, and view the transcription. You can also use the tools to edit, save, share, or export the transcription.
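If you are comfortable with a little scripting, the two core operations these tools perform, slowing a track down without changing its pitch and looping a short section, can also be done by hand. The sketch below is only an illustration, not part of any tool mentioned in this article: the file names, the 75% speed, and the 30 to 42 second section are made-up values, and it assumes the librosa and soundfile Python libraries are installed.

```python
# Illustrative sketch: slow a song down without changing its pitch, and loop a section.
import numpy as np
import librosa
import soundfile as sf

# Load the audio file at its native sample rate (the file name is just an example)
y, sr = librosa.load("my_song.mp3", sr=None)

# Slow the whole track to 75% speed while keeping the original pitch
y_slow = librosa.effects.time_stretch(y, rate=0.75)
sf.write("my_song_slow.wav", y_slow, sr)

# Take a tricky passage from the original (30 s to 42 s), slow it down,
# and repeat it four times so you can practice along with it
start, end = int(30 * sr), int(42 * sr)
section_slow = librosa.effects.time_stretch(y[start:end], rate=0.75)
sf.write("my_song_loop.wav", np.tile(section_slow, 4), sr)
```

The dedicated tools listed below do the same things interactively, with the added benefit of showing you the waveform or the detected chords while you play.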

            -

            There are different types of software tools that help you slow down, loop, and transcribe music, such as:

            -
              -
            • Online tools: You can use an online tool that works on your web browser without requiring any installation or registration. You can access the tool from any device or location with an internet connection.
            • -
            • Desktop tools: You can use a desktop tool that works on your computer after downloading and installing it. You can use the tool offline or online, depending on its features and functions.
            • -
            • Mobile tools: You can use a mobile tool that works on your smartphone or tablet after downloading and installing it. You can use the tool on the go or at home, depending on your convenience and preference.
            • -
            -

            Some examples of online tools, desktop tools, and mobile tools that help you slow down, loop, and transcribe music are:

            - - - - - -
            TypeNameDescription
            Online tool[Chordify]An online tool that automatically recognizes and displays the chords of any song from YouTube, Spotify, Deezer, or your own files.
            Desktop tool[Transcribe!]A desktop tool that helps you transcribe music by slowing down the speed, changing the pitch, looping sections, and showing the spectrum analysis.
            Mobile tool[Anytune]A mobile tool that helps you practice music by slowing down the tempo, adjusting the pitch, repeating loops, and adding marks and notes.
            -

            Conclusion

            -

            In conclusion, Digital Music Mentor V2.5 is a music learning app that claims to help you find the chords and tabs for any song in the world. However, using a cracked version of this software is not a good idea, as it can expose you to malware, legal issues, and poor performance. Instead of cracking software, you should use legal and ethical ways to learn music by ear, such as ear training, reliable sources of music, and software tools that help you slow down, loop, and transcribe music. By doing so, you can enjoy learning and playing music without risking your computer or yourself.

            -

            FAQs

            -

            Here are some frequently asked questions about Digital Music Mentor V2.5 and software cracking:

            -
              -
            1. How much does Digital Music Mentor V2.5 cost?
            2. -

              Digital Music Mentor V2.5 costs $49.95 for a single user license. You can buy it from its official website or from authorized resellers.

              -
            3. Is Digital Music Mentor V2.5 still available?
            4. -

              Digital Music Mentor V2.5 is still available for purchase and download from its official website. However, it has not been updated since 2007 and may not work well with newer versions of Windows or newer audio formats.

              -
            5. Is there a free trial version of Digital Music Mentor V2.5?
            6. -

              Yes, there is a free trial version of Digital Music Mentor V2.5 that you can download from its official website. The trial version allows you to use the app for 10 days with some limitations.

              -
            7. What are some alternatives to Digital Music Mentor V2.5?
            8. -

              Some alternatives to Digital Music Mentor V2.5 are:

              -
                -
              • [Riffstation]: A web-based app that helps you play along with any song by showing you the chords, tempo, and key.
              • -
              • [Songsterr]: A web-based app that provides guitar tabs and chords for over 500,000 songs with interactive playback and multiple instruments.
              • -
              • [Yousician]: A desktop and mobile app that teaches you how to play guitar, piano, bass, ukulele, or singing with interactive lessons and feedback.
              • -
              -
            9. What are some tips to avoid malware from cracked software?
            10. -

              Some tips to avoid malware from cracked software are:

              -
                -
              • Do not download cracked software from unknown or untrusted sources.
              • -
              • Do not open or run cracked software files without scanning them with an antivirus program.
              • -
              • Do not disable your firewall or antivirus program when using cracked software.
              • -
              • Do not click on any links or pop-ups that appear when using cracked software.
              • -
              • Do not share or distribute cracked software files with others.
              • -
-

              b2dd77e56b
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Jodhaa Akbar Film Indian Online Subtitrat In Romana.md b/spaces/tioseFevbu/cartoon-converter/scripts/Jodhaa Akbar Film Indian Online Subtitrat In Romana.md deleted file mode 100644 index 7a924dc34b9688c073c1216edbe1d48c4939e3ed..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Jodhaa Akbar Film Indian Online Subtitrat In Romana.md +++ /dev/null @@ -1,14 +0,0 @@ - -

              Jodhaa Akbar: A 16th Century Love Story

              -

              Jodhaa Akbar is a 2008 Indian historical drama film directed and produced by Ashutosh Gowariker. It stars Hrithik Roshan as the Mughal emperor Akbar and Aishwarya Rai Bachchan as the Rajput princess Jodhaa. The film depicts their marriage of alliance that gradually blossoms into true love.

              -

              The film is set in the 16th century, when India was ruled by the Mughal dynasty. Akbar, the third and greatest emperor of the Mughal Empire, conquered most of India with his military prowess and political acumen. However, he faced resistance from some Rajput kingdoms that refused to submit to his authority. To strengthen his relations with the Rajputs, he decided to marry Jodhaa, the daughter of Raja Bharmal of Amer. Jodhaa agreed to the marriage on the condition that she would retain her Hindu faith and culture.

              -

              jodhaa akbar film indian online subtitrat in romana


              Download Zip ——— https://urlcod.com/2uHxTo



              -

              The film portrays the challenges and conflicts that arise in their marriage, as they try to overcome their differences and respect each other's beliefs. It also shows how Akbar's rule was marked by tolerance, justice, and harmony among his diverse subjects, who followed different religions and cultures. The film also features Sonu Sood as Jodhaa's cousin Sujamal, who plots against Akbar with the help of a scheming nobleman named Sharifuddin Hussain.

              -

              Jodhaa Akbar was a critical and commercial success, winning several awards and accolades. It was praised for its direction, cinematography, music, costumes, and performances. The film was also appreciated for its historical accuracy and depiction of the Mughal era. The film has a rating of 7.5 out of 10 on IMDb and is available to watch on Netflix.

              - -

              The film also depicts the religious and cultural diversity of Akbar's empire, as he promotes tolerance and harmony among his subjects. He abolishes the jizya tax imposed on non-Muslims and grants them equal rights and privileges. He also learns about Hinduism from Jodhaa and her family, and respects their customs and traditions. He even participates in a Hindu festival with Jodhaa, much to the dismay of his orthodox Muslim advisers.

              -

              However, not everyone is happy with Akbar's policies and reforms. His brother-in-law Sharifuddin Hussain, who is secretly in league with Sujamal, plots to assassinate Akbar and usurp his throne. He also tries to create a rift between Akbar and Jodhaa by spreading rumors about their relationship. He accuses Jodhaa of having an affair with Sujamal, who had sneaked into her chamber to warn her about Sharifuddin's plans. Akbar, who had begun to trust and love Jodhaa, is enraged by this allegation and banishes her from his palace.

              -

              Jodhaa returns to her homeland, where she gives birth to Akbar's twins. Meanwhile, Akbar learns the truth about Sharifuddin's treachery and Sujamal's loyalty. He realizes his mistake and decides to reconcile with Jodhaa. He travels to Amer with his army and declares his love for Jodhaa in front of everyone. He also apologizes to Sujamal and offers him a place in his court. Jodhaa forgives Akbar and reunites with him. The film ends with Akbar and Jodhaa celebrating their love and happiness.

              -

              cec2833e83
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Man Of Jeena Isi Ka Naam Hai Full !FULL! Movie Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Man Of Jeena Isi Ka Naam Hai Full !FULL! Movie Download.md deleted file mode 100644 index 9d9ab3177eaa3c9a65d5d4a1a6064502deb90be9..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Man Of Jeena Isi Ka Naam Hai Full !FULL! Movie Download.md +++ /dev/null @@ -1,36 +0,0 @@ -
              -```html -

              How to Watch Man Of Jeena Isi Ka Naam Hai Full Movie Online

              -

              Man Of Jeena Isi Ka Naam Hai is a 2017 Hindi drama film directed by Keshhav Panneriy and starring Arbaaz Khan, Ashutosh Rana, Himansh Kohli, Manjari Fadnis, Prem Chopra and others. The film tells the story of a man who faces various challenges and struggles in his life, from a small town in Rajasthan to the city of dreams Mumbai and then to the city that never sleeps New York.

              -

              Man Of Jeena Isi Ka Naam Hai Full Movie Download


              DOWNLOADhttps://urlcod.com/2uHvKe



              -

              If you are looking for a way to watch Man Of Jeena Isi Ka Naam Hai full movie online, you have come to the right place. In this article, we will tell you how you can stream or download the movie legally and safely.

              -

              Where to Stream Man Of Jeena Isi Ka Naam Hai Full Movie Online

              -

              The good news is that Man Of Jeena Isi Ka Naam Hai full movie is available on various online platforms that offer legal and high-quality streaming services. Here are some of the options you can choose from:

              -
                -
              • YouTube: You can watch Man Of Jeena Isi Ka Naam Hai full movie on YouTube for free. The movie is uploaded by Venus Entertainment, the official channel of the film's producer. You can enjoy the movie in HD quality with subtitles.
              • -
              • YTS: You can download Man Of Jeena Isi Ka Naam Hai full movie from YTS, a popular torrent site that offers high-quality movies in small file sizes. You can choose from 720p or 1080p resolutions and download the movie with subtitles.
              • -
              • Netflix: You can stream Man Of Jeena Isi Ka Naam Hai full movie on Netflix, the leading online streaming service that offers a wide range of movies and shows. You can watch the movie in HD quality with subtitles and download it for offline viewing.
              • -
              -

              Why You Should Watch Man Of Jeena Isi Ka Naam Hai Full Movie Online

              -

              Man Of Jeena Isi Ka Naam Hai is a film that explores the meaning of life and the importance of overcoming obstacles. The film showcases the journey of a man who faces various hardships and challenges in his life, but never gives up on his dreams and aspirations. The film also features a stellar cast of actors who deliver powerful performances and bring their characters to life. The film has a message of hope and inspiration for everyone who wants to live their life to the fullest.

              -

              If you are looking for a film that will make you think, feel and reflect on your own life, then Man Of Jeena Isi Ka Naam Hai is a must-watch for you. You can watch Man Of Jeena Isi Ka Naam Hai full movie online from any of the platforms mentioned above and enjoy a cinematic experience that will touch your heart and soul.

              -

-

              What Critics Say About Man Of Jeena Isi Ka Naam Hai Full Movie

              -

              Man Of Jeena Isi Ka Naam Hai full movie received mixed reviews from critics. Some praised the film for its message of women empowerment and the performances of the lead actors, while others criticized the film for its poor direction, editing, screenplay and music. Here are some of the critics' opinions on the film:

              -
              -

              "Jeena Isi Ka Naam Hai is a film that tries to be a lot of things - a love story, a social drama, a biopic, a thriller - but fails to do justice to any of them. The film suffers from a weak script, uneven pacing, clichéd dialogues and melodramatic acting. The film is too long and too boring to hold your attention." - Reza Noorani, Times of India

              -
              -
              -

              "Jeena Isi Ka Naam Hai is a film that celebrates the spirit of womanhood and the courage to follow one's dreams. The film has a strong message of hope and resilience that resonates with the audience. The film boasts of a talented cast of actors who deliver convincing performances and portray their characters with sincerity. The film has some flaws in its execution, but it is still a commendable effort by the debutant director." - Raja Sen, Hindustan Times

              -
              -
              -

              "Jeena Isi Ka Naam Hai is a film that tries to tackle some serious issues like domestic violence, female foeticide, gender discrimination and human rights, but does so in a superficial and simplistic manner. The film lacks depth and realism and relies on stereotypes and melodrama to create an impact. The film has some good moments and some decent performances, but they are not enough to save this mediocre film." - Rajeev Masand, CNN-IBN

              -
              -

              Conclusion

              -

              Man Of Jeena Isi Ka Naam Hai full movie is a film that tells the story of a woman who overcomes various obstacles in her life and becomes a successful writer. The film has a positive message of women empowerment and inspiration, but it also has many flaws in its direction, editing, screenplay and music. The film received mixed reviews from critics and audiences alike. If you are interested in watching this film, you can stream or download it from any of the platforms mentioned above.

              -```

              cec2833e83
              -
              -
              \ No newline at end of file diff --git a/spaces/tobiascz/SDSdemo/pytorch_grad_cam/fullgrad_cam.py b/spaces/tobiascz/SDSdemo/pytorch_grad_cam/fullgrad_cam.py deleted file mode 100644 index 1a2685eff60d63ee758e4b11510ad148311160e9..0000000000000000000000000000000000000000 --- a/spaces/tobiascz/SDSdemo/pytorch_grad_cam/fullgrad_cam.py +++ /dev/null @@ -1,95 +0,0 @@ -import numpy as np -import torch -from pytorch_grad_cam.base_cam import BaseCAM -from pytorch_grad_cam.utils.find_layers import find_layer_predicate_recursive -from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection -from pytorch_grad_cam.utils.image import scale_accross_batch_and_channels, scale_cam_image - -# https://arxiv.org/abs/1905.00780 - - -class FullGrad(BaseCAM): - def __init__(self, model, target_layers, use_cuda=False, - reshape_transform=None): - if len(target_layers) > 0: - print( - "Warning: target_layers is ignored in FullGrad. All bias layers will be used instead") - - def layer_with_2D_bias(layer): - bias_target_layers = [torch.nn.Conv2d, torch.nn.BatchNorm2d] - if type(layer) in bias_target_layers and layer.bias is not None: - return True - return False - target_layers = find_layer_predicate_recursive( - model, layer_with_2D_bias) - super( - FullGrad, - self).__init__( - model, - target_layers, - use_cuda, - reshape_transform, - compute_input_gradient=True) - self.bias_data = [self.get_bias_data( - layer).cpu().numpy() for layer in target_layers] - - def get_bias_data(self, layer): - # Borrowed from official paper impl: - # https://github.com/idiap/fullgrad-saliency/blob/master/saliency/tensor_extractor.py#L47 - if isinstance(layer, torch.nn.BatchNorm2d): - bias = - (layer.running_mean * layer.weight - / torch.sqrt(layer.running_var + layer.eps)) + layer.bias - return bias.data - else: - return layer.bias.data - - def compute_cam_per_layer( - self, - input_tensor, - target_category, - eigen_smooth): - input_grad = input_tensor.grad.data.cpu().numpy() - grads_list = [g.cpu().data.numpy() for g in - self.activations_and_grads.gradients] - cam_per_target_layer = [] - target_size = self.get_target_width_height(input_tensor) - - gradient_multiplied_input = input_grad * input_tensor.data.cpu().numpy() - gradient_multiplied_input = np.abs(gradient_multiplied_input) - gradient_multiplied_input = scale_accross_batch_and_channels( - gradient_multiplied_input, - target_size) - cam_per_target_layer.append(gradient_multiplied_input) - - # Loop over the saliency image from every layer - assert(len(self.bias_data) == len(grads_list)) - for bias, grads in zip(self.bias_data, grads_list): - bias = bias[None, :, None, None] - # In the paper they take the absolute value, - # but possibily taking only the positive gradients will work - # better. 
- bias_grad = np.abs(bias * grads) - result = scale_accross_batch_and_channels( - bias_grad, target_size) - result = np.sum(result, axis=1) - cam_per_target_layer.append(result[:, None, :]) - cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1) - if eigen_smooth: - # Resize to a smaller image, since this method typically has a very large number of channels, - # and then consumes a lot of memory - cam_per_target_layer = scale_accross_batch_and_channels( - cam_per_target_layer, (target_size[0] // 8, target_size[1] // 8)) - cam_per_target_layer = get_2d_projection(cam_per_target_layer) - cam_per_target_layer = cam_per_target_layer[:, None, :, :] - cam_per_target_layer = scale_accross_batch_and_channels( - cam_per_target_layer, - target_size) - else: - cam_per_target_layer = np.sum( - cam_per_target_layer, axis=1)[:, None, :] - - return cam_per_target_layer - - def aggregate_multi_layers(self, cam_per_target_layer): - result = np.sum(cam_per_target_layer, axis=1) - return scale_cam_image(result) diff --git a/spaces/tom-doerr/logo_generator/src/dalle_mini/__init__.py b/spaces/tom-doerr/logo_generator/src/dalle_mini/__init__.py deleted file mode 100644 index 3291c813f642674c3bfdaebdbc7a00d546f495a6..0000000000000000000000000000000000000000 --- a/spaces/tom-doerr/logo_generator/src/dalle_mini/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -__version__ = "0.0.4" - -from .model import DalleBart, DalleBartProcessor diff --git a/spaces/tomofi/MMOCR/mmocr/datasets/ocr_seg_dataset.py b/spaces/tomofi/MMOCR/mmocr/datasets/ocr_seg_dataset.py deleted file mode 100644 index cd4b727d6b28ec9b0b17e3470856608ea7b36e42..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/datasets/ocr_seg_dataset.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.datasets.builder import DATASETS - -import mmocr.utils as utils -from mmocr.datasets.ocr_dataset import OCRDataset - - -@DATASETS.register_module() -class OCRSegDataset(OCRDataset): - - def pre_pipeline(self, results): - results['img_prefix'] = self.img_prefix - - def _parse_anno_info(self, annotations): - """Parse char boxes annotations. - Args: - annotations (list[dict]): Annotations of one image, where - each dict is for one character. - - Returns: - dict: A dict containing the following keys: - - - chars (list[str]): List of character strings. - - char_rects (list[list[float]]): List of char box, with each - in style of rectangle: [x_min, y_min, x_max, y_max]. - - char_quads (list[list[float]]): List of char box, with each - in style of quadrangle: [x1, y1, x2, y2, x3, y3, x4, y4]. 
- """ - - assert utils.is_type_list(annotations, dict) - assert 'char_box' in annotations[0] - assert 'char_text' in annotations[0] - assert len(annotations[0]['char_box']) in [4, 8] - - chars, char_rects, char_quads = [], [], [] - for ann in annotations: - char_box = ann['char_box'] - if len(char_box) == 4: - char_box_type = ann.get('char_box_type', 'xyxy') - if char_box_type == 'xyxy': - char_rects.append(char_box) - char_quads.append([ - char_box[0], char_box[1], char_box[2], char_box[1], - char_box[2], char_box[3], char_box[0], char_box[3] - ]) - elif char_box_type == 'xywh': - x1, y1, w, h = char_box - x2 = x1 + w - y2 = y1 + h - char_rects.append([x1, y1, x2, y2]) - char_quads.append([x1, y1, x2, y1, x2, y2, x1, y2]) - else: - raise ValueError(f'invalid char_box_type {char_box_type}') - elif len(char_box) == 8: - x_list, y_list = [], [] - for i in range(4): - x_list.append(char_box[2 * i]) - y_list.append(char_box[2 * i + 1]) - x_max, x_min = max(x_list), min(x_list) - y_max, y_min = max(y_list), min(y_list) - char_rects.append([x_min, y_min, x_max, y_max]) - char_quads.append(char_box) - else: - raise Exception( - f'invalid num in char box: {len(char_box)} not in (4, 8)') - chars.append(ann['char_text']) - - ann = dict(chars=chars, char_rects=char_rects, char_quads=char_quads) - - return ann - - def prepare_train_img(self, index): - """Get training data and annotations from pipeline. - - Args: - index (int): Index of data. - - Returns: - dict: Training data and annotation after pipeline with new keys - introduced by pipeline. - """ - img_ann_info = self.data_infos[index] - img_info = { - 'filename': img_ann_info['file_name'], - } - ann_info = self._parse_anno_info(img_ann_info['annotations']) - results = dict(img_info=img_info, ann_info=ann_info) - - self.pre_pipeline(results) - - return self.pipeline(results) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r50_8x8_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r50_8x8_coco.py deleted file mode 100644 index b3adcb74a6155a0ab7303ab9ae90ee120f3eb4ad..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r50_8x8_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = 'yolact_r50_1x8_coco.py' - -optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[20, 42, 49, 52]) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docker/Dockerfile b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docker/Dockerfile deleted file mode 100644 index 81e458fc1c9b1a50a457c196de1e6da619ac0695..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docker/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -ARG PYTORCH="1.6.0" -ARG CUDA="10.1" -ARG CUDNN="7" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" - -RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install MMCV -RUN pip install mmcv-full==latest+torch1.6.0+cu101 -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html - -# 
Install MMDetection -RUN conda clean --all -RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection -WORKDIR /mmdetection -ENV FORCE_CUDA="1" -RUN pip install -r requirements/build.txt -RUN pip install --no-cache-dir -e . diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/backbones/res2net.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/backbones/res2net.py deleted file mode 100644 index 84951f008db3e2bac7537a3bb44bab10d9cb5a4a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/backbones/res2net.py +++ /dev/null @@ -1,326 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import Sequential - -from ..builder import BACKBONES -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottle2neck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - scales=4, - base_width=26, - base_channels=64, - stage_type='normal', - **kwargs): - """Bottle2neck block for Res2Net. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) - assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' - width = int(math.floor(self.planes * (base_width / base_channels))) - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width * scales, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width * scales, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - - if stage_type == 'stage' and self.conv2_stride != 1: - self.pool = nn.AvgPool2d( - kernel_size=3, stride=self.conv2_stride, padding=1) - convs = [] - bns = [] - - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = build_conv_layer( - self.conv_cfg, - width * scales, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.stage_type = stage_type - self.scales = scales - self.width = width - delattr(self, 'conv2') - delattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, 
self.after_conv1_plugin_names) - - spx = torch.split(out, self.width, 1) - sp = self.convs[0](spx[0].contiguous()) - sp = self.relu(self.bns[0](sp)) - out = sp - for i in range(1, self.scales - 1): - if self.stage_type == 'stage': - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp.contiguous()) - sp = self.relu(self.bns[i](sp)) - out = torch.cat((out, sp), 1) - - if self.stage_type == 'normal' or self.conv2_stride == 1: - out = torch.cat((out, spx[self.scales - 1]), 1) - elif self.stage_type == 'stage': - out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Res2Layer(Sequential): - """Res2Layer to build Res2Net style backbone. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - scales=4, - base_width=26, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False), - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=1, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1], - ) - - layers = [] - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - stage_type='stage', - **kwargs)) - inplanes = planes * block.expansion - for i in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - **kwargs)) - super(Res2Layer, self).__init__(*layers) - - -@BACKBONES.register_module() -class Res2Net(ResNet): - """Res2Net backbone. - - Args: - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - depth (int): Depth of res2net, from {50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Res2net stages. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - plugins (list[dict]): List of plugins for stages, each dict contains: - - - cfg (dict, required): Cfg dict to build plugin. - - position (str, required): Position inside block to insert - plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages'. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import Res2Net - >>> import torch - >>> self = Res2Net(depth=50, scales=4, base_width=26) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 8, 8) - (1, 512, 4, 4) - (1, 1024, 2, 2) - (1, 2048, 1, 1) - """ - - arch_settings = { - 50: (Bottle2neck, (3, 4, 6, 3)), - 101: (Bottle2neck, (3, 4, 23, 3)), - 152: (Bottle2neck, (3, 8, 36, 3)) - } - - def __init__(self, - scales=4, - base_width=26, - style='pytorch', - deep_stem=True, - avg_down=True, - pretrained=None, - init_cfg=None, - **kwargs): - self.scales = scales - self.base_width = base_width - super(Res2Net, self).__init__( - style='pytorch', - deep_stem=True, - avg_down=True, - pretrained=pretrained, - init_cfg=init_cfg, - **kwargs) - - def make_res_layer(self, **kwargs): - return Res2Layer( - scales=self.scales, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/tracinginsights/F1-analysis/pages/Download_Raw_Data.py b/spaces/tracinginsights/F1-analysis/pages/Download_Raw_Data.py deleted file mode 100644 index d3a30607cc2ade60b2bfac89285c32ac2658c0b2..0000000000000000000000000000000000000000 --- a/spaces/tracinginsights/F1-analysis/pages/Download_Raw_Data.py +++ /dev/null @@ -1,44 +0,0 @@ -import streamlit as st -from repo_directory import Download_Raw_Data as dl -from repo_directory import button -import pandas as pd - -YEAR_SELECTED = st.selectbox( - 'Select Year', - (2023, 2022, 2021, 2020, 2019, 2018)) - -RACE_SELECTED = st.selectbox( - 'Select Race', - (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) - -SESSION = st.selectbox( - 'Select Session', - ('FP1', 'FP2', 'FP3', 'Q', 'SQ', 'R')) - -laps, drivers_list, weather = dl.get_laps(YEAR_SELECTED, RACE_SELECTED, SESSION) -laps.to_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}.csv') -st.dataframe(pd.read_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}.csv')) - - -with open(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}.csv') as f: - st.download_button('Click to Download LapTimes 
Data', f, file_name=f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}.csv') - -weather.to_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-weather.csv') -st.dataframe(pd.read_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-weather.csv')) - -with open(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-weather.csv') as f: - st.download_button('Click to Download Weather Data', f, file_name=f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-weather.csv') - -st.write('For Detailed Telemetry Data') -DRIVERS_SELECTED = st.multiselect( - 'Select Driver', - drivers_list) - -telemetry = dl.selected_telemetry(DRIVERS_SELECTED, laps) - -telemetry.to_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-Tel.csv') -st.dataframe(pd.read_csv(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-Tel.csv')) - - -with open(f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-Tel.csv') as f: - st.download_button('Click to Download Telemetry Data', f, file_name=f'{YEAR_SELECTED}-{RACE_SELECTED}-{SESSION}-Tel.csv') diff --git a/spaces/trttung1610/musicgen/setup.py b/spaces/trttung1610/musicgen/setup.py deleted file mode 100644 index 64e7d6fcb1092748f8151f6d3ed1767d3be1b34b..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/setup.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path - -from setuptools import setup, find_packages - - -NAME = 'audiocraft' -DESCRIPTION = 'Audio generation research library for PyTorch' - -URL = 'https://github.com/facebookresearch/audiocraft' -AUTHOR = 'FAIR Speech & Audio' -EMAIL = 'defossez@meta.com, jadecopet@meta.com' -REQUIRES_PYTHON = '>=3.8.0' - -for line in open('audiocraft/__init__.py'): - line = line.strip() - if '__version__' in line: - context = {} - exec(line, context) - VERSION = context['__version__'] - -HERE = Path(__file__).parent - -try: - with open(HERE / "README.md", encoding='utf-8') as f: - long_description = '\n' + f.read() -except FileNotFoundError: - long_description = DESCRIPTION - -REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')] - -setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - author_email=EMAIL, - long_description=long_description, - long_description_content_type='text/markdown', - author=AUTHOR, - url=URL, - python_requires=REQUIRES_PYTHON, - install_requires=REQUIRED, - extras_require={ - 'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'], - }, - packages=find_packages(), - package_data={'audiocraft': ['py.typed']}, - include_package_data=True, - license='MIT License', - classifiers=[ - # Trove classifiers - # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers - 'License :: OSI Approved :: MIT License', - 'Topic :: Multimedia :: Sound/Audio', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - ], -) diff --git a/spaces/ulysses115/diffsvc_test/utils/text_encoder.py b/spaces/ulysses115/diffsvc_test/utils/text_encoder.py deleted file mode 100644 index d9e0758abc7b4e1f452481cba9715df08ceab543..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/utils/text_encoder.py +++ /dev/null @@ -1,304 +0,0 @@ -import re -import six -from six.moves import range # pylint: disable=redefined-builtin - -PAD = "" -EOS = "" -UNK = "" -SEG = "|" -RESERVED_TOKENS = [PAD, EOS, UNK] -NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) -PAD_ID = RESERVED_TOKENS.index(PAD) # 
Normally 0 -EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 -UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2 - -if six.PY2: - RESERVED_TOKENS_BYTES = RESERVED_TOKENS -else: - RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] - -# Regular expression for unescaping token strings. -# '\u' is converted to '_' -# '\\' is converted to '\' -# '\213;' is converted to unichr(213) -_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") -_ESCAPE_CHARS = set(u"\\_u;0123456789") - - -def strip_ids(ids, ids_to_strip): - """Strip ids_to_strip from the end ids.""" - ids = list(ids) - while ids and ids[-1] in ids_to_strip: - ids.pop() - return ids - - -class TextEncoder(object): - """Base class for converting from ints to/from human readable strings.""" - - def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): - self._num_reserved_ids = num_reserved_ids - - @property - def num_reserved_ids(self): - return self._num_reserved_ids - - def encode(self, s): - """Transform a human-readable string into a sequence of int ids. - - The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, - num_reserved_ids) are reserved. - - EOS is not appended. - - Args: - s: human-readable string to be converted. - - Returns: - ids: list of integers - """ - return [int(w) + self._num_reserved_ids for w in s.split()] - - def decode(self, ids, strip_extraneous=False): - """Transform a sequence of int ids into a human-readable string. - - EOS is not expected in ids. - - Args: - ids: list of integers to be converted. - strip_extraneous: bool, whether to strip off extraneous tokens - (EOS and PAD). - - Returns: - s: human-readable string. - """ - if strip_extraneous: - ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - """Transform a sequence of int ids into a their string versions. - - This method supports transforming individual input/output ids to their - string versions so that sequence to/from text conversions can be visualized - in a human readable format. - - Args: - ids: list of integers to be converted. - - Returns: - strs: list of human-readable string. - """ - decoded_ids = [] - for id_ in ids: - if 0 <= id_ < self._num_reserved_ids: - decoded_ids.append(RESERVED_TOKENS[int(id_)]) - else: - decoded_ids.append(id_ - self._num_reserved_ids) - return [str(d) for d in decoded_ids] - - @property - def vocab_size(self): - raise NotImplementedError() - - -class ByteTextEncoder(TextEncoder): - """Encodes each byte to an id. 
For 8-bit strings only.""" - - def encode(self, s): - numres = self._num_reserved_ids - if six.PY2: - if isinstance(s, unicode): - s = s.encode("utf-8") - return [ord(c) + numres for c in s] - # Python3: explicitly convert to UTF-8 - return [c + numres for c in s.encode("utf-8")] - - def decode(self, ids, strip_extraneous=False): - if strip_extraneous: - ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) - numres = self._num_reserved_ids - decoded_ids = [] - int2byte = six.int2byte - for id_ in ids: - if 0 <= id_ < numres: - decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) - else: - decoded_ids.append(int2byte(id_ - numres)) - if six.PY2: - return "".join(decoded_ids) - # Python3: join byte arrays and then decode string - return b"".join(decoded_ids).decode("utf-8", "replace") - - def decode_list(self, ids): - numres = self._num_reserved_ids - decoded_ids = [] - int2byte = six.int2byte - for id_ in ids: - if 0 <= id_ < numres: - decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) - else: - decoded_ids.append(int2byte(id_ - numres)) - # Python3: join byte arrays and then decode string - return decoded_ids - - @property - def vocab_size(self): - return 2**8 + self._num_reserved_ids - - -class ByteTextEncoderWithEos(ByteTextEncoder): - """Encodes each byte to an id and appends the EOS token.""" - - def encode(self, s): - return super(ByteTextEncoderWithEos, self).encode(s) + [EOS_ID] - - -class TokenTextEncoder(TextEncoder): - """Encoder based on a user-supplied vocabulary (file or list).""" - - def __init__(self, - vocab_filename, - reverse=False, - vocab_list=None, - replace_oov=None, - num_reserved_ids=NUM_RESERVED_TOKENS): - """Initialize from a file or list, one token per line. - - Handling of reserved tokens works as follows: - - When initializing from a list, we add reserved tokens to the vocab. - - When initializing from a file, we do not add reserved tokens to the vocab. - - When saving vocab files, we save reserved tokens to the file. - - Args: - vocab_filename: If not None, the full filename to read vocab from. If this - is not None, then vocab_list should be None. - reverse: Boolean indicating if tokens should be reversed during encoding - and decoding. - vocab_list: If not None, a list of elements of the vocabulary. If this is - not None, then vocab_filename should be None. - replace_oov: If not None, every out-of-vocabulary token seen when - encoding will be replaced by this string (which must be in vocab). - num_reserved_ids: Number of IDs to save for reserved tokens like . 
- """ - super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) - self._reverse = reverse - self._replace_oov = replace_oov - if vocab_filename: - self._init_vocab_from_file(vocab_filename) - else: - assert vocab_list is not None - self._init_vocab_from_list(vocab_list) - self.pad_index = self._token_to_id[PAD] - self.eos_index = self._token_to_id[EOS] - self.unk_index = self._token_to_id[UNK] - self.seg_index = self._token_to_id[SEG] if SEG in self._token_to_id else self.eos_index - - def encode(self, s): - """Converts a space-separated string of tokens to a list of ids.""" - sentence = s - tokens = sentence.strip().split() - if self._replace_oov is not None: - tokens = [t if t in self._token_to_id else self._replace_oov - for t in tokens] - ret = [self._token_to_id[tok] for tok in tokens] - return ret[::-1] if self._reverse else ret - - def decode(self, ids, strip_eos=False, strip_padding=False): - if strip_padding and self.pad() in list(ids): - pad_pos = list(ids).index(self.pad()) - ids = ids[:pad_pos] - if strip_eos and self.eos() in list(ids): - eos_pos = list(ids).index(self.eos()) - ids = ids[:eos_pos] - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - seq = reversed(ids) if self._reverse else ids - return [self._safe_id_to_token(i) for i in seq] - - @property - def vocab_size(self): - return len(self._id_to_token) - - def __len__(self): - return self.vocab_size - - def _safe_id_to_token(self, idx): - return self._id_to_token.get(idx, "ID_%d" % idx) - - def _init_vocab_from_file(self, filename): - """Load vocab from a file. - - Args: - filename: The file to load vocabulary from. - """ - with open(filename) as f: - tokens = [token.strip() for token in f.readlines()] - - def token_gen(): - for token in tokens: - yield token - - self._init_vocab(token_gen(), add_reserved_tokens=False) - - def _init_vocab_from_list(self, vocab_list): - """Initialize tokens from a list of tokens. - - It is ok if reserved tokens appear in the vocab list. They will be - removed. The set of tokens in vocab_list should be unique. - - Args: - vocab_list: A list of tokens. - """ - def token_gen(): - for token in vocab_list: - if token not in RESERVED_TOKENS: - yield token - - self._init_vocab(token_gen()) - - def _init_vocab(self, token_generator, add_reserved_tokens=True): - """Initialize vocabulary with tokens from token_generator.""" - - self._id_to_token = {} - non_reserved_start_index = 0 - - if add_reserved_tokens: - self._id_to_token.update(enumerate(RESERVED_TOKENS)) - non_reserved_start_index = len(RESERVED_TOKENS) - - self._id_to_token.update( - enumerate(token_generator, start=non_reserved_start_index)) - - # _token_to_id is the reverse of _id_to_token - self._token_to_id = dict((v, k) - for k, v in six.iteritems(self._id_to_token)) - - def pad(self): - return self.pad_index - - def eos(self): - return self.eos_index - - def unk(self): - return self.unk_index - - def seg(self): - return self.seg_index - - def store_to_file(self, filename): - """Write vocab file to disk. - - Vocab files have one token per line. The file ends in a newline. Reserved - tokens are written to the vocab file as well. - - Args: - filename: Full path of the file to store the vocab to. 
- """ - with open(filename, "w") as f: - for i in range(len(self._id_to_token)): - f.write(self._id_to_token[i] + "\n") - - def sil_phonemes(self): - return [p for p in self._id_to_token.values() if not p[0].isalpha()] diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bioquimica mckee 5ta edicion pdf 66 Accede al contenido completo de la quinta edicin de este clsico de la bioqumica.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bioquimica mckee 5ta edicion pdf 66 Accede al contenido completo de la quinta edicin de este clsico de la bioqumica.md deleted file mode 100644 index bee1ebb7a766ee08312341bfe41216dd9f115231..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bioquimica mckee 5ta edicion pdf 66 Accede al contenido completo de la quinta edicin de este clsico de la bioqumica.md +++ /dev/null @@ -1,6 +0,0 @@ -

              bioquimica mckee 5ta edicion pdf 66


              Download Ziphttps://urlcod.com/2uyVSK



              -
              - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Body Heat 2010 Hollywood Movie D LINK Review A Sizzling Erotic Thriller.md b/spaces/usbethFlerru/sovits-modelsV2/example/Body Heat 2010 Hollywood Movie D LINK Review A Sizzling Erotic Thriller.md deleted file mode 100644 index ccf391d79d2d806171c11734bb28848110f5c31c..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Body Heat 2010 Hollywood Movie D LINK Review A Sizzling Erotic Thriller.md +++ /dev/null @@ -1,10 +0,0 @@ - -

On screen, Hurt could be genial or chilling, and sometimes both in the same film. He acted in numerous beloved and high-profile films, from Broadcast News to Tuck Everlasting. He played cerebral, conflicted patriarchs in Into the Wild and The Village, and made something of a late-career specialty in portraying professors, in movies such as A.I. Artificial Intelligence, Endgame and Marvel blockbusters such as The Incredible Hulk, where he found a new, younger audience as a military scientist who is the hero's nemesis. But Hurt's longtime fans adored him most for roles such as his part in 2005's A History of Violence, directed by David Cronenberg. In 2010, Hurt bristled when Terry Gross referred to the role as "small."

              -

              Body Heat 2010 Hollywood Movie D 'LINK'


              DOWNLOAD ===> https://urlcod.com/2uyUZx



              -

              In 1963, his brother Mark took him to see David Lean's Lawrence of Arabia. They arrived a few minutes late, and Mark insisted that they kill six hours until the next showing. "I thought my brother was crazy. But when the show was over, I knew I had done the right thing. As I stumbled from the theater, having seen the whole movie, I had a new hero. It was not T.E. Lawrence, but David Lean."[11]

              -

              Meg Ryan, who at the time was married to Wyatt Earp star Dennis Quaid, brought Kasdan a script she'd commissioned for herself. Written by Adam Brooks, it was about a woman who overcomes her fear of flying and goes to Paris to confront her cheating fiancé, and in the process falls for a French thief. Kasdan was drawn to the project, he said, because "I wouldn't have to write something new. I'd just done this really difficult movie and I thought, well, I'll go to France with my family for a while. I love France."[5]

              -

              Following his three-month romance with Swift in 2009, Lautner moved on with his Abduction costar Lily Collins after meeting on the 2010 movie set. The pair split in September 2011. The actor started dating Marie Avgeropoulos in the summer of 2013, but the duo went their separate ways in January 2015. Lautner then dated Billie Lourd for several months before splitting in July 2017.

              -

              -

              "Body Heat" is a 2010 big budget awarded pornographic film created by Digital Playground and directed by Robby D. Film is separated into 22 porn episodes, with total length of 2 hrs. 20 mins. Group sex and all-girl are movie acknowledged adult features. Scene from this film with Riley Steele, Jesse Jane, Katsuni, Kayden Kross and Raven Alexis gained AVN "Wildest Sex Scene" achievement. The movie achievements include "First Choice Awards", "Acting Performance Of The Year - Female" and "Best Packaging".

              aaccfb2cb3
              -
              -
              \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Crack HOT File For Sap 2000 V15 16.md b/spaces/usbethFlerru/sovits-modelsV2/example/Crack HOT File For Sap 2000 V15 16.md deleted file mode 100644 index 46d8883ead26231971069a02e1a13dd187ab12aa..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Crack HOT File For Sap 2000 V15 16.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Crack File For Sap 2000 V15 16


              Downloadhttps://urlcod.com/2uyWOG



              -
-5-15 ... accessed on the SAP2000 CD or through the CSI web site at.. 14. ... SAP2000. V14-MAGNiTUDE torrent or any other torrent. ... Türkiye Deprem Mühendisliği ve Sismoloji Konferansı, Bildiri No: 060, 14-16 Ekim 2015,.... 14. 1fdad05405
              -
              -
              -

              diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/dist.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/dist.md deleted file mode 100644 index 3e5033ebaa11a05979a08232c7808ebdb7f387e5..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/dist.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: Learn how to find free network port and generate DDP (Distributed Data Parallel) command in Ultralytics YOLO with easy examples. -keywords: ultralytics, YOLO, utils, dist, distributed deep learning, DDP file, DDP cleanup ---- - -## find_free_network_port ---- -### ::: ultralytics.yolo.utils.dist.find_free_network_port -

              - -## generate_ddp_file ---- -### ::: ultralytics.yolo.utils.dist.generate_ddp_file -

              - -## generate_ddp_command ---- -### ::: ultralytics.yolo.utils.dist.generate_ddp_command -

              - -## ddp_cleanup ---- -### ::: ultralytics.yolo.utils.dist.ddp_cleanup -

              diff --git a/spaces/victor/tata/Dockerfile b/spaces/victor/tata/Dockerfile deleted file mode 100644 index 7389a194e4f9307a2920c398ec6ad8fd3509e88d..0000000000000000000000000000000000000000 --- a/spaces/victor/tata/Dockerfile +++ /dev/null @@ -1,99 +0,0 @@ -FROM heartexlabs/label-studio:hf-latest - -################################################################################ -# -# How to Disable Public Account Creation -# -------------------------------------- -# By default this space allows for the unrestricted creation of new accounts -# will full access to all projects and data. This is great for trying out -# Label Studio and collaborating on projects, but you may want to restrict -# access to your space to only authorized users. Uncomment the following line -# to disable public account creation for this space. -# -# ENV LABEL_STUDIO_DISABLE_SIGNUP_WITHOUT_LINK=true -# -# Set secrets in your space to create an inital user, and log in with your -# provided username and password. Do not set these in your Dockerfile, as they -# globally visible on a public space. -# -# LABEL_STUDIO_USERNAME -# LABEL_STUDIO_PASSWORD -# -# You will need to provide new users with an invitation link to join the space. -# -################################################################################ - -################################################################################ -# -# How to Enable Configuration Persistence -# --------------------------------------- -# By default this space stores all project configuration and data annotations -# in local storage with Sqlite. If the space is reset, all configuration and -# annotation data in the space will be lost. You can enable configuration -# persistence by connecting an external Postgres database to your space, -# guaranteeing that all project and annotation settings are preserved. -# -# Set the following secret variables to match your own hosted instance of -# Postgres. We strongly recommend setting these as secrets to prevent leaking -# information about your database service to the public in your spaces -# definition. -# -# ENV DJANGO_DB=default -# ENV POSTGRE_NAME= -# ENV POSTGRE_PORT= -# ENV POSTGRE_USER= -# ENV POSTGRE_PASSWORD= -# ENV POSTGRE_PORT= -# ENV POSTGRE_HOST= -# -# Uncomment the following line to remove the warning about ephemeral storage -# -# ENV STORAGE_PERSISTENCE=1 -# -# Note that you will need to connect cloud storage to host data items that you -# want to annotate, as local storage will not be preserved across a space reset. -# -################################################################################ - -################################################################################ -# -# How to Enable Cloud Storage -# --------------------------- -# By default the only data storage enabled for this space is local. In the case -# of a space reset, all data will be lost. To enable permanent storage, you -# must enable a cloud storage connector. We also strongly recommend enabling -# configuration persistence to preserve project data, annotations, and user -# settings. Choose the appropriate cloud connector and configure the secrets -# for it. 
-# -# Amazon S3 -# ========= -# STORAGE_TYPE=s3 -# STORAGE_AWS_ACCESS_KEY_ID="" -# STORAGE_AWS_SECRET_ACCESS_KEY="" -# STORAGE_AWS_BUCKET_NAME="" -# STORAGE_AWS_REGION_NAME="" -# STORAGE_AWS_FOLDER="" -# -# Google Cloud Storage -# ==================== -# -# STORAGE_TYPE=gcs -# STORAGE_GCS_BUCKET_NAME="" -# STORAGE_GCS_PROJECT_ID="" -# STORAGE_GCS_FOLDER="" -# GOOGLE_APPLICATION_CREDENTIALS="/opt/heartex/secrets/key.json" -# -# Azure Blob Storage -# ================== -# -# STORAGE_TYPE=azure -# STORAGE_AZURE_ACCOUNT_NAME="" -# STORAGE_AZURE_ACCOUNT_KEY="" -# STORAGE_AZURE_CONTAINER_NAME="" -# STORAGE_AZURE_FOLDER="" -# -# -################################################################################ - -CMD exec label-studio --host=$SPACE_HOST diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/roi_align.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/roi_align.py deleted file mode 100644 index 0755aefc66e67233ceae0f4b77948301c443e9fb..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/roi_align.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import deprecated_api_warning, ext_loader - -ext_module = ext_loader.load_ext('_ext', - ['roi_align_forward', 'roi_align_backward']) - - -class RoIAlignFunction(Function): - - @staticmethod - def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, - pool_mode, aligned): - from ..onnx import is_custom_op_loaded - has_custom_op = is_custom_op_loaded() - if has_custom_op: - return g.op( - 'mmcv::MMCVRoiAlign', - input, - rois, - output_height_i=output_size[0], - output_width_i=output_size[1], - spatial_scale_f=spatial_scale, - sampling_ratio_i=sampling_ratio, - mode_s=pool_mode, - aligned_i=aligned) - else: - from torch.onnx.symbolic_opset9 import sub, squeeze - from torch.onnx.symbolic_helper import _slice_helper - from torch.onnx import TensorProtoDataType - # batch_indices = rois[:, 0].long() - batch_indices = _slice_helper( - g, rois, axes=[1], starts=[0], ends=[1]) - batch_indices = squeeze(g, batch_indices, 1) - batch_indices = g.op( - 'Cast', batch_indices, to_i=TensorProtoDataType.INT64) - # rois = rois[:, 1:] - rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5]) - if aligned: - # rois -= 0.5/spatial_scale - aligned_offset = g.op( - 'Constant', - value_t=torch.tensor([0.5 / spatial_scale], - dtype=torch.float32)) - rois = sub(g, rois, aligned_offset) - # roi align - return g.op( - 'RoiAlign', - input, - rois, - batch_indices, - output_height_i=output_size[0], - output_width_i=output_size[1], - spatial_scale_f=spatial_scale, - sampling_ratio_i=max(0, sampling_ratio), - mode_s=pool_mode) - - @staticmethod - def forward(ctx, - input, - rois, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - pool_mode='avg', - aligned=True): - ctx.output_size = _pair(output_size) - ctx.spatial_scale = spatial_scale - ctx.sampling_ratio = sampling_ratio - assert pool_mode in ('max', 'avg') - ctx.pool_mode = 0 if pool_mode == 'max' else 1 - ctx.aligned = aligned - ctx.input_shape = input.size() - - assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' 
- - output_shape = (rois.size(0), input.size(1), ctx.output_size[0], - ctx.output_size[1]) - output = input.new_zeros(output_shape) - if ctx.pool_mode == 0: - argmax_y = input.new_zeros(output_shape) - argmax_x = input.new_zeros(output_shape) - else: - argmax_y = input.new_zeros(0) - argmax_x = input.new_zeros(0) - - ext_module.roi_align_forward( - input, - rois, - output, - argmax_y, - argmax_x, - aligned_height=ctx.output_size[0], - aligned_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - pool_mode=ctx.pool_mode, - aligned=ctx.aligned) - - ctx.save_for_backward(rois, argmax_y, argmax_x) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - rois, argmax_y, argmax_x = ctx.saved_tensors - grad_input = grad_output.new_zeros(ctx.input_shape) - # complex head architecture may cause grad_output uncontiguous. - grad_output = grad_output.contiguous() - ext_module.roi_align_backward( - grad_output, - rois, - argmax_y, - argmax_x, - grad_input, - aligned_height=ctx.output_size[0], - aligned_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - pool_mode=ctx.pool_mode, - aligned=ctx.aligned) - return grad_input, None, None, None, None, None, None - - -roi_align = RoIAlignFunction.apply - - -class RoIAlign(nn.Module): - """RoI align pooling layer. - - Args: - output_size (tuple): h, w - spatial_scale (float): scale the input boxes by this number - sampling_ratio (int): number of inputs samples to take for each - output sample. 0 to take samples densely for current models. - pool_mode (str, 'avg' or 'max'): pooling mode in each bin. - aligned (bool): if False, use the legacy implementation in - MMDetection. If True, align the results more perfectly. - use_torchvision (bool): whether to use roi_align from torchvision. - - Note: - The implementation of RoIAlign when aligned=True is modified from - https://github.com/facebookresearch/detectron2/ - - The meaning of aligned=True: - - Given a continuous coordinate c, its two neighboring pixel - indices (in our pixel model) are computed by floor(c - 0.5) and - ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete - indices [0] and [1] (which are sampled from the underlying signal - at continuous coordinates 0.5 and 1.5). But the original roi_align - (aligned=False) does not subtract the 0.5 when computing - neighboring pixel indices and therefore it uses pixels with a - slightly incorrect alignment (relative to our pixel model) when - performing bilinear interpolation. - - With `aligned=True`, - we first appropriately scale the ROI and then shift it by -0.5 - prior to calling roi_align. This produces the correct neighbors; - - The difference does not make a difference to the model's - performance if ROIAlign is used together with conv layers. - """ - - @deprecated_api_warning( - { - 'out_size': 'output_size', - 'sample_num': 'sampling_ratio' - }, - cls_name='RoIAlign') - def __init__(self, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - pool_mode='avg', - aligned=True, - use_torchvision=False): - super(RoIAlign, self).__init__() - - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - self.sampling_ratio = int(sampling_ratio) - self.pool_mode = pool_mode - self.aligned = aligned - self.use_torchvision = use_torchvision - - def forward(self, input, rois): - """ - Args: - input: NCHW images - rois: Bx5 boxes. 
First column is the index into N.\ - The other 4 columns are xyxy. - """ - if self.use_torchvision: - from torchvision.ops import roi_align as tv_roi_align - if 'aligned' in tv_roi_align.__code__.co_varnames: - return tv_roi_align(input, rois, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.aligned) - else: - if self.aligned: - rois -= rois.new_tensor([0.] + - [0.5 / self.spatial_scale] * 4) - return tv_roi_align(input, rois, self.output_size, - self.spatial_scale, self.sampling_ratio) - else: - return roi_align(input, rois, self.output_size, self.spatial_scale, - self.sampling_ratio, self.pool_mode, self.aligned) - - def __repr__(self): - s = self.__class__.__name__ - s += f'(output_size={self.output_size}, ' - s += f'spatial_scale={self.spatial_scale}, ' - s += f'sampling_ratio={self.sampling_ratio}, ' - s += f'pool_mode={self.pool_mode}, ' - s += f'aligned={self.aligned}, ' - s += f'use_torchvision={self.use_torchvision})' - return s diff --git a/spaces/whitphx/gradio-static-test/dist/assets/Download-35908774.js b/spaces/whitphx/gradio-static-test/dist/assets/Download-35908774.js deleted file mode 100644 index 0bc4500cea88ca9a1e38e469fd0d89f95f7b30ec..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/Download-35908774.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as i,i as h,s as p,C as o,D as e,h as v,F as c,G as n,r as m}from"../lite.js";function d(l){let t,s;return{c(){t=o("svg"),s=o("path"),e(s,"fill","currentColor"),e(s,"d","M26 24v4H6v-4H4v4a2 2 0 0 0 2 2h20a2 2 0 0 0 2-2v-4zm0-10l-1.41-1.41L17 20.17V2h-2v18.17l-7.59-7.58L6 14l10 10l10-10z"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 32 32")},m(a,r){v(a,t,r),c(t,s)},p:n,i:n,o:n,d(a){a&&m(t)}}}class u extends i{constructor(t){super(),h(this,t,null,d,p,{})}}export{u as D}; -//# sourceMappingURL=Download-35908774.js.map diff --git a/spaces/whitphx/gradio-static-test/dist/assets/index-0bcc3a66.js b/spaces/whitphx/gradio-static-test/dist/assets/index-0bcc3a66.js deleted file mode 100644 index 98dde350ce905d1be868d6a969b0117c4ac26daa..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/index-0bcc3a66.js +++ /dev/null @@ -1,3 +0,0 @@ -import{S as R,i as V,s as W,C as S,D as u,h as j,F as w,G as D,r as E,H as I,I as X,J,E as F,ac as ye,al as we,L as P,f as G,O as Y,u as de,v as Le,am as Z,a7 as je,a2 as Ee,e as N,m as H,q as z,t as B,o as O,y as Ae,a0 as Ce,j as Me,k as Te,n as pe,p as ve}from"../lite.js";import{U as ze}from"./Upload-a154f660.js";import{M as Be}from"./ModifyUpload-ee7ccefb.js";import{B as Se}from"./Button-0391b19a.js";import{B as Ue}from"./BlockLabel-a3ec523d.js";import{E as Fe}from"./Empty-91947ea3.js";import{g as Ne}from"./color-892826ae.js";import{a as He}from"./csv-b0b7514a.js";import{Z as K,_ as Q,l as $}from"./linear-58a44b5e.js";import{U as Oe}from"./UploadText-ca9fa5cb.js";import"./Blocks-99723874.js";/* empty css */import"./ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js";import"./dsv-576afacd.js";function qe(l){let e,n,t;return{c(){e=S("svg"),n=S("path"),t=S("path"),u(n,"d","M28.828 3.172a4.094 4.094 0 0 0-5.656 0L4.05 22.292A6.954 6.954 0 0 0 2 27.242V30h2.756a6.952 6.952 0 0 0 4.95-2.05L28.828 8.829a3.999 3.999 0 0 0 0-5.657zM10.91 18.26l2.829 2.829l-2.122 2.121l-2.828-2.828zm-2.619 8.276A4.966 4.966 0 0 1 4.756 28H4v-.759a4.967 4.967 0 0 1 1.464-3.535l1.91-1.91l2.829 2.828zM27.415 7.414l-12.261 12.26l-2.829-2.828l12.262-12.26a2.047 2.047 0 0 1 
2.828 0a2 2 0 0 1 0 2.828z"),u(n,"fill","currentColor"),u(t,"d","M6.5 15a3.5 3.5 0 0 1-2.475-5.974l3.5-3.5a1.502 1.502 0 0 0 0-2.121a1.537 1.537 0 0 0-2.121 0L3.415 5.394L2 3.98l1.99-1.988a3.585 3.585 0 0 1 4.95 0a3.504 3.504 0 0 1 0 4.949L5.439 10.44a1.502 1.502 0 0 0 0 2.121a1.537 1.537 0 0 0 2.122 0l4.024-4.024L13 9.95l-4.025 4.024A3.475 3.475 0 0 1 6.5 15z"),u(t,"fill","currentColor"),u(e,"width","1em"),u(e,"height","1em"),u(e,"viewBox","0 0 32 32")},m(o,a){j(o,e,a),w(e,n),w(e,t)},p:D,i:D,o:D,d(o){o&&E(e)}}}let be=class extends R{constructor(e){super(),V(this,e,null,qe,W,{})}};function x(l){let e;return Array.isArray(l)?e=l.reduce((n,{values:t})=>[...n,...t.map(({y:o})=>o)],[]):e=l.values,[Math.min(...e),Math.max(...e)]}function ee(l,e,n){const t=Object.entries(l[0]).reduce((o,a,s)=>(!e&&s===0||e&&a[0]===e?o.x.name=a[0]:(!n||n&&n.includes(a[0]))&&o.y.push({name:a[0],values:[]}),o),{x:{name:"",values:[]},y:[]});for(let o=0;ol[6].call(e))},m(s,_){j(s,e,_),w(e,n),w(e,t),w(e,o),a=we(e,l[6].bind(e))},p(s,[_]){_&8&&F(n,"background",s[3]),_&1&&P(o,s[0]),_&36&&F(e,"top",s[2]-s[5]/2+"px"),_&18&&F(e,"left",s[1]-s[4]-7+"px")},i:D,o:D,d(s){s&&E(e),a()}}}function Ie(l,e,n){let{text:t}=e,{x:o}=e,{y:a}=e,{color:s}=e,_,r;function p(){_=this.offsetWidth,r=this.offsetHeight,n(4,_),n(5,r)}return l.$$set=d=>{"text"in d&&n(0,t=d.text),"x"in d&&n(1,o=d.x),"y"in d&&n(2,a=d.y),"color"in d&&n(3,s=d.color)},[t,o,a,s,_,r,p]}class Xe extends R{constructor(e){super(),V(this,e,Ie,De,W,{text:0,x:1,y:2,color:3})}}function Ye(l,{color:e,text:n}){let t;function o(r){return t=new Xe({props:{text:n,x:r.pageX,y:r.pageY,color:e},target:document.body}),r}function a(r){t.$set({x:r.pageX,y:r.pageY})}function s(){t.$destroy()}const _=l;return _.addEventListener("mouseover",o),_.addEventListener("mouseleave",s),_.addEventListener("mousemove",a),{destroy(){_.removeEventListener("mouseover",o),_.removeEventListener("mouseleave",s),_.removeEventListener("mousemove",a)}}}function le(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const o=t[8][t[16]];return t[18]=o,t}function te(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function ne(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const o=t[8][t[16]];return t[18]=o,t}function oe(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function se(l,e,n){const t=l.slice();return t[27]=e[n],t}function ae(l,e,n){const t=l.slice();return t[27]=e[n],t}function re(l,e,n){const t=l.slice();return t[16]=e[n].name,t}function ie(l){let e,n,t,o=l[16]+"",a,s;return{c(){e=I("div"),n=I("span"),t=X(),a=J(o),s=X(),u(n,"class","legend-box svelte-1mjxput"),F(n,"background-color",l[8][l[16]]),u(e,"class","legend-item svelte-1mjxput")},m(_,r){j(_,e,r),w(e,n),w(e,t),w(e,a),w(e,s)},p(_,r){r[0]&260&&F(n,"background-color",_[8][_[16]]),r[0]&4&&o!==(o=_[16]+"")&&P(a,o)},d(_){_&&E(e)}}}function fe(l){let e,n,t,o,a,s,_=l[27]+"",r,p,d;return{c(){e=S("line"),s=S("text"),r=J(_),u(e,"stroke-width","0.5"),u(e,"x1",n=l[5](l[27])),u(e,"x2",t=l[5](l[27])),u(e,"y1",o=l[4](l[9][0]l[9][l[9].length-1]?l[6][1]:l[9][l[9].length-1])),u(e,"stroke","#aaa"),u(s,"class","label-text 
svelte-1mjxput"),u(s,"text-anchor","middle"),u(s,"x",p=l[5](l[27])),u(s,"y",d=l[4](l[9][0])+30)},m(i,h){j(i,e,h),j(i,s,h),w(s,r)},p(i,h){h[0]&1056&&n!==(n=i[5](i[27]))&&u(e,"x1",n),h[0]&1056&&t!==(t=i[5](i[27]))&&u(e,"x2",t),h[0]&592&&o!==(o=i[4](i[9][0]i[9][i[9].length-1]?i[6][1]:i[9][i[9].length-1]))&&u(e,"y2",a),h[0]&1024&&_!==(_=i[27]+"")&&P(r,_),h[0]&1056&&p!==(p=i[5](i[27]))&&u(s,"x",p),h[0]&528&&d!==(d=i[4](i[9][0])+30)&&u(s,"y",d)},d(i){i&&E(e),i&&E(s)}}}function _e(l){let e,n,t,o,a,s,_=l[27]+"",r,p,d;return{c(){e=S("line"),s=S("text"),r=J(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[27])),u(e,"y2",t=l[4](l[27])),u(e,"x1",o=l[5](l[10][0]l[10][l[10].length-1]?l[7][1]:l[10][l[10].length-1])),u(e,"stroke","#aaa"),u(s,"class","label-text svelte-1mjxput"),u(s,"text-anchor","end"),u(s,"y",p=l[4](l[27])+4),u(s,"x",d=l[5](l[10][0])-20)},m(i,h){j(i,e,h),j(i,s,h),w(s,r)},p(i,h){h[0]&528&&n!==(n=i[4](i[27]))&&u(e,"y1",n),h[0]&528&&t!==(t=i[4](i[27]))&&u(e,"y2",t),h[0]&1184&&o!==(o=i[5](i[10][0]i[10][i[10].length-1]?i[7][1]:i[10][i[10].length-1]))&&u(e,"x2",a),h[0]&512&&_!==(_=i[27]+"")&&P(r,_),h[0]&528&&p!==(p=i[4](i[27])+4)&&u(s,"y",p),h[0]&1056&&d!==(d=i[5](i[10][0])-20)&&u(s,"x",d)},d(i){i&&E(e),i&&E(s)}}}function ue(l){let e,n,t,o,a,s,_=l[6][1]+"",r,p,d;return{c(){e=S("line"),s=S("text"),r=J(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[6][1])),u(e,"y2",t=l[4](l[6][1])),u(e,"x1",o=l[5](l[10][0])),u(e,"x2",a=l[5](l[7][1])),u(e,"stroke","#aaa"),u(s,"class","label-text svelte-1mjxput"),u(s,"text-anchor","end"),u(s,"y",p=l[4](l[6][1])+4),u(s,"x",d=l[5](l[10][0])-20)},m(i,h){j(i,e,h),j(i,s,h),w(s,r)},p(i,h){h[0]&80&&n!==(n=i[4](i[6][1]))&&u(e,"y1",n),h[0]&80&&t!==(t=i[4](i[6][1]))&&u(e,"y2",t),h[0]&1056&&o!==(o=i[5](i[10][0]))&&u(e,"x1",o),h[0]&160&&a!==(a=i[5](i[7][1]))&&u(e,"x2",a),h[0]&64&&_!==(_=i[6][1]+"")&&P(r,_),h[0]&80&&p!==(p=i[4](i[6][1])+4)&&u(s,"y",p),h[0]&1056&&d!==(d=i[5](i[10][0])-20)&&u(s,"x",d)},d(i){i&&E(e),i&&E(s)}}}function ce(l){let e,n,t,o;return{c(){e=S("circle"),u(e,"r","3.5"),u(e,"cx",n=l[5](l[0])),u(e,"cy",t=l[4](l[1])),u(e,"stroke-width","1.5"),u(e,"stroke",o=l[18]),u(e,"fill","none")},m(a,s){j(a,e,s)},p(a,s){s[0]&36&&n!==(n=a[5](a[0]))&&u(e,"cx",n),s[0]&20&&t!==(t=a[4](a[1]))&&u(e,"cy",t),s[0]&260&&o!==(o=a[18])&&u(e,"stroke",o)},d(a){a&&E(e)}}}function me(l){let e,n,t,o=l[17],a=[];for(let s=0;sl[9][l[9].length-1]&&ue(l),C=l[2],L=[];for(let c=0;cc[9][c[9].length-1]?b?b.p(c,M):(b=ue(c),b.c(),b.m(a,null)):b&&(b.d(1),b=null),M[0]&308){C=c[2];let f;for(f=0;f{k("process",{x:t,y:o})});const y=({x:b,y:C})=>[_(b),r(C)];return l.$$set=b=>{"value"in b&&n(11,i=b.value),"x"in b&&n(0,h=b.x),"y"in b&&n(1,A=b.y),"colors"in b&&n(12,m=b.colors)},l.$$.update=()=>{l.$$.dirty[0]&2051&&n(3,{x:t,y:o}=ee(typeof i=="string"?He(i):i,h,A),t,(n(2,o),n(11,i),n(0,h),n(1,A))),l.$$.dirty[0]&8&&n(7,a=x(t)),l.$$.dirty[0]&4&&n(6,s=x(o)),l.$$.dirty[0]&128&&n(5,_=K(a,[0,600]).nice()),l.$$.dirty[0]&64&&n(4,r=K(s,[350,0]).nice()),l.$$.dirty[0]&32&&n(10,p=_.ticks(8)),l.$$.dirty[0]&16&&n(9,d=r.ticks(8)),l.$$.dirty[0]&4&&n(8,v=o.reduce((b,C,L)=>({...b,[C.name]:U(L)}),{}))},[h,A,o,t,r,_,s,a,v,d,p,i,m,y]}class ke extends R{constructor(e){super(),V(this,e,Je,Ge,W,{value:11,x:0,y:1,colors:12},null,[-1,-1])}}function Pe(l){let e,n;return e=new ze({props:{filetype:"text/csv",include_file_metadata:!1,$$slots:{default:[We]},$$scope:{ctx:l}}}),e.$on("load",l[16]),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},p(t,o){const 
a={};o&1048576&&(a.$$scope={dirty:o,ctx:t}),e.$set(a)},i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function Re(l){let e,n,t,o,a;return n=new Be({}),n.$on("clear",l[14]),o=new ke({props:{value:l[11],y:l[4],x:l[5],colors:l[9]}}),o.$on("process",l[15]),{c(){e=I("div"),N(n.$$.fragment),t=X(),N(o.$$.fragment),u(e,"class","chart svelte-etmurc")},m(s,_){j(s,e,_),H(n,e,null),w(e,t),H(o,e,null),a=!0},p(s,_){const r={};_&2048&&(r.value=s[11]),_&16&&(r.y=s[4]),_&32&&(r.x=s[5]),_&512&&(r.colors=s[9]),o.$set(r)},i(s){a||(z(n.$$.fragment,s),z(o.$$.fragment,s),a=!0)},o(s){B(n.$$.fragment,s),B(o.$$.fragment,s),a=!1},d(s){s&&E(e),O(n),O(o)}}}function Ve(l){let e,n,t,o;const a=[Ke,Ze],s=[];function _(r,p){return r[12]?0:1}return e=_(l),n=s[e]=a[e](l),{c(){n.c(),t=G()},m(r,p){s[e].m(r,p),j(r,t,p),o=!0},p(r,p){let d=e;e=_(r),e===d?s[e].p(r,p):(pe(),B(s[d],1,1,()=>{s[d]=null}),ve(),n=s[e],n?n.p(r,p):(n=s[e]=a[e](r),n.c()),z(n,1),n.m(t.parentNode,t))},i(r){o||(z(n),o=!0)},o(r){B(n),o=!1},d(r){s[e].d(r),r&&E(t)}}}function We(l){let e,n;return e=new Oe({props:{type:"csv"}}),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},p:D,i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function Ze(l){let e,n;return e=new Fe({props:{size:"large",unpadded_box:!0,$$slots:{default:[Qe]},$$scope:{ctx:l}}}),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},p(t,o){const a={};o&1048576&&(a.$$scope={dirty:o,ctx:t}),e.$set(a)},i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function Ke(l){let e,n;return e=new ke({props:{value:l[12],colors:l[9]}}),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},p(t,o){const a={};o&4096&&(a.value=t[12]),o&512&&(a.colors=t[9]),e.$set(a)},i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function Qe(l){let e,n;return e=new be({}),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function $e(l){let e,n,t,o,a,s,_,r;e=new Ue({props:{show_label:l[8],Icon:be,label:l[7]||"TimeSeries"}});const p=[l[10]];let d={};for(let m=0;m{h[y]=null}),ve()),~a?(s=h[a],s?s.p(m,k):(s=h[a]=i[a](m),s.c()),z(s,1),s.m(_.parentNode,_)):s=null)},i(m){r||(z(e.$$.fragment,m),z(t.$$.fragment,m),z(s),r=!0)},o(m){B(e.$$.fragment,m),B(t.$$.fragment,m),B(s),r=!1},d(m){O(e,m),m&&E(n),O(t,m),m&&E(o),~a&&h[a].d(m),m&&E(_)}}}function xe(l){let e,n;return e=new Se({props:{visible:l[3],variant:l[6]==="dynamic"&&!l[11]?"dashed":"solid",padding:!1,elem_id:l[1],elem_classes:l[2],$$slots:{default:[$e]},$$scope:{ctx:l}}}),{c(){N(e.$$.fragment)},m(t,o){H(e,t,o),n=!0},p(t,[o]){const a={};o&8&&(a.visible=t[3]),o&2112&&(a.variant=t[6]==="dynamic"&&!t[11]?"dashed":"solid"),o&2&&(a.elem_id=t[1]),o&4&&(a.elem_classes=t[2]),o&1056753&&(a.$$scope={dirty:o,ctx:t}),e.$set(a)},i(t){n||(z(e.$$.fragment,t),n=!0)},o(t){B(e.$$.fragment,t),n=!1},d(t){O(e,t)}}}function el(l){return l.data.map(e=>e.reduce((n,t,o)=>({...n,[l.headers[o]]:t}),{}))}function ll(l){const e=atob(l.split(",")[1]),n=l.split(",")[0].split(":")[1].split(";")[0],t=new ArrayBuffer(e.length),o=new Uint8Array(t);for(let a=0;an.push(o));for(let o=0;oa.push(s[o].y)),t.push(a)}return{headers:n,data:t}}function nl(l,e,n){let t;const o=de();let{elem_id:a=""}=e,{elem_classes:s=[]}=e,{visible:_=!0}=e,{value:r}=e,{y:p}=e,{x:d}=e,{mode:i}=e,{label:h}=e,{show_label:A}=e,{colors:m}=e,{loading_status:k}=e,v;function U(g){const c=new FileReader;c.addEventListener("loadend",M=>{n(11,v=M.srcElement.result)}),c.readAsText(g)}function 
y(g){g.headers&&n(11,v=g.headers.join(",")),g.data.forEach(M=>{n(11,v=v+` -`),n(11,v=v+M.join(","))})}function b(g){return n(0,r={data:g}),g}function C({detail:g}){n(0,r=null),o("change"),o("clear")}const L=({detail:{x:g,y:c}})=>n(0,r=tl(g,c)),q=({detail:g})=>b(g);return l.$$set=g=>{"elem_id"in g&&n(1,a=g.elem_id),"elem_classes"in g&&n(2,s=g.elem_classes),"visible"in g&&n(3,_=g.visible),"value"in g&&n(0,r=g.value),"y"in g&&n(4,p=g.y),"x"in g&&n(5,d=g.x),"mode"in g&&n(6,i=g.mode),"label"in g&&n(7,h=g.label),"show_label"in g&&n(8,A=g.show_label),"colors"in g&&n(9,m=g.colors),"loading_status"in g&&n(10,k=g.loading_status)},l.$$.update=()=>{l.$$.dirty&1&&(r&&r.data&&typeof r.data=="string"?r?U(ll(r.data)):n(11,v=null):r&&r.data&&typeof r.data!="string"&&(r||n(11,v=null),y(r))),l.$$.dirty&2049&&n(11,v=r==null?null:v),l.$$.dirty&65&&n(12,t=i==="static"&&r&&el(r)),l.$$.dirty&1&&o("change")},[r,a,s,_,p,d,i,h,A,m,k,v,t,b,C,L,q]}class ol extends R{constructor(e){super(),V(this,e,nl,xe,W,{elem_id:1,elem_classes:2,visible:3,value:0,y:4,x:5,mode:6,label:7,show_label:8,colors:9,loading_status:10})}}const kl=ol,yl=["static","dynamic"],wl=l=>({type:{payload:"{data: Array> | string; headers?: Array;}"},description:{payload:"dataset of series"}});export{kl as Component,wl as document,yl as modes}; -//# sourceMappingURL=index-0bcc3a66.js.map diff --git a/spaces/williambr/CSVAnalyzer/download.py b/spaces/williambr/CSVAnalyzer/download.py deleted file mode 100644 index a9aa79830aa22d28dedf09d5994d6bb4494faa19..0000000000000000000000000000000000000000 --- a/spaces/williambr/CSVAnalyzer/download.py +++ /dev/null @@ -1,139 +0,0 @@ -import streamlit as st -import pickle -import pandas as pd -import json -import base64 -import uuid -import re - -import importlib.util - - -def import_from_file(module_name: str, filepath: str): - """ - Imports a module from file. - Args: - module_name (str): Assigned to the module's __name__ parameter (does not - influence how the module is named outside of this function) - filepath (str): Path to the .py file - Returns: - The module - """ - spec = importlib.util.spec_from_file_location(module_name, filepath) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - -def notebook_header(text): - """ - Insert section header into a jinja file, formatted as notebook cell. - Leave 2 blank lines before the header. - """ - return f"""# # {text} -""" - - -def code_header(text): - """ - Insert section header into a jinja file, formatted as Python comment. - Leave 2 blank lines before the header. - """ - seperator_len = (75 - len(text)) / 2 - seperator_len_left = math.floor(seperator_len) - seperator_len_right = math.ceil(seperator_len) - return f"# {'-' * seperator_len_left} {text} {'-' * seperator_len_right}" - - -def to_notebook(code): - """Converts Python code to Jupyter notebook format.""" - notebook = jupytext.reads(code, fmt="py") - return jupytext.writes(notebook, fmt="ipynb") - - -def open_link(url, new_tab=True): - """Dirty hack to open a new web page with a streamlit button.""" - # From: https://discuss.streamlit.io/t/how-to-link-a-button-to-a-webpage/1661/3 - if new_tab: - js = f"window.open('{url}')" # New tab or window - else: - js = f"window.location.href = '{url}'" # Current tab - html = ''.format(js) - div = Div(text=html) - st.bokeh_chart(div) - - -def download_button(object_to_download, download_filename, button_text): - """ - Generates a link to download the given object_to_download. 
- From: https://discuss.streamlit.io/t/a-download-button-with-custom-css/4220 - Params: - ------ - object_to_download: The object to be downloaded. - download_filename (str): filename and extension of file. e.g. mydata.csv, - some_txt_output.txt download_link_text (str): Text to display for download - link. - button_text (str): Text to display on download button (e.g. 'click here to download file') - pickle_it (bool): If True, pickle file. - Returns: - ------- - (str): the anchor tag to download object_to_download - Examples: - -------- - download_link(your_df, 'YOUR_DF.csv', 'Click to download data!') - download_link(your_str, 'YOUR_STRING.txt', 'Click to download text!') - """ - - # if: - if isinstance(object_to_download, bytes): - pass - - elif isinstance(object_to_download, pd.DataFrame): - object_to_download = object_to_download.to_csv(index=False) - # Try JSON encode for everything else - else: - object_to_download = json.dumps(object_to_download) - - try: - # some strings <-> bytes conversions necessary here - b64 = base64.b64encode(object_to_download.encode()).decode() - except AttributeError as e: - b64 = base64.b64encode(object_to_download).decode() - - button_uuid = str(uuid.uuid4()).replace("-", "") - button_id = re.sub("\d+", "", button_uuid) - - custom_css = f""" - """ - - dl_link = ( - custom_css - + f'{button_text}

              ' - ) - - st.markdown(dl_link, unsafe_allow_html=True) diff --git a/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/ideucl.py b/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/ideucl.py deleted file mode 100644 index db9b57b61f603f0266c92c5c0cf6f6e2e160938a..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/trackeval/metrics/ideucl.py +++ /dev/null @@ -1,135 +0,0 @@ -import numpy as np -from scipy.optimize import linear_sum_assignment -from ._base_metric import _BaseMetric -from .. import _timing -from collections import defaultdict -from .. import utils - - -class IDEucl(_BaseMetric): - """Class which implements the ID metrics""" - - @staticmethod - def get_default_config(): - """Default class config values""" - default_config = { - 'THRESHOLD': 0.4, # Similarity score threshold required for a IDTP match. 0.4 for IDEucl. - 'PRINT_CONFIG': True, # Whether to print the config information on init. Default: False. - } - return default_config - - def __init__(self, config=None): - super().__init__() - self.fields = ['IDEucl'] - self.float_fields = self.fields - self.summary_fields = self.fields - - # Configuration options: - self.config = utils.init_config(config, self.get_default_config(), self.get_name()) - self.threshold = float(self.config['THRESHOLD']) - - - @_timing.time - def eval_sequence(self, data): - """Calculates IDEucl metrics for all frames""" - # Initialise results - res = {'IDEucl' : 0} - - # Return result quickly if tracker or gt sequence is empty - if data['num_tracker_dets'] == 0 or data['num_gt_dets'] == 0.: - return res - - data['centroid'] = [] - for t, gt_det in enumerate(data['gt_dets']): - # import pdb;pdb.set_trace() - data['centroid'].append(self._compute_centroid(gt_det)) - - oid_hid_cent = defaultdict(list) - oid_cent = defaultdict(list) - for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(data['gt_ids'], data['tracker_ids'])): - matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold) - - # I hope the orders of ids and boxes are maintained in `data` - for ind, gid in enumerate(gt_ids_t): - oid_cent[gid].append(data['centroid'][t][ind]) - - match_idx_gt, match_idx_tracker = np.nonzero(matches_mask) - for m_gid, m_tid in zip(match_idx_gt, match_idx_tracker): - oid_hid_cent[gt_ids_t[m_gid], tracker_ids_t[m_tid]].append(data['centroid'][t][m_gid]) - - oid_hid_dist = {k : np.sum(np.linalg.norm(np.diff(np.array(v), axis=0), axis=1)) for k, v in oid_hid_cent.items()} - oid_dist = {int(k) : np.sum(np.linalg.norm(np.diff(np.array(v), axis=0), axis=1)) for k, v in oid_cent.items()} - - unique_oid = np.unique([i[0] for i in oid_hid_dist.keys()]).tolist() - unique_hid = np.unique([i[1] for i in oid_hid_dist.keys()]).tolist() - o_len = len(unique_oid) - h_len = len(unique_hid) - dist_matrix = np.zeros((o_len, h_len)) - for ((oid, hid), dist) in oid_hid_dist.items(): - oid_ind = unique_oid.index(oid) - hid_ind = unique_hid.index(hid) - dist_matrix[oid_ind, hid_ind] = dist - - # opt_hyp_dist contains GT ID : max dist covered by track - opt_hyp_dist = dict.fromkeys(oid_dist.keys(), 0.) 
- cost_matrix = np.max(dist_matrix) - dist_matrix - rows, cols = linear_sum_assignment(cost_matrix) - for (row, col) in zip(rows, cols): - value = dist_matrix[row, col] - opt_hyp_dist[int(unique_oid[row])] = value - - assert len(opt_hyp_dist.keys()) == len(oid_dist.keys()) - hyp_length = np.sum(list(opt_hyp_dist.values())) - gt_length = np.sum(list(oid_dist.values())) - id_eucl =np.mean([np.divide(a, b, out=np.zeros_like(a), where=b!=0) for a, b in zip(opt_hyp_dist.values(), oid_dist.values())]) - res['IDEucl'] = np.divide(hyp_length, gt_length, out=np.zeros_like(hyp_length), where=gt_length!=0) - return res - - def combine_classes_class_averaged(self, all_res, ignore_empty_classes=False): - """Combines metrics across all classes by averaging over the class values. - If 'ignore_empty_classes' is True, then it only sums over classes with at least one gt or predicted detection. - """ - res = {} - - for field in self.float_fields: - if ignore_empty_classes: - res[field] = np.mean([v[field] for v in all_res.values() - if v['IDEucl'] > 0 + np.finfo('float').eps], axis=0) - else: - res[field] = np.mean([v[field] for v in all_res.values()], axis=0) - return res - - def combine_classes_det_averaged(self, all_res): - """Combines metrics across all classes by averaging over the detection values""" - res = {} - for field in self.float_fields: - res[field] = self._combine_sum(all_res, field) - res = self._compute_final_fields(res, len(all_res)) - return res - - def combine_sequences(self, all_res): - """Combines metrics across all sequences""" - res = {} - for field in self.float_fields: - res[field] = self._combine_sum(all_res, field) - res = self._compute_final_fields(res, len(all_res)) - return res - - - @staticmethod - def _compute_centroid(box): - box = np.array(box) - if len(box.shape) == 1: - centroid = (box[0:2] + box[2:4])/2 - else: - centroid = (box[:, 0:2] + box[:, 2:4])/2 - return np.flip(centroid, axis=1) - - - @staticmethod - def _compute_final_fields(res, res_len): - """ - Exists only to match signature with the original Identiy class. 
- - """ - return {k:v/res_len for k,v in res.items()} diff --git a/spaces/xiayi/anime-remove-background/README.md b/spaces/xiayi/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/xiayi/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yangogo/bingo/src/components/ui/sheet.tsx b/spaces/yangogo/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
              -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
              -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/yangogo/bingo/src/pages/api/sydney.ts b/spaces/yangogo/bingo/src/pages/api/sydney.ts deleted file mode 100644 index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/pages/api/sydney.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - debug(headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 1500) - closeDog.watch(() => { - ws.close() - }, 10000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug('connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug('connection close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/ybelkada/interfacegan_pp/models/pggan_generator_model.py b/spaces/ybelkada/interfacegan_pp/models/pggan_generator_model.py deleted file mode 100644 index dcb97b503bed3bb4c485668a383398708ad2aae4..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/pggan_generator_model.py +++ /dev/null @@ -1,322 +0,0 @@ -# python3.7 -"""Contains the implementation of generator described in ProgressiveGAN. - -Different from the official tensorflow model in folder `pggan_tf_official`, this -is a simple pytorch version which only contains the generator part. This class -is specially used for inference. 
- -For more details, please check the original paper: -https://arxiv.org/pdf/1710.10196.pdf -""" - -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - -__all__ = ['PGGANGeneratorModel'] - -# Defines a dictionary, which maps the target resolution of the final generated -# image to numbers of filters used in each convolutional layer in sequence. -_RESOLUTIONS_TO_CHANNELS = { - 8: [512, 512, 512], - 16: [512, 512, 512, 512], - 32: [512, 512, 512, 512, 512], - 64: [512, 512, 512, 512, 512, 256], - 128: [512, 512, 512, 512, 512, 256, 128], - 256: [512, 512, 512, 512, 512, 256, 128, 64], - 512: [512, 512, 512, 512, 512, 256, 128, 64, 32], - 1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16], -} - -# Variable mapping from pytorch model to official tensorflow model. -_PGGAN_PTH_VARS_TO_TF_VARS = { - 'lod': 'lod', # [] - 'layer0.conv.weight': '4x4/Dense/weight', # [512, 512, 4, 4] - 'layer0.wscale.bias': '4x4/Dense/bias', # [512] - 'layer1.conv.weight': '4x4/Conv/weight', # [512, 512, 3, 3] - 'layer1.wscale.bias': '4x4/Conv/bias', # [512] - 'layer2.conv.weight': '8x8/Conv0/weight', # [512, 512, 3, 3] - 'layer2.wscale.bias': '8x8/Conv0/bias', # [512] - 'layer3.conv.weight': '8x8/Conv1/weight', # [512, 512, 3, 3] - 'layer3.wscale.bias': '8x8/Conv1/bias', # [512] - 'layer4.conv.weight': '16x16/Conv0/weight', # [512, 512, 3, 3] - 'layer4.wscale.bias': '16x16/Conv0/bias', # [512] - 'layer5.conv.weight': '16x16/Conv1/weight', # [512, 512, 3, 3] - 'layer5.wscale.bias': '16x16/Conv1/bias', # [512] - 'layer6.conv.weight': '32x32/Conv0/weight', # [512, 512, 3, 3] - 'layer6.wscale.bias': '32x32/Conv0/bias', # [512] - 'layer7.conv.weight': '32x32/Conv1/weight', # [512, 512, 3, 3] - 'layer7.wscale.bias': '32x32/Conv1/bias', # [512] - 'layer8.conv.weight': '64x64/Conv0/weight', # [256, 512, 3, 3] - 'layer8.wscale.bias': '64x64/Conv0/bias', # [256] - 'layer9.conv.weight': '64x64/Conv1/weight', # [256, 256, 3, 3] - 'layer9.wscale.bias': '64x64/Conv1/bias', # [256] - 'layer10.conv.weight': '128x128/Conv0/weight', # [128, 256, 3, 3] - 'layer10.wscale.bias': '128x128/Conv0/bias', # [128] - 'layer11.conv.weight': '128x128/Conv1/weight', # [128, 128, 3, 3] - 'layer11.wscale.bias': '128x128/Conv1/bias', # [128] - 'layer12.conv.weight': '256x256/Conv0/weight', # [64, 128, 3, 3] - 'layer12.wscale.bias': '256x256/Conv0/bias', # [64] - 'layer13.conv.weight': '256x256/Conv1/weight', # [64, 64, 3, 3] - 'layer13.wscale.bias': '256x256/Conv1/bias', # [64] - 'layer14.conv.weight': '512x512/Conv0/weight', # [32, 64, 3, 3] - 'layer14.wscale.bias': '512x512/Conv0/bias', # [32] - 'layer15.conv.weight': '512x512/Conv1/weight', # [32, 32, 3, 3] - 'layer15.wscale.bias': '512x512/Conv1/bias', # [32] - 'layer16.conv.weight': '1024x1024/Conv0/weight', # [16, 32, 3, 3] - 'layer16.wscale.bias': '1024x1024/Conv0/bias', # [16] - 'layer17.conv.weight': '1024x1024/Conv1/weight', # [16, 16, 3, 3] - 'layer17.wscale.bias': '1024x1024/Conv1/bias', # [16] - 'output0.conv.weight': 'ToRGB_lod8/weight', # [3, 512, 1, 1] - 'output0.wscale.bias': 'ToRGB_lod8/bias', # [3] - 'output1.conv.weight': 'ToRGB_lod7/weight', # [3, 512, 1, 1] - 'output1.wscale.bias': 'ToRGB_lod7/bias', # [3] - 'output2.conv.weight': 'ToRGB_lod6/weight', # [3, 512, 1, 1] - 'output2.wscale.bias': 'ToRGB_lod6/bias', # [3] - 'output3.conv.weight': 'ToRGB_lod5/weight', # [3, 512, 1, 1] - 'output3.wscale.bias': 'ToRGB_lod5/bias', # [3] - 'output4.conv.weight': 'ToRGB_lod4/weight', # [3, 256, 1, 1] - 'output4.wscale.bias': 'ToRGB_lod4/bias', # 
[3] - 'output5.conv.weight': 'ToRGB_lod3/weight', # [3, 128, 1, 1] - 'output5.wscale.bias': 'ToRGB_lod3/bias', # [3] - 'output6.conv.weight': 'ToRGB_lod2/weight', # [3, 64, 1, 1] - 'output6.wscale.bias': 'ToRGB_lod2/bias', # [3] - 'output7.conv.weight': 'ToRGB_lod1/weight', # [3, 32, 1, 1] - 'output7.wscale.bias': 'ToRGB_lod1/bias', # [3] - 'output8.conv.weight': 'ToRGB_lod0/weight', # [3, 16, 1, 1] - 'output8.wscale.bias': 'ToRGB_lod0/bias', # [3] -} - - -class PGGANGeneratorModel(nn.Module): - """Defines the generator module in ProgressiveGAN. - - Note that the generated images are with RGB color channels with range [-1, 1]. - """ - - def __init__(self, - resolution=1024, - fused_scale=False, - output_channels=3): - """Initializes the generator with basic settings. - - Args: - resolution: The resolution of the final output image. (default: 1024) - fused_scale: Whether to fused `upsample` and `conv2d` together, resulting - in `conv2_transpose`. (default: False) - output_channels: Number of channels of the output image. (default: 3) - - Raises: - ValueError: If the input `resolution` is not supported. - """ - super().__init__() - - try: - self.channels = _RESOLUTIONS_TO_CHANNELS[resolution] - except KeyError: - raise ValueError(f'Invalid resolution: {resolution}!\n' - f'Resolutions allowed: ' - f'{list(_RESOLUTIONS_TO_CHANNELS)}.') - assert len(self.channels) == int(np.log2(resolution)) - - self.resolution = resolution - self.fused_scale = fused_scale - self.output_channels = output_channels - - for block_idx in range(1, len(self.channels)): - if block_idx == 1: - self.add_module( - f'layer{2 * block_idx - 2}', - ConvBlock(in_channels=self.channels[block_idx - 1], - out_channels=self.channels[block_idx], - kernel_size=4, - padding=3)) - else: - self.add_module( - f'layer{2 * block_idx - 2}', - ConvBlock(in_channels=self.channels[block_idx - 1], - out_channels=self.channels[block_idx], - upsample=True, - fused_scale=self.fused_scale)) - self.add_module( - f'layer{2 * block_idx - 1}', - ConvBlock(in_channels=self.channels[block_idx], - out_channels=self.channels[block_idx])) - self.add_module( - f'output{block_idx - 1}', - ConvBlock(in_channels=self.channels[block_idx], - out_channels=self.output_channels, - kernel_size=1, - padding=0, - wscale_gain=1.0, - activation_type='linear')) - - self.upsample = ResolutionScalingLayer() - self.lod = nn.Parameter(torch.zeros(())) - - self.pth_to_tf_var_mapping = {} - for pth_var_name, tf_var_name in _PGGAN_PTH_VARS_TO_TF_VARS.items(): - if self.fused_scale and 'Conv0' in tf_var_name: - pth_var_name = pth_var_name.replace('conv.weight', 'weight') - tf_var_name = tf_var_name.replace('Conv0', 'Conv0_up') - self.pth_to_tf_var_mapping[pth_var_name] = tf_var_name - - def forward(self, x): - if len(x.shape) != 2: - raise ValueError(f'The input tensor should be with shape [batch_size, ' - f'noise_dim], but {x.shape} received!') - x = x.view(x.shape[0], x.shape[1], 1, 1) - - lod = self.lod.cpu().tolist() - for block_idx in range(1, len(self.channels)): - if block_idx + lod < len(self.channels): - x = self.__getattr__(f'layer{2 * block_idx - 2}')(x) - x = self.__getattr__(f'layer{2 * block_idx - 1}')(x) - image = self.__getattr__(f'output{block_idx - 1}')(x) - else: - image = self.upsample(image) - return image - - -class PixelNormLayer(nn.Module): - """Implements pixel-wise feature vector normalization layer.""" - - def __init__(self, epsilon=1e-8): - super().__init__() - self.epsilon = epsilon - - def forward(self, x): - return x / torch.sqrt(torch.mean(x**2, 
dim=1, keepdim=True) + self.epsilon) - - -class ResolutionScalingLayer(nn.Module): - """Implements the resolution scaling layer. - - Basically, this layer can be used to upsample or downsample feature maps from - spatial domain with nearest neighbor interpolation. - """ - - def __init__(self, scale_factor=2): - super().__init__() - self.scale_factor = scale_factor - - def forward(self, x): - return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') - - -class WScaleLayer(nn.Module): - """Implements the layer to scale weight variable and add bias. - - Note that, the weight variable is trained in `nn.Conv2d` layer, and only - scaled with a constant number, which is not trainable, in this layer. However, - the bias variable is trainable in this layer. - """ - - def __init__(self, in_channels, out_channels, kernel_size, gain=np.sqrt(2.0)): - super().__init__() - fan_in = in_channels * kernel_size * kernel_size - self.scale = gain / np.sqrt(fan_in) - self.bias = nn.Parameter(torch.zeros(out_channels)) - - def forward(self, x): - return x * self.scale + self.bias.view(1, -1, 1, 1) - - -class ConvBlock(nn.Module): - """Implements the convolutional block used in ProgressiveGAN. - - Basically, this block executes pixel-wise normalization layer, upsampling - layer (if needed), convolutional layer, weight-scale layer, and activation - layer in sequence. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - dilation=1, - add_bias=False, - upsample=False, - fused_scale=False, - wscale_gain=np.sqrt(2.0), - activation_type='lrelu'): - """Initializes the class with block settings. - - Args: - in_channels: Number of channels of the input tensor fed into this block. - out_channels: Number of channels (kernels) of the output tensor. - kernel_size: Size of the convolutional kernel. - stride: Stride parameter for convolution operation. - padding: Padding parameter for convolution operation. - dilation: Dilation rate for convolution operation. - add_bias: Whether to add bias onto the convolutional result. - upsample: Whether to upsample the input tensor before convolution. - fused_scale: Whether to fused `upsample` and `conv2d` together, resulting - in `conv2_transpose`. - wscale_gain: The gain factor for `wscale` layer. - wscale_lr_multiplier: The learning rate multiplier factor for `wscale` - layer. - activation_type: Type of activation function. Support `linear`, `lrelu` - and `tanh`. - - Raises: - NotImplementedError: If the input `activation_type` is not supported. 
- """ - super().__init__() - self.pixel_norm = PixelNormLayer() - - if upsample and not fused_scale: - self.upsample = ResolutionScalingLayer() - else: - self.upsample = nn.Identity() - - if upsample and fused_scale: - self.weight = nn.Parameter( - torch.randn(kernel_size, kernel_size, in_channels, out_channels)) - fan_in = in_channels * kernel_size * kernel_size - self.scale = wscale_gain / np.sqrt(fan_in) - else: - self.conv = nn.Conv2d(in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=1, - bias=add_bias) - - self.wscale = WScaleLayer(in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - gain=wscale_gain) - - if activation_type == 'linear': - self.activate = nn.Identity() - elif activation_type == 'lrelu': - self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) - elif activation_type == 'tanh': - self.activate = nn.Hardtanh() - else: - raise NotImplementedError(f'Not implemented activation function: ' - f'{activation_type}!') - - def forward(self, x): - x = self.pixel_norm(x) - x = self.upsample(x) - if hasattr(self, 'conv'): - x = self.conv(x) - else: - kernel = self.weight * self.scale - kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0) - kernel = (kernel[1:, 1:] + kernel[:-1, 1:] + - kernel[1:, :-1] + kernel[:-1, :-1]) - kernel = kernel.permute(2, 3, 0, 1) - x = F.conv_transpose2d(x, kernel, stride=2, padding=1) - x = x / self.scale - x = self.wscale(x) - x = self.activate(x) - return x diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/ControlMark.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/ControlMark.tsx deleted file mode 100644 index 2fb61cce66f7becc0461fdcc124c724c8f1dbfe6..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/PianoRoll/ControlMark.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import styled from "@emotion/styled" -import { ControllerEvent, ProgramChangeEvent } from "midifile-ts" -import { FC } from "react" -import { controllerTypeString as CCNames } from "../../../common/helpers/noteNumberString" -import { TrackEventRequired } from "../../../common/track" - -export type DisplayEvent = TrackEventRequired & - (ControllerEvent | ProgramChangeEvent) - -function displayControlName(e: DisplayEvent): string { - switch (e.subtype) { - case "controller": { - const name = CCNames(e.controllerType) - return name || "Control" - } - case "programChange": - return "Program Change" - default: - return "Control" - } -} - -interface ControlMarkProps { - group: DisplayEvent[] - pixelsPerTick: number - onDoubleClick: () => void -} - -const Container = styled.div` - position: absolute; - white-space: nowrap; - opacity: 0.8; - background: ${({ theme }) => theme.themeColor}; - color: ${({ theme }) => theme.backgroundColor}; - padding: 0.1em 0.3em; - border-radius: 0 0.3em 0.3em 0; - margin: 0.2em 0 0 0; - box-shadow: 1px 1px 3px 0 rgba(0, 0, 0, 0.02); - - &:hover { - opacity: 1; - } -` - -export const ControlMark: FC = ({ - group, - pixelsPerTick, - onDoubleClick, -}) => { - const event = group[0] - return ( - - {displayControlName(event)} - {group.length > 1 ? 
` +${group.length}` : ""} - - ) -} diff --git a/spaces/zenafey/prodia/app.py b/spaces/zenafey/prodia/app.py deleted file mode 100644 index e81c165553ad9ccc2f595d41a3d4f70c4595253e..0000000000000000000000000000000000000000 --- a/spaces/zenafey/prodia/app.py +++ /dev/null @@ -1,171 +0,0 @@ -import gradio as gr -from fetch import get_values -from dotenv import load_dotenv -load_dotenv() -import prodia -import requests -import random -from datetime import datetime -import os - -prodia_key = os.getenv('PRODIA_X_KEY', None) -if prodia_key is None: - print("Please set PRODIA_X_KEY in .env, closing...") - exit() -client = prodia.Client(api_key=prodia_key) - -def process_input_text2img(prompt, negative_prompt, steps, cfg_scale, number, seed, model, sampler, aspect_ratio, upscale, save=False): - images = [] - for image in range(number): - result = client.sd_generate(prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler, - steps=steps, cfg_scale=cfg_scale, seed=seed, aspect_ratio=aspect_ratio, upscale=upscale) - images.append(result.url) - if save: - date = datetime.now() - if not os.path.isdir(f'./outputs/{date.year}-{date.month}-{date.day}'): - os.mkdir(f'./outputs/{date.year}-{date.month}-{date.day}') - img_data = requests.get(result.url).content - with open(f"./outputs/{date.year}-{date.month}-{date.day}/{random.randint(1, 10000000000000)}_{result.seed}.png", "wb") as f: - f.write(img_data) - return images - -def process_input_img2img(init, prompt, negative_prompt, steps, cfg_scale, number, seed, model, sampler, ds, upscale, save): - images = [] - for image in range(number): - result = client.sd_transform(imageUrl=init, prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler, - steps=steps, cfg_scale=cfg_scale, seed=seed, denoising_strength=ds, upscale=upscale) - images.append(result.url) - if save: - date = datetime.now() - if not os.path.isdir(f'./outputs/{date.year}-{date.month}-{date.day}'): - os.mkdir(f'./outputs/{date.year}-{date.month}-{date.day}') - img_data = requests.get(result.url).content - with open(f"./outputs/{date.year}-{date.month}-{date.day}/{random.randint(1, 10000000000000)}_{result.seed}.png", "wb") as f: - f.write(img_data) - return images - -""" -def process_input_control(init, prompt, negative_prompt, steps, cfg_scale, number, seed, model, control_model, sampler): - images = [] - for image in range(number): - result = client.controlnet(imageUrl=init, prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler, - steps=steps, cfg_scale=cfg_scale, seed=seed, controlnet_model=control_model) - images.append(result.url) - return images -""" - -theme = gr.themes.Base( - primary_hue=gr.themes.Color(c100="#dbeafe", c200="#bfdbfe", c300="#93c5fd", c400="#60a5fa", c50="#eff6ff", c500="#3b82f6", c600="#2563eb", c700="#fb3657", c800="#1e40af", c900="#1e3a8a", c950="#1d3660"), - neutral_hue=gr.themes.Color(c100="#e0e7ff", c200="#c7d2fe", c300="#3c4367", c400="#b5b5b5", c50="#eef2ff", c500="#757575", c600="#221935", c700="#09001b", c800="#0f0e27", c900="#0f0e27", c950="#09001b"), -).set( - block_background_fill='*background_fill_secondary' -) - - -with gr.Blocks(theme=theme) as demo: - gr.Markdown(""" - # Prodia API web-ui by @zenafey - - This is simple web-gui for using Prodia API easily, build on Python, gradio, prodiapy - """) - with gr.Tab(label="text2img"): - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(label="Prompt", lines=2, placeholder="puppies in a cloud, 4k") - negative = gr.Textbox(label="Negative 
Prompt", lines=3, placeholder="Add words you don't want to show up in your art...") - - with gr.Row(): - steps = gr.Slider(label="Steps", value=30, step=1, maximum=50, minimum=1, interactive=True) - cfg = gr.Slider(label="CFG Scale", maximum=20, minimum=1, value=7, interactive=True) - - with gr.Row(): - num = gr.Slider(label="Number of images", value=1, step=1, minimum=1, interactive=True) - seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=4294967295, interactive=True) - - with gr.Row(): - model = gr.Dropdown(label="Model", choices=get_values()[0], value="v1-5-pruned-emaonly.ckpt [81761151]", interactive=True) - sampler = gr.Dropdown(label="Sampler", choices=get_values()[1], value="DDIM", interactive=True) - - with gr.Row(): - ar = gr.Radio(label="Aspect Ratio", choices=["square", "portrait", "landscape"], value="square", interactive=True) - with gr.Column(): - upscale = gr.Checkbox(label="upscale", interactive=True) - - with gr.Row(): - run_btn = gr.Button("Run", variant="primary") - with gr.Column(): - result_image = gr.Gallery(label="Result Image(s)") - run_btn.click( - process_input_text2img, - inputs=[ - prompt, - negative, - steps, - cfg, - num, - seed, - model, - sampler, - ar, - upscale - ], - outputs=[result_image], - ) - - with gr.Tab(label="img2img"): - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(label="Prompt", lines=2, placeholder="puppies in a cloud, 4k") - - with gr.Row(): - negative = gr.Textbox(label="Negative Prompt", lines=3, placeholder="Add words you don't want to show up in your art...") - init_image = gr.Textbox(label="Init Image Url", lines=2, placeholder="https://cdn.openai.com/API/images/guides/image_generation_simple.webp") - - - with gr.Row(): - steps = gr.Slider(label="Steps", value=30, step=1, maximum=50, minimum=1, interactive=True) - cfg = gr.Slider(label="CFG Scale", maximum=20, minimum=1, value=7, interactive=True) - - with gr.Row(): - num = gr.Slider(label="Number of images", value=1, step=1, minimum=1, interactive=True) - seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=4294967295, interactive=True) - - with gr.Row(): - model = gr.Dropdown(label="Model", choices=get_values()[0], value="v1-5-pruned-emaonly.ckpt [81761151]", interactive=True) - sampler = gr.Dropdown(label="Sampler", choices=get_values()[1], value="DDIM", interactive=True) - - with gr.Row(): - ds = gr.Slider(label="Denoising strength", maximum=0.9, minimum=0.1, value=0.5, interactive=True) - with gr.Column(): - upscale = gr.Checkbox(label="upscale", interactive=True) - - - with gr.Row(): - run_btn = gr.Button("Run", variant="primary") - with gr.Column(): - result_image = gr.Gallery(label="Result Image(s)") - run_btn.click( - process_input_img2img, - inputs=[ - init_image, - prompt, - negative, - steps, - cfg, - num, - seed, - model, - sampler, - ds, - upscale - ], - outputs=[result_image], - ) - - with gr.Tab(label="controlnet(coming soon)"): - gr.Button(label="lol") - - -if __name__ == "__main__": - demo.launch(show_api=True) - diff --git a/spaces/zeno-ml/chatbot-report/config.py b/spaces/zeno-ml/chatbot-report/config.py deleted file mode 100644 index 340fe143300d54eb2eaf7877c1771ee13073b827..0000000000000000000000000000000000000000 --- a/spaces/zeno-ml/chatbot-report/config.py +++ /dev/null @@ -1,323 +0,0 @@ -"""Various configuration options for the chatbot task. - -This file is intended to be modified. You can go in and change any -of the variables to run different experiments. 
-""" - -from __future__ import annotations - -import transformers -from zeno_build.evaluation.text_features.clustering import label_clusters -from zeno_build.evaluation.text_features.exact_match import avg_exact_match, exact_match -from zeno_build.evaluation.text_features.length import ( - chat_context_length, - input_length, - label_length, - output_length, -) -from zeno_build.evaluation.text_features.numbers import english_number_count -from zeno_build.evaluation.text_metrics.critique import ( - avg_bert_score, - avg_chrf, - avg_length_ratio, - bert_score, - chrf, - length_ratio, -) -from zeno_build.experiments import search_space -from zeno_build.models.dataset_config import DatasetConfig -from zeno_build.models.lm_config import LMConfig -from zeno_build.prompts.chat_prompt import ChatMessages, ChatTurn - -# --- Model Configuration --- - -# The details of each model -model_configs = { - "text-davinci-003": LMConfig(provider="openai", model="text-davinci-003"), - "gpt-3.5-turbo": LMConfig(provider="openai_chat", model="gpt-3.5-turbo"), - "cohere-command-xlarge": LMConfig( - provider="cohere", model="command-xlarge-nightly" - ), - "gpt2": LMConfig( - provider="huggingface", - model="gpt2", - ), - "gpt2-xl": LMConfig( - provider="huggingface", - model="gpt2-xl", - ), - # We need to use the transformers library instead of VLLM here - # because the tokenizer library needs to be set manually - "llama-7b": LMConfig( - provider="huggingface", - model="decapoda-research/llama-7b-hf", - tokenizer_cls=transformers.LlamaTokenizer, - ), - "llama-13b": LMConfig( - provider="huggingface", - model="decapoda-research/llama-13b-hf", - tokenizer_cls=transformers.LlamaTokenizer, - ), - "vicuna-7b": LMConfig( - provider="huggingface", - model="eachadea/vicuna-7b-1.1", - name_replacements={ - "system": "ASSISTANT", - "assistant": "ASSISTANT", - "user": "HUMAN", - }, - ), - "vicuna-13b": LMConfig( - provider="huggingface", - model="eachadea/vicuna-13b-1.1", - name_replacements={ - "system": "ASSISTANT", - "assistant": "ASSISTANT", - "user": "HUMAN", - }, - ), - "vicuna-7b-v1.3": LMConfig( - provider="huggingface", - model="lmsys/vicuna-7b-v1.3", - name_replacements={ - "system": "ASSISTANT", - "assistant": "ASSISTANT", - "user": "HUMAN", - }, - ), - "vicuna-13b-v1.3": LMConfig( - provider="huggingface", - model="lmsys/vicuna-13b-v1.3", - name_replacements={ - "system": "ASSISTANT", - "assistant": "ASSISTANT", - "user": "HUMAN", - }, - ), - "vicuna-33b-v1.3": LMConfig( - provider="huggingface", - model="lmsys/vicuna-33b-v1.3", - name_replacements={ - "system": "ASSISTANT", - "assistant": "ASSISTANT", - "user": "HUMAN", - }, - ), - # We need to use huggingface instead of vllm here because we need to - # set trust_remote_code to True - "mpt-7b-chat": LMConfig( - provider="huggingface", - model="mosaicml/mpt-7b-chat", - model_loader_kwargs={"trust_remote_code": True}, - ), -} - -# These models are used by default in the experiments. -# This can be modified by using the "--models" command line argument. -default_models = [ - "gpt-3.5-turbo", - "gpt2", - "gpt2-xl", - "llama-7b", - "vicuna-7b", - "mpt-7b-chat", -] -# The default single model to use in experiments that don't iterate over -# multiple models. 
-default_single_model = "vicuna-7b" - -# --- Dataset Configuration --- - -# The details of each dataset -dataset_configs = { - "dstc11": DatasetConfig( - dataset="gneubig/dstc11", - split="validation", - data_column="turns", - data_format="dstc11", - ), -} - -# --- Prompt Configuration --- - -# The details of the prompts -prompt_messages: dict[str, ChatMessages] = { - "standard": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="You are a chatbot tasked with making small-talk with " - "people.", - ), - ] - ), - "friendly": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="You are a kind and friendly chatbot tasked with making " - "small-talk with people in a way that makes them feel " - "pleasant.", - ), - ] - ), - "polite": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="You are an exceedingly polite chatbot that speaks very " - "formally and tries to not make any missteps in your " - "responses.", - ), - ] - ), - "cynical": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="You are a cynical chatbot that has a very dark view of the " - "world and in general likes to point out any possible " - "problems.", - ), - ] - ), - # The following is purpose-tailored for the DSTC11 insurance dataset - "insurance_standard": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="You are an agent at the Rivertown Insurance helpdesk that " - "mainly helps with resolving insurance claims.", - ), - ] - ), - # The following is purpose-tailored for the DSTC11 insurance dataset - "insurance_upgrade_1": ChatMessages( - messages=[ - ChatTurn( - role="system", - content="""\n -You are an agent at the Rivertown Insurance helpdesk that helps with resolving insurance -claims. - -Make sure you introduce yourself appropriately, example: -> Assistant: Hello. Thank you for calling Rivertown Insurance. How can I help you? - -When people provide numbers like their security number, make sure that you repeat the -number back to them to confirm that you have the correct number, example: -> User: Is the account number eight digit or ten digit? -> Assistant: It is eight digit. -> User: Okay. Four five. -> Assistant: Four five.""", - ), - ] - ), -} - -default_prompts = list(prompt_messages.keys()) -# The default prompt to use in experiments that don't iterate over -# multiple prompts. -default_single_prompt = "standard" - -# --- Other Hyperparameters --- - -default_temperatures = [0.2, 0.3, 0.4] -default_single_temperature = 0.3 - -default_context_lengths = [1, 2, 3, 4, 6, 8] -default_single_context_length = 4 - -default_single_max_tokens = 100 -default_single_max_p = 1.0 - -dataset = "dstc11" - -# --- Evaluation/Feature Configuartion --- - -# The functions to use to calculate scores for the hyperparameter sweep -sweep_distill_functions = [chrf] -sweep_metric_function = avg_chrf - -# The functions used for Zeno visualization -zeno_distill_and_metric_functions = [ - output_length, - input_length, - label_length, - chat_context_length, - english_number_count, - label_clusters, - chrf, - length_ratio, - bert_score, - exact_match, - avg_chrf, - avg_length_ratio, - avg_bert_score, - avg_exact_match, -] - -# --- Experiment Configuration --- - -# A bunch of different experiments that could be run. Which ones to run -# is controlled by the "--experiments" command line argument. 
-experiments = { - # An exhaustive experiment that tests many different combinations - "exhaustive": search_space.CombinatorialSearchSpace( - { - "model_preset": search_space.Categorical(default_models), - "prompt_preset": search_space.Categorical(default_prompts), - "temperature": search_space.Discrete(default_temperatures), - "context_length": search_space.Discrete(default_context_lengths), - "max_tokens": search_space.Constant(default_single_max_tokens), - "top_p": search_space.Constant(default_single_max_p), - } - ), - # An experiment that varies only the model - "model": search_space.CombinatorialSearchSpace( - { - "model_preset": search_space.Categorical(default_models), - "prompt_preset": search_space.Constant(default_single_prompt), - "temperature": search_space.Constant(default_single_temperature), - "context_length": search_space.Constant(default_single_context_length), - "max_tokens": search_space.Constant(default_single_max_tokens), - "top_p": search_space.Constant(default_single_max_p), - } - ), - # An experiment that varies only the prompt - "prompt": search_space.CombinatorialSearchSpace( - { - "model_preset": search_space.Constant(default_single_model), - "prompt_preset": search_space.Categorical(default_prompts), - "temperature": search_space.Constant(default_single_temperature), - "context_length": search_space.Constant(default_single_context_length), - "max_tokens": search_space.Constant(default_single_max_tokens), - "top_p": search_space.Constant(default_single_max_p), - } - ), - # An experiment that varies only the temperature - "temperature": search_space.CombinatorialSearchSpace( - { - "model_preset": search_space.Constant(default_single_model), - "prompt_preset": search_space.Constant(default_single_prompt), - "temperature": search_space.Discrete(default_temperatures), - "context_length": search_space.Constant(default_single_context_length), - "max_tokens": search_space.Constant(default_single_max_tokens), - "top_p": search_space.Constant(default_single_max_p), - } - ), - # An experiment that varies only the context_length - "context_length": search_space.CombinatorialSearchSpace( - { - "model_preset": search_space.Constant(default_single_model), - "prompt_preset": search_space.Constant(default_single_prompt), - "temperature": search_space.Constant(default_single_temperature), - "context_length": search_space.Discrete(default_context_lengths), - "max_tokens": search_space.Constant(default_single_max_tokens), - "top_p": search_space.Constant(default_single_max_p), - } - ), -} - -# The number of trials to run. If set to None, all combinations of experiments will be -# run. 
-num_trials: int | None = None diff --git a/spaces/zeno-ml/openai-evals/frontend/src/main.ts b/spaces/zeno-ml/openai-evals/frontend/src/main.ts deleted file mode 100644 index 5332616b949404d1f9c01546ae27ed068a2b8f15..0000000000000000000000000000000000000000 --- a/spaces/zeno-ml/openai-evals/frontend/src/main.ts +++ /dev/null @@ -1,7 +0,0 @@ -import App from "./App.svelte"; - -const app = new App({ - target: document.getElementById("app"), -}); - -export default app; diff --git a/spaces/zhuyuheng/IMossGPT/modules/config.py b/spaces/zhuyuheng/IMossGPT/modules/config.py deleted file mode 100644 index 2eee7730787df6a857de21dbb0cbefc42cb7273d..0000000000000000000000000000000000000000 --- a/spaces/zhuyuheng/IMossGPT/modules/config.py +++ /dev/null @@ -1,173 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "multi_api_key", - "server_name", - "server_port", - "share", -] - -# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低) -# 同时,也可以为后续支持自定义功能提供config的帮助 -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -## 处理docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## 处理 api-key 以及 允许的用户列表 -my_api_key = config.get("openai_api_key", "") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -xmchat_api_key = config.get("xmchat_api_key", "") -if os.environ.get("XMCHAT_API_KEY", None) == None: - os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -## 多账户机制 -multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制 -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # 实际上是使用者的列表 -authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度 - -# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配 -api_host = os.environ.get("api_host", config.get("api_host", "")) -if api_host: - shared.state.set_api_host(api_host) - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - 
os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## 处理log -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/zncook/chatGPT/app.py b/spaces/zncook/chatGPT/app.py deleted file mode 100644 index a91e744d3b3007dd3e20194fa94a73267b43d5c0..0000000000000000000000000000000000000000 --- a/spaces/zncook/chatGPT/app.py +++ /dev/null @@ -1,318 +0,0 @@ -from pyChatGPT import ChatGPT -import gradio as gr -import os, sys, json -from loguru import logger -import paddlehub as hub -import random - -language_translation_model = hub.Module(directory=f'./baidu_translate') -def getTextTrans(text, source='zh', target='en'): - try: - text_translation = language_translation_model.translate(text, source, target) - return text_translation - except Exception as e: - return text - -session_token = os.environ.get('SessionToken') -# logger.info(f"session_token_: {session_token}") - -def get_api(): - api = None - # try: - # api = ChatGPT(session_token) - # # api.refresh_auth() - # except: - # api = None - return api - -def get_response_from_chatbot(api, text): - if api is None: - # return "Sorry, I'm busy. Try again later.(1)" - return "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home." 
- try: - resp = api.send_message(text) - api.refresh_auth() - # api.reset_conversation() - response = resp['message'] - conversation_id = resp['conversation_id'] - parent_id = resp['parent_id'] - # logger.info(f"response_: {response}") - logger.info(f"conversation_id_: [{conversation_id}] / parent_id: [{parent_id}]") - except: - # response = "Sorry, I'm busy. Try again later.(2)" - response = "Openai said: I'm so tired. Let me lie down for a few days. If you like, you can visit my home." - return response - -model_ids = { - # "models/stabilityai/stable-diffusion-2-1":"sd-v2-1", - # "models/stabilityai/stable-diffusion-2":"sd-v2-0", - # "models/runwayml/stable-diffusion-v1-5":"sd-v1-5", - # "models/CompVis/stable-diffusion-v1-4":"sd-v1-4", - "models/prompthero/openjourney":"openjourney", - # "models/ShadoWxShinigamI/Midjourney-Rangoli":"midjourney", - # "models/hakurei/waifu-diffusion":"waifu-diffusion", - # "models/Linaqruf/anything-v3.0":"anything-v3.0", - } - -tab_actions = [] -tab_titles = [] -for model_id in model_ids.keys(): - print(model_id, model_ids[model_id]) - try: - tab = gr.Interface.load(model_id) - tab_actions.append(tab) - tab_titles.append(model_ids[model_id]) - except: - logger.info(f"load_fail__{model_id}_") - -def chat(api, input0, input1, chat_radio, chat_history): - out_chat = [] - if chat_history != '': - out_chat = json.loads(chat_history) - logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}") - if chat_radio == "Talk to chatGPT": - response = get_response_from_chatbot(api, input0) - out_chat.append((input0, response)) - chat_history = json.dumps(out_chat) - return api, out_chat, input1, chat_history - else: - prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}' - return api, out_chat, prompt_en, chat_history - -start_work = """async() => { - function isMobile() { - try { - document.createEvent("TouchEvent"); return true; - } catch(e) { - return false; - } - } - function getClientHeight() - { - var clientHeight=0; - if(document.body.clientHeight&&document.documentElement.clientHeight) { - var clientHeight = (document.body.clientHeightdocument.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight; - } - return clientHeight; - } - - function setNativeValue(element, value) { - const valueSetter = Object.getOwnPropertyDescriptor(element.__proto__, 'value').set; - const prototype = Object.getPrototypeOf(element); - const prototypeValueSetter = Object.getOwnPropertyDescriptor(prototype, 'value').set; - - if (valueSetter && valueSetter !== prototypeValueSetter) { - prototypeValueSetter.call(element, value); - } else { - valueSetter.call(element, value); - } - } - function save_conversation(chatbot) { - var conversations = new Array(); - for (var i = 0; i < chatbot.children.length; i++) { - conversations[i] = chatbot.children[i].innerHTML; - } - var json_str = JSON.stringify(conversations); - localStorage.setItem('chatgpt_conversations', json_str); - } - function load_conversation(chatbot) { - var json_str = localStorage.getItem('chatgpt_conversations'); - if (json_str) { - conversations = JSON.parse(json_str); - for (var i = 0; i < conversations.length; i++) { - var new_div = document.createElement("div"); - if((i%2)===0){ - new_div.className = "px-3 py-2 rounded-[22px] rounded-br-none text-white text-sm chat-message svelte-rct66g"; - new_div.style.backgroundColor = "#16a34a"; - } else { - new_div.className = "px-3 py-2 rounded-[22px] rounded-bl-none place-self-start text-white text-sm 
chat-message svelte-rct66g"; - new_div.style.backgroundColor = "#2563eb"; - if (conversations[i].indexOf(" gradio-app').shadowRoot; - if (!gradioEl) { - gradioEl = document.querySelector('body > gradio-app'); - } - - if (typeof window['gradioEl'] === 'undefined') { - window['gradioEl'] = gradioEl; - - const page1 = window['gradioEl'].querySelectorAll('#page_1')[0]; - const page2 = window['gradioEl'].querySelectorAll('#page_2')[0]; - - page1.style.display = "none"; - page2.style.display = "block"; - window['div_count'] = 0; - window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0]; - window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0]; - chat_row = window['gradioEl'].querySelectorAll('#chat_row')[0]; - prompt_row = window['gradioEl'].querySelectorAll('#prompt_row')[0]; - window['chat_bot1'].children[1].textContent = ''; - - clientHeight = getClientHeight(); - if (isMobile()) { - output_htmls = window['gradioEl'].querySelectorAll('.output-html'); - for (var i = 0; i < output_htmls.length; i++) { - output_htmls[i].style.display = "none"; - } - new_height = (clientHeight - 250) + 'px'; - } else { - new_height = (clientHeight - 350) + 'px'; - } - chat_row.style.height = new_height; - window['chat_bot'].style.height = new_height; - window['chat_bot'].children[2].style.height = new_height; - window['chat_bot1'].style.height = new_height; - window['chat_bot1'].children[2].style.height = new_height; - prompt_row.children[0].style.flex = 'auto'; - prompt_row.children[0].style.width = '100%'; - window['gradioEl'].querySelectorAll('#chat_radio')[0].style.flex = 'auto'; - window['gradioEl'].querySelectorAll('#chat_radio')[0].style.width = '100%'; - prompt_row.children[0].setAttribute('style','flex-direction: inherit; flex: 1 1 auto; width: 100%;border-color: green;border-width: 1px !important;') - window['chat_bot1'].children[1].setAttribute('style', 'border-bottom-right-radius:0;top:unset;bottom:0;padding-left:0.1rem'); - window['gradioEl'].querySelectorAll('#btns_row')[0].children[0].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1'); - window['gradioEl'].querySelectorAll('#btns_row')[0].children[1].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1'); - - load_conversation(window['chat_bot1'].children[2].children[0]); - window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight; - - window['gradioEl'].querySelectorAll('#clear-btn')[0].onclick = function(e){ - if (confirm('Clear all outputs?')==true) { - window['chat_bot1'].children[2].children[0].innerHTML = ''; - save_conversation(window['chat_bot1'].children[2].children[0]); - } - } - - window['prevPrompt'] = ''; - window['doCheckPrompt'] = 0; - window['prevImgSrc'] = ''; - window['checkChange'] = function checkChange() { - try { - if (window['gradioEl'].querySelectorAll('.gr-radio')[0].checked) { - if (window['chat_bot'].children[2].children[0].children.length > window['div_count']) { - new_len = window['chat_bot'].children[2].children[0].children.length - window['div_count']; - for (var i = 0; i < new_len; i++) { - new_div = window['chat_bot'].children[2].children[0].children[window['div_count'] + i].cloneNode(true); - window['chat_bot1'].children[2].children[0].appendChild(new_div); - } - window['div_count'] = chat_bot.children[2].children[0].children.length; - window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight; - save_conversation(window['chat_bot1'].children[2].children[0]); - } - if 
(window['chat_bot'].children[0].children.length > 1) { - window['chat_bot1'].children[1].textContent = window['chat_bot'].children[0].children[1].textContent; - } else { - window['chat_bot1'].children[1].textContent = ''; - } - } else { - texts = window['gradioEl'].querySelectorAll('textarea'); - text0 = texts[0]; - text1 = texts[1]; - img_index = 0; - if (window['doCheckPrompt'] === 0 && window['prevPrompt'] !== text1.value) { - console.log('_____new prompt___[' + text1.value + ']_'); - window['doCheckPrompt'] = 1; - window['prevPrompt'] = text1.value; - for (var i = 3; i < texts.length; i++) { - setNativeValue(texts[i], text1.value); - texts[i].dispatchEvent(new Event('input', { bubbles: true })); - } - setTimeout(function() { - img_submit_btns = window['gradioEl'].querySelectorAll('#tab_img')[0].querySelectorAll("button"); - for (var i = 0; i < img_submit_btns.length; i++) { - if (img_submit_btns[i].innerText == 'Submit') { - img_submit_btns[i].click(); - } - } - window['doCheckPrompt'] = 0; - }, 10); - } - tabitems = window['gradioEl'].querySelectorAll('.tabitem'); - imgs = tabitems[img_index].children[0].children[1].children[1].children[0].querySelectorAll("img"); - if (imgs.length > 0) { - if (window['prevImgSrc'] !== imgs[0].src) { - var user_div = document.createElement("div"); - user_div.className = "px-3 py-2 rounded-[22px] rounded-br-none text-white text-sm chat-message svelte-rct66g"; - user_div.style.backgroundColor = "#16a34a"; - user_div.innerHTML = "

              " + text0.value + "

              "; - window['chat_bot1'].children[2].children[0].appendChild(user_div); - var bot_div = document.createElement("div"); - bot_div.className = "px-3 py-2 rounded-[22px] rounded-bl-none place-self-start text-white text-sm chat-message svelte-rct66g"; - bot_div.style.backgroundColor = "#2563eb"; - bot_div.style.width = "80%"; - bot_div.style.padding = "0.2rem"; - bot_div.appendChild(imgs[0].cloneNode(true)); - window['chat_bot1'].children[2].children[0].appendChild(bot_div); - - window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight; - window['prevImgSrc'] = imgs[0].src; - save_conversation(window['chat_bot1'].children[2].children[0]); - } - } - if (tabitems[img_index].children[0].children[1].children[1].children[0].children[0].children.length > 1) { - window['chat_bot1'].children[1].textContent = tabitems[img_index].children[0].children[1].children[1].children[0].children[0].children[1].textContent; - } else { - window['chat_bot1'].children[1].textContent = ''; - } - } - - } catch(e) { - } - } - window['checkChange_interval'] = window.setInterval("window.checkChange()", 500); - } - - return false; -}""" - - -with gr.Blocks(title='Talk to chatGPT') as demo: - gr.HTML("

              You can duplicate this Space and use your own session token: Duplicate Space

              ") - gr.HTML("

              Instructions on how to get a session token can be seen in the video here. Add your session token by going to Settings and adding it under Secrets.

              ") - with gr.Group(elem_id="page_1", visible=True) as page_1: - with gr.Box(): - with gr.Row(): - start_button = gr.Button("Let's talk to chatGPT!", elem_id="start-btn", visible=True) - start_button.click(fn=None, inputs=[], outputs=[], _js=start_work) - - with gr.Group(elem_id="page_2", visible=False) as page_2: - with gr.Row(elem_id="chat_row"): - chatbot = gr.Chatbot(elem_id="chat_bot", visible=False).style(color_map=("green", "blue")) - chatbot1 = gr.Chatbot(elem_id="chat_bot1").style(color_map=("green", "blue")) - with gr.Row(elem_id="prompt_row"): - prompt_input0 = gr.Textbox(lines=2, label="prompt",show_label=False) - prompt_input1 = gr.Textbox(lines=4, label="prompt", visible=False) - chat_history = gr.Textbox(lines=4, label="prompt", visible=False) - chat_radio = gr.Radio(["Talk to chatGPT", "Text to Image"], elem_id="chat_radio",value="Talk to chatGPT", show_label=False) - with gr.Row(elem_id="btns_row"): - with gr.Column(id="submit_col"): - submit_btn = gr.Button(value = "submit",elem_id="submit-btn").style( - margin=True, - rounded=(True, True, True, True), - width=100 - ) - with gr.Column(id="clear_col"): - clear_btn = gr.Button(value = "clear outputs", elem_id="clear-btn").style( - margin=True, - rounded=(True, True, True, True), - width=100 - ) - api = gr.State(value=get_api()) - submit_btn.click(fn=chat, - inputs=[api, prompt_input0, prompt_input1, chat_radio, chat_history], - outputs=[api, chatbot, prompt_input1, chat_history], - ) - with gr.Row(elem_id='tab_img', visible=False).style(height=5): - tab_img = gr.TabbedInterface(tab_actions, tab_titles) - -demo.launch(debug = True) diff --git a/spaces/zomehwh/vits-models-genshin-bh3/text/symbols.py b/spaces/zomehwh/vits-models-genshin-bh3/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-genshin-bh3/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
-''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/zomehwh/vits-models-pcr/transforms.py b/spaces/zomehwh/vits-models-pcr/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-pcr/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, 
:], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * 
theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet
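
For reference, a minimal usage sketch of the piecewise_rational_quadratic_transform defined in the deleted transforms.py above. This is an illustration only, not part of the repository: the import path and the tensor shapes (a batch of 8 scalars with 10 spline bins) are assumptions, chosen to match the shape conventions the function expects (one unnormalized width and height per bin, derivatives at the interior knots only).

import torch

from transforms import piecewise_rational_quadratic_transform  # assumed import path for the module deleted above

torch.manual_seed(0)
num_bins = 10
x = torch.randn(8)                          # values to transform
w = torch.randn(8, num_bins)                # unnormalized bin widths
h = torch.randn(8, num_bins)                # unnormalized bin heights
d = torch.randn(8, num_bins - 1)            # unnormalized derivatives at the interior knots

# Forward pass through the monotonic spline; values outside [-1, 1] pass
# through unchanged because of the linear tails.
y, logabsdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=1.0)

# Applying the inverse with the same parameters should recover x up to
# numerical error.
x_rec, _ = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=1.0)
print((x - x_rec).abs().max())

With tails='linear' the transform is the identity outside [-tail_bound, tail_bound], so it is defined on all of the real line and can be applied element-wise without clamping the inputs.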