diff --git a/spaces/0x7194633/nllb-1.3B-demo/README.md b/spaces/0x7194633/nllb-1.3B-demo/README.md deleted file mode 100644 index 220c462acad1cc5735b01872cabcbc27753ec1e8..0000000000000000000000000000000000000000 --- a/spaces/0x7194633/nllb-1.3B-demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Nllb Translation Demo -emoji: 👀 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.0.26 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/0xHacked/zkProver/app.py b/spaces/0xHacked/zkProver/app.py deleted file mode 100644 index bbd65e0a8d179ace2fb755f18e0f695e33f7325f..0000000000000000000000000000000000000000 --- a/spaces/0xHacked/zkProver/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -import tempfile -import uuid -import subprocess -import gradio as gr - - -BIN = os.path.join(os.path.dirname(__file__), "bin", "zkProver_linux_gpu") - - -def run_zk_prover(network, block_number, contract, file): - if not contract: - raise gr.Error("contract is required") - if not file: - raise gr.Error('file is required') - args = [ - BIN, - "evm", "-r", "https://rpc.flashbots.net/" - ] - if block_number: - args.extend(["-b", str(block_number)]) - proof_path = "/tmp/" + str(uuid.uuid4()) + ".bin" - args.extend(["-o", proof_path]) - - args.append(file.name + ":" + contract) - - proc = subprocess.Popen(args,) - proc.wait() - - if proc.returncode != 0: - raise gr.Error("generate proof failed") - return proof_path - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # 0xHacked - This is the demo for [0xHacked](https://0xHacked.com), a trustless bug bounty platform. You can generate the proof of exploit here. However, due to the constraints of ZKP, the generation might be low on Huggingface. -
- We recommend [compiling it from the source](https://github.com/0xHackedLabs/zkProver). The generation can be very quick on GPU. For more details, please refer to [0xHacked Documentation](https://docs.0xHacked.com). -
- The sample PoC provided below takes ~800s to generate the proof. You can click "SushiRouterExploit.sol" below and hit "Run" to try it! - """ - ) - with gr.Column(): - with gr.Row(): - with gr.Column(): - network_input = gr.Dropdown(["Ethereum"], value="Ethereum", label='Network') - block_number_input = gr.Number(precision=0, label='Block Number') - contract_input = gr.Textbox(label='Poc Contract') - file_input = gr.File(file_types=[".sol"], label='Solidity File') - submit_btn = gr.Button(label="Submit") - with gr.Column(): - fileout = gr.File(label='Proof File') - - gr.Examples( - examples=[[ - "Ethereum", - 17007841, - "SushiExpProxy", - "./examples/SushiRouterExploit.sol"], - ], - fn=run_zk_prover, - inputs=[network_input, block_number_input, contract_input, file_input], - outputs=fileout - ) - - submit_btn.click( - fn=run_zk_prover, - inputs=[network_input, block_number_input, contract_input, file_input], - outputs=fileout - ) - -if __name__ == "__main__": - demo.launch(server_name="0.0.0.0", server_port=7860) - diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ao No Kanata Four Rhythm Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ao No Kanata Four Rhythm Crack.md deleted file mode 100644 index a3ad5fb2aeb5746fdf905ae2e816fb9958d5dbbc..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ao No Kanata Four Rhythm Crack.md +++ /dev/null @@ -1,32 +0,0 @@ - -

Ao no Kanata no Four Rhythm: A Visual Novel That Soars High

-

Ao no Kanata no Four Rhythm (also known as Aokana: Four Rhythm Across the Blue) is a visual novel developed by sprite and released in 2014. It is set in a world where flying is as simple as riding a bicycle, thanks to the invention of anti-gravitational shoes known as Grav-Shoes. The game follows the protagonist, Masaya Hinata, a former competitor in a sport called Flying Circus, who regains his passion for flying when he meets the transfer student Asuka Kurashina. Together with their friends, they join the Kunahama High School Flying Circus club and aim for the top of the national tournament.

-

ao no kanata four rhythm crack


DOWNLOAD ☆☆☆ https://byltly.com/2uKxag



-

The game features four main heroines, each with her own route and story: Asuka Kurashina, Misaki Tobisawa, Mashiro Arisaka, and Rika Ichinose.

- -

The game has been praised for its beautiful graphics, engaging gameplay, and emotional story. It has also been adapted into an anime series in 2016 and a manga series in 2015. The game was released in English by NekoNyan Ltd. and HIKARI FIELD in 2019, with an 18+ DLC available for free on the NekoNyanSoft shop. However, the game has mosaic censorship, which may disappoint some fans of the genre.

-

If you are looking for a visual novel that combines romance, comedy, drama, and sports, you may want to give Ao no Kanata no Four Rhythm a try. You can download the game from Steam or the NekoNyanSoft shop, and enjoy the thrilling experience of flying with your favorite heroine.

-

- -

One of the main attractions of Ao no Kanata no Four Rhythm is its gameplay, which simulates the Flying Circus matches in a 3D environment. The player can choose to control Masaya or one of the heroines, and compete against various opponents in different modes, such as time attack, point match, or survival. The player can also customize their Grav-Shoes and outfits, and unlock new skills and abilities as they progress through the game.

-

The gameplay is fast-paced and exhilarating, requiring the player to master the basics of flying, such as acceleration, turning, braking, and drifting. The player also has to use their tactics and reflexes to dodge attacks, counterattack, and perform special moves. The game offers multiple difficulty levels and adjustable settings, making it accessible for both beginners and veterans. The game also supports online multiplayer mode, where the player can challenge other players from around the world.

-

The game has received overwhelmingly positive reviews from both critics and users on Steam[^1^], who praised its gameplay, graphics, music, voice acting, and story. Some of the common compliments include:

-
-

"One of the best visual novels I've ever played. The story is engaging, the characters are lovable, the art is gorgeous, and the gameplay is addictive."

-

"A masterpiece of a visual novel. The gameplay is fun and challenging, the story is emotional and captivating, and the music is beautiful and fitting."

-

"A visual novel that transcends its genre. The gameplay is not just a gimmick, but an integral part of the story and character development. The story is not just a romance, but a journey of growth and friendship."

-
-

However, the game is not without its flaws. Some of the common criticisms include:

-
-

"The game has mosaics censorship, which ruins the immersion and quality of the 18+ scenes."

-

"The game has some bugs and glitches, such as crashes, freezes, or missing text."

-

"The game has some translation errors and typos, such as grammar mistakes, inconsistent names, or wrong choices."

-
-

Despite these issues, most reviewers agree that Ao no Kanata no Four Rhythm is a visual novel worth playing for its unique gameplay and compelling story. If you are a fan of visual novels or flying games, you should not miss this gem.

81aa517590
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Crack _BEST_ Vba Project Password Recovery 13.md b/spaces/1gistliPinn/ChatGPT4/Examples/Crack _BEST_ Vba Project Password Recovery 13.md deleted file mode 100644 index 0858f8cb688935c2ef29b9359b01ac567ccb4661..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Crack _BEST_ Vba Project Password Recovery 13.md +++ /dev/null @@ -1,50 +0,0 @@ -
-

How to Crack VBA Project Password Recovery 13 in Excel

-

If you have ever worked with VBA macros in Excel, you might have encountered a situation where you need to access or modify the code of a locked VBA project. This can happen when you inherit a workbook from someone else, or when you forget your own password. In this article, we will show you how to crack VBA project password recovery 13 in Excel using different methods.

-

What is VBA Project Password Recovery 13?

-

VBA project password recovery 13 is a term that refers to the process of unlocking a VBA project that is protected by a password in Excel. A VBA project is a collection of modules, forms, and classes that contain the code for your macros. You can protect your VBA project from unauthorized access or modification by setting a password in the VBA editor.

-

crack vba project password recovery 13


Download File →→→ https://imgfil.com/2uy1zd



-

However, sometimes you might need to crack the password for various reasons, such as:

- -

There are different ways to crack VBA project password recovery 13 in Excel, depending on the file format and the version of Excel you are using. We will cover some of the most common and effective methods in the following sections.

-

How to Crack VBA Project Password Recovery 13 for Older .XLS Files

-

If you are working with an older .XLS file (Excel 97-2003 format), you can use a simple hex editing technique to crack the password. Hex editing is a method of modifying the binary data of a file using a hexadecimal editor. You can use any hex editor software for this purpose, such as HxD or Notepad++.

-

Here are the steps to crack VBA project password recovery 13 for older .XLS files:

-
    -
  1. Open the .XLS file in your hex editor.
  2. -
  3. Search for the text "DPB=" (without quotes) in the file. You should find it just above "[Host Extender Info]".
  4. -
  5. Change "DPB=" to "DPx=" (without quotes) and save the file.
  6. -
  7. Open the file in Excel and click Yes if you see a warning message about repairing the file.
  8. -
  9. Open the VBA editor (Alt+F11) and click OK if you see a warning message about opening the project.
  10. -
  11. Right-click the VBA project name, select Properties, go to the Protection tab and delete the existing passwords as well as uncheck the Lock project for viewing checkbox.
  12. -
  13. Re-check the Lock project for viewing checkbox and add your own memorable password. Click OK and save the file.
  14. -
-

Congratulations! You have successfully cracked VBA project password recovery 13 for older .XLS files. You can now access and modify the code of your macros as you wish.
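
If you would rather script the byte swap than search for it in a hex editor, the short Python sketch below performs the same "DPB=" to "DPx=" replacement described in step 3. It is only an illustration, not part of the original guide: the file names are placeholders, and you should run it against a backup copy of a workbook you own.

```python
# Minimal sketch: apply the "DPB=" -> "DPx=" swap from a script instead of a hex editor.
# "locked.xls" and "locked_patched.xls" are placeholder names; keep a backup of the original.
from pathlib import Path

src = Path("locked.xls")          # the locked workbook
dst = Path("locked_patched.xls")  # patched copy, so the original stays untouched

data = src.read_bytes()
if b"DPB=" not in data:
    raise SystemExit("No 'DPB=' marker found; the file may not contain a locked VBA project.")

# Replace only the first occurrence, mirroring the manual hex edit.
dst.write_bytes(data.replace(b"DPB=", b"DPx=", 1))
print(f"Wrote {dst}; open it in Excel and continue from step 4.")
```

Writing the result to a separate copy keeps the original workbook intact in case Excel's repair step fails.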

-

How to Crack VBA Project Password Recovery 13 for Newer .XLSM Files

-

If you are working with a newer .XLSM file (Excel 2007 or later format), you can use a different technique that involves changing the file extension and extracting a binary file. A binary file is a file that contains data in a binary format, which can be read by computers but not by humans. You can use any archiver software for this purpose, such as WinRAR or 7-Zip.

-

Here are the steps to crack VBA project password recovery 13 for newer .XLSM files:

-

-
    -
  1. Change the file extension of your .XLSM file to .ZIP. For example, if your file name is "MyWorkbook.xlsm", change it to "MyWorkbook.zip".
  2. -
  3. Open the .ZIP file in your archiver software and navigate to the "xl" folder inside it.
  4. -
  5. Extract the "vbaProject.bin" file from the "xl" folder to your desired location.
  6. -
  7. Perform steps #1-3 from the previous section (for older .XLS files) with "vbaProject.bin" instead of your original .XLSM file.
  8. -
  9. Replace the old "vbaProject.bin" file in the .ZIP file with the new hex edited version.
  10. -
  11. Change the file extension of your .ZIP file back to .XLSM. For example, if your file name is "MyWorkbook.zip", change it back to "MyWorkbook.xlsm".
  12. -
  13. Perform steps #4-7 from the previous section (for older .XLS files) with your original .XLSM file instead of "vbaProject.bin".
  14. -
-

Congratulations! You have successfully cracked VBA project password recovery 13 for newer .XLSM files. You can now access and modify the code of your macros as you wish.
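
The renaming and re-zipping steps can also be scripted. The sketch below, again only an illustration with placeholder file names, uses Python's standard zipfile module to read the workbook, apply the same "DPB=" to "DPx=" swap to xl/vbaProject.bin, and write a patched copy, so the manual rename to .ZIP and back is not needed.

```python
# Minimal sketch: patch xl/vbaProject.bin inside an .xlsm without renaming it to .zip by hand.
# "locked.xlsm" is a placeholder; the original is only read, and a patched copy is written.
import zipfile

src = "locked.xlsm"
dst = "locked_patched.xlsm"
target = "xl/vbaProject.bin"

with zipfile.ZipFile(src) as zin, zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED) as zout:
    for item in zin.infolist():
        data = zin.read(item.filename)
        if item.filename == target:
            # Same "DPB=" -> "DPx=" swap as in the .XLS method above.
            data = data.replace(b"DPB=", b"DPx=", 1)
        zout.writestr(item, data)

print(f"Wrote {dst}; open it in Excel and clear the old password from the Protection tab as described above.")
```

Because the original file is never rewritten, you can simply delete the patched copy if anything goes wrong.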

- -

Conclusion

- -

In this article, we have shown you how to crack VBA project password recovery 13 in Excel using different methods. We hope this article was helpful and informative for you. However, we also advise you to use these methods responsibly and ethically, and not to violate any intellectual property rights or privacy policies of others. Remember that cracking passwords is not always legal or ethical, so use these methods at your own risk and discretion.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer v4.8.8.3 Mod Apk - Unlock All Cars and Maps for Free.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer v4.8.8.3 Mod Apk - Unlock All Cars and Maps for Free.md deleted file mode 100644 index 446d851600f1fe198d5c30141f1b8a43e4cbc0af..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer v4.8.8.3 Mod Apk - Unlock All Cars and Maps for Free.md +++ /dev/null @@ -1,105 +0,0 @@ - -

Car Parking Mod APK 4.8.8.3: A Realistic and Fun Driving Simulator

-

Do you love driving cars and parking them in challenging spots? Do you want to experience the thrill of driving different types of vehicles and customizing them to your liking? Do you want to play with your friends and compete with other players online? If you answered yes to any of these questions, then you should try Car Parking Mod APK 4.8.8.3, a modified version of the popular Car Parking game that offers unlimited money, all cars unlocked, multiplayer mode, and more.

-

What is Car Parking Mod APK 4.8.8.3?

-

Car Parking is a realistic and fun driving simulator game that tests your skills in parking various cars in different scenarios. You can choose from over 100 cars, ranging from sedans, SUVs, sports cars, trucks, buses, and even police cars. You can also customize your cars with different colors, wheels, spoilers, stickers, and more.

-

car parking mod apk 4.8.8.3


Download Filehttps://urlin.us/2uSTZM



-

The game has over 200 levels, each with its own difficulty and objectives. You have to park your car in the designated spot without hitting any obstacles or other cars. You also have to follow the traffic rules and signals, such as speed limits, stop signs, traffic lights, etc.

-

Car Parking Mod APK 4.8.8.3 is a modified version of the original game that gives you access to unlimited money, all cars unlocked, multiplayer mode, and more features that make the game more enjoyable and exciting.

-

Features of Car Parking Mod APK 4.8.8.3

-

Unlimited Money

-

With Car Parking Mod APK 4.8.8.3, you don't have to worry about running out of money to buy new cars or upgrade them. You can get unlimited money by completing levels or by using the in-game shop.

-

All Cars Unlocked

-

With Car Parking Mod APK 4.8.8.3, you don't have to wait to unlock new cars or spend money to buy them. You can access all the cars in the game from the start and choose whichever one you like.

-

Multiplayer Mode

-

With Car Parking Mod APK 4.8.8.3, you don't have to play alone or against the computer. You can play with your friends or other players online in multiplayer mode. You can join or create rooms and chat with other players while playing.

-

car parking multiplayer mod apk v4.8.8.3 unlimited money
-car parking multiplayer mod apk v4.8.8.3 download for android
-car parking multiplayer mod apk v4.8.8.3 latest version
-car parking multiplayer mod apk v4.8.8.3 free shopping
-car parking multiplayer mod apk v4.8.8.3 unlocked all cars
-car parking multiplayer mod apk v4.8.8.3 no root
-car parking multiplayer mod apk v4.8.8.3 online
-car parking multiplayer mod apk v4.8.8.3 hack
-car parking multiplayer mod apk v4.8.8.3 gameplay
-car parking multiplayer mod apk v4.8.8.3 review
-car parking multiplayer mod apk v4.8.8.3 update
-car parking multiplayer mod apk v4.8.8.3 new features
-car parking multiplayer mod apk v4.8.8.3 cheats
-car parking multiplayer mod apk v4.8.8.3 how to install
-car parking multiplayer mod apk v4.8.8.3 best settings
-car parking multiplayer mod apk v4.8.8.3 tips and tricks
-car parking multiplayer mod apk v4.8.8.3 custom cars
-car parking multiplayer mod apk v4.8.8.3 realistic graphics
-car parking multiplayer mod apk v4.8.8.3 high quality open world
-car parking multiplayer mod apk v4.8.8.3 fun gameplay
-car parking multiplayer mod apk v4.8.8.3 82 real-life challenges
-car parking multiplayer mod apk v4.8.8.3 different vehicles
-car parking multiplayer mod apk v4.8.8.3 interior buildings
-car parking multiplayer mod apk v4.8.8.3 dynamic vinyls
-car parking multiplayer mod apk v4 . 88 . 83 vision auto tungsten
-car parking multiplayer mod apk v4 . 88 . 83 highly detailed environments
-car parking multiplayer mod apk v4 . 88 . 83 100 cars with real interiors
-car parking multiplayer mod apk v4 . 88 . 83 16 player skins
-car parking multiplayer mod apk v4 . 88 . 83 trailers, pickups, trucks, sports and classic cars
-car parking multiplayer mod apk v4 . 88 . 83 voice chat with friends
-car parking multiplayer mod apk v4 . 88 . 83 exchange cars with real players
-car parking multiplayer mod apk v4 . 88 . 83 police mode with sirens and radar
-car parking multiplayer mod apk v4 . 88 . 83 manual transmission with clutch and gear shift
-car parking multiplayer mod apk v4 . 88 . 83 engine tuning and performance upgrades
-car parking multiplayer mod apk v4 . 88 . 83 body parts and accessories customization
-car parking multiplayer mod apk v4 . 88 .  83 realistic physics and sound effects

-

Customization Options

-

With Car Parking Mod APK 4.8.8.3, you don't have to settle for the default appearance of your cars. You can customize them with different colors, wheels, spoilers, stickers, and more.

-

Realistic Physics and Graphics

-

With Car Parking Mod APK 4.8.8.3, you don't have to compromise on the quality of the game's physics and graphics. The game has realistic physics that simulate the behavior of real cars and their interaction with the environment.

-

The game also has high-quality graphics that create a realistic and immersive atmosphere for the game.

-

How to Download and Install Car Parking Mod APK 4.8.8.3?

-

If you want to download and install Car Parking Mod APK 4.8.8.3, you need to follow these simple steps:

-

Step 1: Enable Unknown Sources

-

Before you can install any modded or third-party app on your Android device, you need to enable the option of unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store.

-

To enable unknown sources, go to your device's settings, then security, then unknown sources, and toggle it on.

-

Step 2: Download the APK File

-

Next, you need to download the APK file of Car Parking Mod APK 4.8.8.3 from a reliable and trusted source. You can use the link below to download the file directly to your device.

-

Download Car Parking Mod APK 4.8.8.3 here

-

Step 3: Install the APK File

-

Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your device's storage and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for the process to finish.

-

Step 4: Enjoy the Game

-

After the installation is complete, you can launch the game from your app drawer or home screen and enjoy the game with all its features.

-

Pros and Cons of Car Parking Mod APK 4.8.8.3

-

Car Parking Mod APK 4.8.8.3 is a great game for anyone who loves driving and parking games, but it also has some pros and cons that you should be aware of before playing it.

-

Pros

- -

Cons

- -

Conclusion

-

Car Parking Mod APK 4.8.8.3 is a realistic and fun driving simulator game that tests your skills in parking various cars in different scenarios. The game has unlimited money, all cars unlocked, multiplayer mode, and more features that make it more enjoyable and exciting. The game also has realistic physics and graphics that create a realistic and immersive atmosphere for the game.

-

If you are looking for a driving and parking game that offers a lot of variety, challenge, and fun, then you should try Car Parking Mod APK 4.8.8.3. You can download and install it easily by following the steps above.

-

I hope you enjoyed reading this article and found it helpful. If you have any questions or feedback about Car Parking Mod APK 4.8.8.3, feel free to leave a comment below.

-

Frequently Asked Questions

-
    -
  1. What is the difference between Car Parking Mod APK 4.8.8.3 and Car Parking Multiplayer?
  2. -

    Car Parking Mod APK 4.8.8.3 is a modified version of Car Parking Multiplayer that offers unlimited money, all cars unlocked, multiplayer mode, and more features that make the game more enjoyable and exciting.

    -
  3. Is Car Parking Mod APK 4.8.8.3 safe to download and install?
  4. -

    Yes, Car Parking Mod APK 4.8.8.3 is safe to download and install as long as you use a reliable and trusted source like the one provided in this article.

    -
  5. How can I play Car Parking Mod APK 4.8.8.3 with my friends?
  6. -

    You can play Car Parking Mod APK 4.8.8.3 with your friends by using the multiplayer mode. You can join or create rooms and chat with your friends while playing.

    -
  7. How can I customize my cars in Car Parking Mod APK 4.8.8.3?
  8. -

    You can customize your cars in Car Parking Mod APK 4.8.8.3 by using the customization options. You can change the color, wheels, spoilers, stickers, and more of your cars.

    -
  9. How can I get more money in Car Parking Mod APK 4.8.8.3?
  10. -

    You can get more money in Car Parking Mod APK 4.8.8.3 by completing levels or by using the in-game shop.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Highway Racing APK Hackeado The Best Racing Game on Android.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Highway Racing APK Hackeado The Best Racing Game on Android.md deleted file mode 100644 index 8ff5d2c0227f9db04f00ad1ee8a663301ec73ac7..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Highway Racing APK Hackeado The Best Racing Game on Android.md +++ /dev/null @@ -1,82 +0,0 @@ - -

CarX Highway Racing APK Hackeado: How to Download and Play

-

If you are a fan of car racing games, you might have heard of CarX Highway Racing. It is a realistic and thrilling game that lets you drive fast cars on highways, evade the police, and compete with other racers. But what if you want to enjoy the game without spending money or worrying about the limitations? That's where CarX Highway Racing APK Hackeado comes in. In this article, we will tell you what CarX Highway Racing is, how to download and install CarX Highway Racing APK Hackeado, why you should play it, and some tips and tricks to help you win the races.

-

What is CarX Highway Racing?

-

CarX Highway Racing is a game developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics. The game was released in 2017 for Android and iOS devices, and has since gained millions of downloads and positive reviews. The game features over 40 different cars, from sports cars to muscle cars, that you can customize and upgrade. You can also choose from various game modes, such as campaign, time attack, survival, and online multiplayer. The game also has realistic weather effects, day and night cycles, and dynamic traffic.

-

carx highway racing apk hackeado


DOWNLOADhttps://urlin.us/2uT1Ws



-

Features of CarX Highway Racing

-

Some of the features that make CarX Highway Racing stand out from other car racing games are:

- -

How to download and install CarX Highway Racing APK Hackeado

-

If you want to play CarX Highway Racing without any restrictions or costs, you can download and install CarX Highway Racing APK Hackeado. This is a modified version of the game that gives you unlimited money, gold, fuel, and energy. You can use these resources to buy any car you want, upgrade it to the max level, and unlock all the tracks and modes. Here are the steps to download and install CarX Highway Racing APK Hackeado:

-
    -
  1. Go to [this link](^1^) and download the CarX Highway Racing APK Hackeado file.
  2. -
  3. Go to your device settings and enable the installation of apps from unknown sources.
  4. -
  5. Locate the downloaded file in your file manager and tap on it to install it.
  6. -
  7. Launch the game and enjoy!
  8. -
-

Why play CarX Highway Racing APK Hackeado?

-

CarX Highway Racing APK Hackeado is not only a fun and exciting game but also a great way to improve your driving skills and reflexes. Here are some of the benefits of playing CarX Highway Racing APK Hackeado:

-

Benefits of CarX Highway Racing APK Hackeado

- -

Tips and tricks for playing CarX Highway Racing APK Hackeado

-

To make the most out of CarX Highway Racing APK Hackeado, here are some tips and tricks that you can follow:

- -

Conclusion

-

CarX Highway Racing APK Hackeado is a great way to enjoy CarX Highway Racing without any limitations or costs. You can download and install CarX Highway Racing APK Hackeado easily and safely from [this link]. You can also benefit from playing CarX Highway Racing APK Hackeado by saving money and time, experimenting and having fun, and learning and improving. You can also follow our tips and tricks to win the races and become the best racer on the highway.

-

FAQs

-

Here are some of the frequently asked questions about CarX Highway Racing APK Hackeado:

-

carx highway racing mod apk unlimited money
-carx highway racing hack apk download
-carx highway racing apk mod menu
-carx highway racing cheats apk
-carx highway racing mod apk latest version
-carx highway racing hack apk android
-carx highway racing mod apk obb
-carx highway racing apk hack 2023
-carx highway racing mod apk revdl
-carx highway racing hack apk ios
-carx highway racing mod apk rexdl
-carx highway racing hack apk 1.74.8
-carx highway racing mod apk offline
-carx highway racing hack apk free download
-carx highway racing mod apk no root
-carx highway racing hack apk online
-carx highway racing mod apk data
-carx highway racing hack apk unlimited gold
-carx highway racing mod apk all cars unlocked
-carx highway racing hack apk no verification
-carx highway racing mod apk pure
-carx highway racing hack apk 2022
-carx highway racing mod apk happymod
-carx highway racing hack apk latest
-carx highway racing mod apk android 1

-
    -
  1. Is CarX Highway Racing APK Hackeado safe to download and install?
  2. -

    Yes, CarX Highway Racing APK Hackeado is safe to download and install. It does not contain any viruses, malware, or spyware. It also does not require any root or jailbreak access to your device.

    -
  3. Is CarX Highway Racing APK Hackeado compatible with my device?
  4. -

    CarX Highway Racing APK Hackeado is compatible with most Android devices that have Android 4.1 or higher. However, some devices may experience performance issues or crashes due to hardware limitations or compatibility issues.

    -
  5. How do I update CarX Highway Racing APK Hackeado?
  6. -

    To update CarX Highway Racing APK Hackeado, you need to download and install the latest version of CarX Highway Racing APK Hackeado from [this link]. You do not need to uninstall the previous version of CarX Highway Racing APK Hackeado before installing the new one.

    -
  7. Can I play CarX Highway Racing APK Hackeado online with other players?
  8. -

    Yes, you can play CarX Highway Racing APK Hackeado online with other players who have CarX Highway Racing APK Hackeado installed on their devices. However, you may not be able to play online with players who have the original version of CarX Highway Racing installed on their devices.

    -
  9. Can I get banned for playing CarX Highway Racing APK Hackeado?
  10. -

    No, you will not get banned for playing CarX Highway Racing APK Hackeado. The game does not have any anti-cheat system or detection mechanism that can identify or ban players who use CarX Highway Racing APK Hackeado.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street The Best Street Racing Game for Android Users.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street The Best Street Racing Game for Android Users.md deleted file mode 100644 index f208549dae7f4e740e8b2364f6fe20d4422191b5..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Street The Best Street Racing Game for Android Users.md +++ /dev/null @@ -1,130 +0,0 @@ -
-

How to Download CarX Street on Android

-

If you are looking for a thrilling racing game that lets you explore a dynamic open world, customize your car, and compete against other players, then you should check out CarX Street. This game is developed by CarX Technologies, the makers of CarX Drift Racing 2, and it offers realistic races on highways and city streets, plus top-speed drift races.

-

In this article, we will show you how to download CarX Street on your Android device and how to play it like a pro. You will learn about the game's features, how to install it from different sources, how to start your racing career, and how to improve your skills and performance.

-

how to download carx street on android


Download » https://urlin.us/2uSWJ0



-

What is CarX Street?

-

CarX Street is a free racing game that was released in 2023 as an open beta test. The game is set in Sunset City, a huge open world that you can explore freely with your car. You can join clubs, challenge bosses, buy houses for your cars, and collect parts for your vehicle.

-

The game features realistic physics and controls that make you feel like you are driving a real car. You can customize your car with various parts and accessories, such as mirrors, headlights, lights, skirt, bumper, rims, and more. You can also swap your engine for a different one and upgrade your engine, transmission, body, suspension, and tires.

-

The game has different modes of racing, such as highways, city streets, and drift zones. You can race against other players online or offline, or just cruise around the city at any time of day or night. The game has a dynamic day/night cycle that changes the gameplay and the graphics of the game.

-

How to Download and Install CarX Street on Android

There are two ways to download and install CarX Street on your Android device: from the Google Play Store or from APK sites. Here are the steps for each method:

-

Downloading from the Google Play Store

-

The easiest and safest way to get CarX Street on your Android device is to download it from the Google Play Store. Here is how to do it:

-
    -
  1. Open the Google Play Store app on your device and search for "CarX Street".
  2. -
  3. Tap on the game icon and then tap on the "Install" button.
  4. -
  5. Wait for the game to download and install on your device. You may need to grant some permissions for the game to run properly.
  6. -
  7. Once the game is installed, you can launch it from your app drawer or home screen.
  8. -
-

Downloading from APK Sites

-

If you cannot access the Google Play Store or you want to try a different version of the game, you can also download CarX Street from APK sites. However, this method is riskier and may expose your device to malware or viruses. Here are some tips on how to do it safely:

- types of cars available in the game, such as muscle cars, sports cars, supercars, and hypercars. Each car has different stats and performance, such as speed, acceleration, handling, and drift. You can also see the price and the rarity of each car.

-

How to install carx street on android device
-Carx street android download guide
-Steps to download carx street for android phone
-Carx street apk download for android
-How to get carx street on android tablet
-Carx street android game free download
-Download carx street from google play store
-How to play carx street on android
-Carx street android beta download
-Carx street open world racing game for android
-How to update carx street on android
-Carx street android download link
-Carx street android requirements and compatibility
-How to fix carx street download error on android
-Carx street android gameplay and review
-Download carx street mod apk for android
-How to uninstall carx street from android
-Carx street android tips and tricks
-Carx street android latest version download
-How to backup carx street data on android
-Carx street android offline download
-How to join carx street beta test on android
-Carx street android cheats and hacks
-Download carx street xapk file for android
-How to transfer carx street progress to another android device
-Carx street android graphics settings and optimization
-How to stream carx street on android
-Carx street android controller support and configuration
-Download carx street obb file for android
-How to change language in carx street on android
-Carx street android multiplayer mode and features
-How to redeem codes in carx street on android
-Carx street android best cars and tuning options
-Download carx street from apkcombo for android
-How to record carx street gameplay on android
-Carx street android sound and music settings
-How to contact carx street support service on android
-Carx street android in-app purchases and prices
-Download carx street from filehippo for android
-How to enable notifications for carx street on android
-Carx street android drift mode and controls
-How to clear cache for carx street on android
-Carx street android clubs and challenges
-Download carx street from apkpure for android
-How to share carx street screenshots on android
-Carx street android gas stations and fuel types
-How to disable ads in carx street on android
-Carx street android license agreement and privacy policy
-Download carx street from uptodown for android
-How to rate and review carx street on google play store

-

You can customize your car with various parts and accessories, such as mirrors, headlights, lights, skirt, bumper, rims, and more. You can also change the color, the vinyl, the license plate, and the stickers of your car. To customize your car, you need to use coins or diamonds, which are the in-game currencies.

-

Joining a Club

-

One of the best features of CarX Street is that you can join or create a club with other players. A club is a group of racers that share a common name, logo, and chat. You can join a club by searching for its name or by accepting an invitation from another player. You can also create your own club by choosing a name, a logo, and a description.

-

Joining a club has many benefits for your racing career. You can chat with other club members, share tips and tricks, and challenge them to friendly races. You can also participate in club events and missions, which are special races that reward you with coins, diamonds, parts, and reputation points. Reputation points are used to rank up your club and unlock new perks and rewards.

-

Racing Against Other Players

-

The main mode of CarX Street is racing against other players online or offline. You can choose from different modes of racing, such as highways, city streets, and drift zones. Each mode has different rules and objectives, such as reaching the finish line first, earning the most points by drifting, or escaping from the police.

-

To start a race, you need to open the map and select a location. You can see the difficulty level, the entry fee, and the reward for each location. You can also see the number of players online and offline in each location. You can join an existing race or create your own race by choosing the number of laps, the time limit, and the weather conditions.

-

Once you join a race, you need to use your skills and strategy to win. You can use the gas pedal, the brake pedal, the handbrake, and the nitro boost to control your car. You can also use the steering wheel or the tilt option to steer your car. You need to avoid crashing into obstacles or other cars, as this will damage your car and slow you down.

-

Drifting

-

One of the most fun and challenging aspects of CarX Street is drifting. Drifting is a technique that involves sliding your car sideways while maintaining control and speed. Drifting is useful for taking sharp turns without losing momentum and for earning points and rewards.

-

To drift in CarX Street, you need to use the handbrake or the brake pedal while turning your car. You need to balance your throttle and steering to maintain your drift angle and direction. You also need to avoid hitting walls or other cars while drifting, as this will end your drift combo.

-

Drifting is important for earning points and rewards in CarX Street. The more you drift, the more points you get. The points are multiplied by your drift combo, which is the number of consecutive drifts you perform without interruption. The points are also affected by your drift angle, speed, distance, and style.

-

You can use your drift points to buy new cars or parts for your car. You can also use them to unlock new locations or events in the game. Drifting is also essential for completing some missions or challenges in the game.

How to Improve Your Skills and Performance in CarX Street

-

If you want to become a better racer and a legend of Sunset City, you need to improve your skills and performance in CarX Street. Here are some advanced tips on how to do that:

-

Upgrading Your Car

-

One of the best ways to improve your car's performance is to upgrade it with part tuning. Part tuning is a feature that allows you to unlock the full potential of your car and improve its engine, transmission, body, suspension, and tires. You can access part tuning from the garage menu.

-

To use part tuning, you need to have parts for your car. You can get parts by winning races, completing missions, or buying them with coins or diamonds. You can also get parts by dismantling other cars or parts that you don't need.

-

Once you have parts, you can use them to upgrade your car's stats. You can see the current and the maximum stats of your car on the part tuning screen. You can also see the effect of each part on your car's performance. You can upgrade your car's stats up to 100%, but you need to have enough parts and coins for that.

-

Swapping Your Engine

-

Another way to improve your car's performance is to swap your engine for a different one. Engine swapping is a feature that allows you to change your car's engine type and power. You can access engine swapping from the garage menu.

-

To use engine swapping, you need to have engines for your car. You can get engines by winning races, completing missions, or buying them with coins or diamonds. You can also get engines by dismantling other cars or engines that you don't need.

-

Once you have engines, you can use them to swap your car's engine. You can see the current and the available engines for your car on the engine swapping screen. You can also see the effect of each engine on your car's performance. You can swap your car's engine as many times as you want, but you need to have enough engines and coins for that.

-

Engine swapping has advantages and disadvantages for your car's performance. Some engines may increase your car's speed, acceleration, or drift, but they may also decrease your car's handling, stability, or fuel efficiency. You need to choose the engine that suits your racing style and preference.

-

Using the Right Fuel

-

A third way to improve your car's performance is to use the right fuel for your car. Fuel is a resource that affects your car's speed, acceleration, and nitro boost. You can see your car's fuel level on the top left corner of the screen during a race.

-

To use fuel, you need to have fuel for your car. You can get fuel by visiting gas stations in the city or by buying them with coins or diamonds. You can also get fuel by completing missions or challenges in the game.

-

Once you have fuel, you can use it to fill up your car's tank. You can see the current and the maximum fuel level of your car on the fuel screen. You can also see the effect of each fuel type on your car's performance. You can fill up your car's tank as much as you want, but you need to have enough fuel and coins for that.

-

Fuel has different types and qualities that affect your car's performance. Some fuel types may increase your car's speed, acceleration, or nitro boost, but they may also decrease your car's handling, stability, or durability. You need to choose the fuel type that suits your racing style and preference.

-

Racing at Different Times of Day

-

A fourth way to improve your skills and performance in CarX Street is to race at different times of day. The game has a dynamic day/night cycle that changes the gameplay and the graphics of the game.

-

The time of day affects the visibility, the traffic, and the difficulty of the races. During the day, you can see more clearly, but there are more cars and pedestrians on the road. During the night, you can see less clearly, but there are fewer cars and pedestrians on the road.

-

The time of day also affects the atmosphere and the mood of the game. During the day, you can enjoy the bright colors and the sunny weather of Sunset City. During the night, you can admire the neon lights and the dark sky of Sunset City.

-

You can change the time of day by using a clock icon on the map screen. You can choose between morning, afternoon, evening, and night. You can also let the time of day change naturally as you play.

-

Conclusion

-

CarX Street is an amazing racing game that lets you experience the thrill of racing and drifting in a realistic open world. You can download and install it on your Android device from the Google Play Store or from APK sites. You can also play it like a pro by choosing a car, joining a club, racing against other players, and drifting. You can also improve your skills and performance by upgrading your car, swapping your engine, using the right fuel, and racing at different times of day.

-

If you are a fan of racing games, you should not miss CarX Street. It is one of the best racing games for Android that offers stunning graphics, realistic physics, and endless fun. Download it now and join the millions of players who are enjoying CarX Street.

-

FAQs

-

Here are some frequently asked questions about CarX Street:

-
    -
  1. How can I get more coins and diamonds in CarX Street?
  2. -

    You can get more coins and diamonds by winning races, completing missions, participating in club events, watching ads, or buying them with real money.

    -
  3. How can I unlock new cars or parts in CarX Street?
  4. -

    You can unlock new cars or parts by earning reputation points, which are used to rank up your level and unlock new rewards. You can also buy them with coins or diamonds.

    -
  5. How can I drift better in CarX Street?
  6. -

    You can drift better by using the handbrake or the brake pedal while turning your car. You also need to balance your throttle and steering to maintain your drift angle and direction. You can also use the drift assist option to help you drift easier.

    -
  7. How can I race with my friends in CarX Street?
  8. -

    You can race with your friends by joining or creating a club and inviting them to join. You can also challenge them to friendly races or join their races from the map screen.

    -
  9. How can I change the camera view in CarX Street?
  10. -

    You can change the camera view by tapping on the camera icon on the top right corner of the screen during a race. You can choose between different views, such as cockpit, hood, bumper, chase, or far chase.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/CSR Racing 2 MOD APK The Ultimate Fast and Furious Racing Game for Android.md b/spaces/1phancelerku/anime-remove-background/CSR Racing 2 MOD APK The Ultimate Fast and Furious Racing Game for Android.md deleted file mode 100644 index 8bf1ba3c0e0349718a86d009d02d8df557180e2c..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/CSR Racing 2 MOD APK The Ultimate Fast and Furious Racing Game for Android.md +++ /dev/null @@ -1,97 +0,0 @@ -
-

CSR Racing 2 Fast and Furious Mod APK: Everything You Need to Know

-

If you are a fan of racing games, you have probably heard of CSR Racing 2, one of the most popular and realistic games in the genre. And if you are a fan of Fast and Furious, you have probably been excited by the recent collaboration between the game and the movie franchise, bringing exclusive cars and challenges to the game. But what if you want to enjoy the game without any limitations or restrictions? That's where CSR Racing 2 Mod APK comes in. In this article, we will tell you everything you need to know about this modified version of the game, how to install it, and what benefits it offers.

-

What is CSR Racing 2?

-

CSR Racing 2 is a racing game developed by NaturalMotionGames Ltd and released in 2016. It is the sequel to CSR Racing, which was released in 2012. The game features stunning graphics, realistic physics, and immersive gameplay that make you feel like you are driving a real car. You can customize your car with various parts, paint jobs, decals, and more. You can also collect and upgrade over 200 licensed cars from top brands like Ferrari, Lamborghini, Bugatti, McLaren, and more.

-

csr racing 2 fast and furious mod apk


Download Filehttps://jinyurl.com/2uNOXw



-

A realistic racing game with stunning graphics and gameplay

-

One of the main attractions of CSR Racing 2 is its graphics. The game uses a cutting-edge 3D rendering technique called PBR (Physically Based Rendering) that creates realistic lighting, shadows, reflections, and textures. The cars look amazing, with detailed interiors, exteriors, and engine sounds. The tracks are also diverse and realistic, ranging from urban streets to desert roads. The gameplay is also smooth and responsive, with easy controls and realistic physics. You can choose from different modes, such as drag races, crew battles, ladder races, regulation races, and more.

-

A huge collection of licensed cars from top brands

-

Another attraction of CSR Racing 2 is its car collection. The game features over 200 licensed cars from top brands like Ferrari, Lamborghini, Bugatti, McLaren, and more. You can collect them by winning races, opening crates, or buying them with in-game currency. You can also upgrade them with various parts, such as engines, turbochargers, nitrous oxide systems, tires, transmissions, and more. You can also customize them with various paint jobs, decals, rims, spoilers, and more.

-

A competitive online mode with real-time races and events

-

The game also has a competitive online mode where you can race against other players from around the world in real-time. You can join or create a crew with your friends or other players and compete in crew battles, leaderboards, chat rooms, and more. You can also participate in various events that offer rewards and prizes for completing missions or reaching milestones. Some of the events are seasonal or limited-time only, so you have to be quick to join them.

-

What is Fast and Furious?

-


-

Fast and Furious is a popular movie franchise that features street racing and heists. The franchise started in 2001 with The Fast and the Furious, and has since released nine more movies, with the latest one being F9: The Fast Saga. The movies star Vin Diesel, Paul Walker, Michelle Rodriguez, Tyrese Gibson, Dwayne Johnson, Jason Statham, and many other actors. The movies are known for their thrilling action scenes, exotic locations, and diverse cast of characters.

-

A popular movie franchise featuring street racing and heists

-

The main plot of the Fast and Furious movies revolves around Dominic Toretto (Vin Diesel), a former street racer who leads a crew of skilled drivers and criminals. He is often pursued by law enforcement agents, such as Brian O'Conner (Paul Walker), who later becomes his friend and ally. Together, they face various enemies and challenges, such as drug lords, terrorists, hackers, and rogue agents. Along the way, they also form a family bond with each other and their loyal friends.

-

A collaboration with CSR Racing 2 to bring exclusive cars and challenges

-

In 2019, CSR Racing 2 partnered with Fast and Furious to bring some of the iconic cars from the movies to the game. Players can race with cars such as the Toyota Supra, the Veilside Mazda RX-7, the Mitsubishi Eclipse, and many more. They can also participate in special events that are inspired by the movies, such as the Hobbs & Shaw event, the Fate of the Furious event, and the Fast & Furious Finale event. These events offer rewards and prizes for completing missions or reaching milestones.

-

A limited-time event with rewards and prizes for completing missions

-

The latest event in CSR Racing 2 is the Fast & Furious Finale event, which celebrates the release of F9: The Fast Saga. The event runs from April 15 to June 30, 2021, and features eight cars from the movie. Players can race with cars such as the Dodge Charger Daytona, the Veilside Honda S2000, Jesse's Volkswagen Jetta, and more. They can also unlock exclusive liveries, decals, and parts for their cars. The event also has a storyline that follows the movie's plot and characters.

-

csr racing 2 mod apk unlimited money and keys
-csr racing 2 fast and furious cars mod apk
-csr racing 2 mod apk latest version download
-csr racing 2 mod apk offline mode
-csr racing 2 mod apk unlimited gold and cash
-csr racing 2 fast and furious flip car mod apk
-csr racing 2 mod apk all cars unlocked
-csr racing 2 mod apk no root required
-csr racing 2 mod apk free shopping
-csr racing 2 fast and furious jetta mod apk
-csr racing 2 mod apk unlimited nitro
-csr racing 2 fast and furious supra mod apk
-csr racing 2 mod apk anti ban
-csr racing 2 mod apk unlimited fuel
-csr racing 2 fast and furious charger mod apk
-csr racing 2 mod apk mega mod
-csr racing 2 fast and furious events mod apk
-csr racing 2 mod apk unlimited rp
-csr racing 2 mod apk no ads
-csr racing 2 fast and furious skyline mod apk
-csr racing 2 mod apk obb file download
-csr racing 2 fast and furious honda s2000 mod apk
-csr racing 2 mod apk unlimited everything
-csr racing 2 mod apk high graphics
-csr racing 2 fast and furious eclipse mod apk
-csr racing 2 mod apk ios download
-csr racing 2 fast and furious plymouth roadrunner mod apk
-csr racing 2 mod apk unlocked premium features
-csr racing 2 mod apk unlimited crew points
-csr racing 2 fast and furious mitsubishi lancer evolution vii mod apk
-csr racing 2 mod apk online multiplayer
-csr racing 2 fast and furious dodge challenger srt demon mod apk
-csr racing 2 mod apk unlimited rare imports
-csr racing 2 mod apk custom cars
-csr racing 2 fast and furious nissan silvia s15 mona lisa mod apk
-csr racing 2 mod apk unlimited fusion parts
-csr racing 2 fast and furious lamborghini murcielago lp640 mod apk
-csr racing 2 mod apk unlimited legendary keys
-csr racing 2 mod apk realistic physics
-csr racing 2 fast and furious ford gt40 mk ii mod apk

-

What is CSR Racing 2 Mod APK?

-

CSR Racing 2 Mod APK is a modified version of the original game that unlocks everything. It is a way to enjoy the game without spending money or waiting for upgrades. It is also a risk-free download that does not require rooting or jailbreaking your device.

-

A modified version of the original game that unlocks everything

-

With CSR Racing 2 Mod APK, you can access all the features and content of the game without any limitations or restrictions. You can get unlimited money, keys, gold, fuel, and cash to buy or upgrade any car you want. You can also unlock all the cars, tracks, modes, events, and customizations that are otherwise locked or premium. You can also bypass any ads or verification processes that might interrupt your gameplay.

-

A way to enjoy the game without spending money or waiting for upgrades

-

One of the drawbacks of playing CSR Racing 2 is that it can be quite expensive and time-consuming to progress in the game. You need to spend real money or earn in-game currency to buy or upgrade your cars. You also need to wait for your fuel to refill or your parts to be delivered. This can be frustrating and boring for some players who want to enjoy the game without any hassle. With CSR Racing 2 Mod APK, you don't have to worry about any of these issues. You can play the game at your own pace and style without any pressure or cost.

-

A risk-free download that does not require rooting or jailbreaking your device

-

Some modded games require you to root or jailbreak your device in order to install them. This can be risky and harmful for your device's security and performance. It can also void your warranty or cause compatibility issues with other apps. With CSR Racing 2 Mod APK, you don't have to do any of these things. You can simply download the mod apk file from a trusted source and install it on your device without any problem. You can also uninstall it anytime you want without any consequences.

How to Install CSR Racing 2 Mod APK?

-

If you are interested in trying out CSR Racing 2 Mod APK, you might be wondering how to install it on your device. Don't worry, it's very easy and simple. Just follow these steps and you will be ready to race in no time.

-

Step 1: Download the mod apk file from a trusted source

-

The first thing you need to do is to download the mod apk file from a trusted source. You can find many websites that offer the mod apk file, but be careful and choose a reliable one. Some websites might have fake or malicious files that can harm your device or steal your data. To avoid any risk, you can use this link to download the mod apk file. This is a safe and verified source that has been tested and reviewed by many users.

-

Step 2: Enable unknown sources in your device settings

-

The next thing you need to do is to enable unknown sources in your device settings. This is a security feature that prevents you from installing apps that are not from the official app store. Since the mod apk file is not from the app store, you need to enable this option to install it. To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources option and toggle it on. You might see a warning message, but don't worry, it's safe to proceed.

-

Step 3: Install the mod apk file and launch the game

-

The final thing you need to do is to install the mod apk file and launch the game. To do this, go to your file manager and locate the mod apk file that you downloaded. Tap on it and follow the instructions to install it. It might take a few seconds or minutes depending on your device. Once it's done, you can launch the game from your app drawer or home screen. You will see the CSR Racing 2 logo and then the game will load. You can now enjoy the game with all the features and content unlocked.

-

Conclusion

-

CSR Racing 2 is one of the best racing games available for mobile devices. It offers realistic graphics, gameplay, and physics that make you feel like you are driving a real car. It also has a huge collection of licensed cars from top brands that you can collect, customize, and upgrade. It also has a competitive online mode where you can race against other players from around the world in real-time. And if you are a fan of Fast and Furious, you can also enjoy the exclusive cars and challenges from the movie franchise.

-

But if you want to enjoy the game without any limitations or restrictions, you can try CSR Racing 2 Mod APK. This is a modified version of the game that unlocks everything for free. You can get unlimited money, keys, gold, fuel, and cash to buy or upgrade any car you want. You can also unlock all the cars, tracks, modes, events, and customizations that are otherwise locked or premium. You can also bypass any ads or verification processes that might interrupt your gameplay.

-

Installing CSR Racing 2 Mod APK is very easy and simple. You just need to download the mod apk file from a trusted source, enable unknown sources in your device settings, and install the mod apk file on your device. You don't need to root or jailbreak your device to do this. You can also uninstall it anytime you want without any consequences.

-

If you are looking for a fun and exciting racing game that offers everything you want, CSR Racing 2 Mod APK is the perfect choice for you. Download it now and start racing with your favorite cars.

-

FAQs

-

Here are some frequently asked questions about CSR Racing 2 Mod APK:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download PowerShell 2.0 and WinRM 2.0 for Windows XP and Windows Server 2003.md b/spaces/1phancelerku/anime-remove-background/Download PowerShell 2.0 and WinRM 2.0 for Windows XP and Windows Server 2003.md deleted file mode 100644 index 4dffe5cf6853f1b1f4b3a257322fd4d2ec28cc14..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download PowerShell 2.0 and WinRM 2.0 for Windows XP and Windows Server 2003.md +++ /dev/null @@ -1,176 +0,0 @@ - -

How to Download PowerShell 2.0

-

PowerShell is a task automation and configuration management program from Microsoft, consisting of a command-line shell and the associated scripting language. It allows you to perform various operations on your system, such as managing files, processes, services, registry, network, security, and more.

-

download powershell 2.0


DOWNLOAD ✏ ✏ ✏ https://jinyurl.com/2uNTng



-

PowerShell 2.0 was released in October 2009 as part of Windows Management Framework (WMF) for Windows XP, Windows Vista, Windows Server 2003, and Windows Server 2008. It introduced many new features, such as modules, remoting, background jobs, transactions, debugging, eventing, and scripting enhancements.

-

Although newer versions of PowerShell have been released since then, you might need to download PowerShell 2.0 for some reasons. For example, you might have an older script or host program that is incompatible with newer versions of PowerShell or .NET Framework. Or, you might want to run commands or scripts that are designed for PowerShell 2.0 on a different computer that has a newer version of PowerShell installed.

-

In this article, we will show you how to download and install PowerShell 2.0 on different versions of Windows, how to use PowerShell 2.0 commands and scripts, and some tips and recommendations for using PowerShell 2.0.

-

How to Install PowerShell 2.0 on Different Windows Versions

-

The installation process of PowerShell 2.0 varies depending on the version of Windows that you have. Here are the steps for installing PowerShell 2.0 on different versions of Windows:

-

Windows 8.1 and Windows 8

-

On Windows 8.1 and Windows 8, the PowerShell 2.0 Engine feature is turned on by default. However, to use it, you need to turn on the option for Microsoft .NET Framework 3.5, which it requires. This section also explains how to turn the PowerShell 2.0 Engine feature on and off.

-

How to turn on .NET Framework 3.5

-
    -
1. On the Start screen, type Windows Features.
2. On the Apps bar, click Settings, and then click Turn Windows features on or off.
3. In the Windows Features box, select .NET Framework 3.5 (includes .NET 2.0 and 3.0). When you select .NET Framework 3.5 (includes .NET 2.0 and 3.0), you also select Windows PowerShell 2.0 Engine and Windows PowerShell Integrated Scripting Environment (ISE) for .NET Framework 3.5.
4. Click OK to install the selected features. You might need to restart your computer after the installation is complete.
-

How to turn the PowerShell 2.0 Engine feature on and off

-
    -
1. Follow steps 1 and 2 from the previous section to open the Windows Features box.
2. In the Windows Features box, expand .NET Framework 3.5 (includes .NET 2.0 and 3.0), and then select or clear the check box for Windows PowerShell 2.0 Engine, depending on whether you want to turn it on or off. You can also select or clear the check box for Windows PowerShell Integrated Scripting Environment (ISE) for .NET Framework 3.5, which is a graphical user interface for writing and testing scripts.
3. Click OK to apply the changes. You might need to restart your computer after the changes take effect.
-
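If you prefer to script this instead of clicking through the dialog, the same two features can usually be turned on from an elevated PowerShell prompt with the DISM cmdlets. The feature names below are an assumption based on recent client editions of Windows, so confirm them on your machine with Get-WindowsOptionalFeature -Online before relying on them.

# Enable .NET Framework 3.5 (Windows may download the payload from Windows Update)
Enable-WindowsOptionalFeature -Online -FeatureName NetFx3 -All

# Enable the Windows PowerShell 2.0 Engine (feature name assumed; verify it first)
Enable-WindowsOptionalFeature -Online -FeatureName MicrosoftWindowsPowerShellV2Root -All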

Windows Server 2012 R2 and Windows Server 2012

-

On Windows Server 2012 R2 and Windows Server 2012, the PowerShell 2.0 Engine feature is not installed by default. However, you can add it by using the Server Manager or the Windows PowerShell cmdlets. This section also explains how to start the PowerShell 2.0 Engine.

-

How to add the PowerShell 2.0 Engine and .NET Framework 3.5 features

-
    -
1. In Server Manager, click Add roles and features.
2. In the Add Roles and Features Wizard, click Next until you reach the server selection page, and then click Select a server from the server pool.
3. Select the server where you want to install the features, and then click Next until you reach the Features page.
4. On the Select features page, expand .NET Framework 3.5 Features and select the check box for .NET Framework 3.5 (includes .NET 2.0 and 3.0).
5. On the same page, expand Windows PowerShell and select the check box for Windows PowerShell 2.0 Engine. You can also select Windows PowerShell ISE here if you want a graphical user interface for writing and testing scripts.
6. Click Next, and then click Install to start the installation. You might need to restart the server after the installation is complete.
-

You can also use the Windows PowerShell cmdlets to add or remove the PowerShell 2.0 Engine and .NET Framework 3.5 features. For example, you can use the following command to install both features:

Install-WindowsFeature -Name Net-Framework-Core, PowerShell-V2

You can use the following command to uninstall both features:

-


Uninstall-WindowsFeature -Name Net-Framework-Core, PowerShell-V2
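If you are not sure whether the features are already present, you can check their install state first; this assumes the ServerManager cmdlets that ship with Windows Server 2012 and later are available:

Get-WindowsFeature -Name Net-Framework-Core, PowerShell-V2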

How to start the PowerShell 2.0 Engine

-

To start the PowerShell 2.0 Engine, you need to use the -Version parameter of the powershell.exe command. For example, you can use the following command to start a PowerShell session with the PowerShell 2.0 Engine:

powershell.exe -Version 2

You can also use the following command to start a Windows PowerShell ISE session with the PowerShell 2.0 Engine:

powershell_ise.exe -Version 2
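Once the session is running, you can confirm which engine you actually got by inspecting the built-in $PSVersionTable variable:

$PSVersionTable.PSVersion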

Windows Server 2008 R2, Windows Vista, Windows Server 2003, and Windows XP

-

On Windows Server 2008 R2, the PowerShell 2.0 Engine is already included with the operating system. On Windows Vista, Windows Server 2003, and Windows XP, it is not installed by default, but you can add it by downloading and installing the Windows Management Framework Core package (KB968930), which includes Windows PowerShell 2.0 and WinRM 2.0. This section also explains how to start the PowerShell 2.0 Engine.

-

How to install Windows Management Framework Core

-
    -
1. Go to the Windows Management Framework Core (KB968930) download page on the Microsoft website.
2. Select the appropriate package for your operating system and language, and then click Download.
3. Run the downloaded file and follow the instructions to install it. You might need to restart your computer after the installation is complete.
-

How to start the PowerShell 2.0 Engine

-

The steps for starting the PowerShell 2.0 Engine are the same as for Windows Server 2012 R2 and Windows Server 2012. You need to use the -Version parameter of the powershell.exe or powershell_ise.exe command.

-

How to Use PowerShell 2.0 Commands and Scripts

-

Once you have installed and started the PowerShell 2.0 Engine, you can use it to run commands and scripts that are designed for PowerShell 2.0. Here are some tips and examples for using PowerShell 2.0 commands and scripts:

-

How to start PowerShell with the PowerShell 2.0 Engine

-

If you want to start a new PowerShell session with the PowerShell 2.0 Engine, you can use one of the following methods:

- Run powershell.exe -Version 2 from a Command Prompt window, the Run dialog, or an existing PowerShell session.
- Run powershell_ise.exe -Version 2 if you prefer to work in the Windows PowerShell ISE rather than the console.

How to run PowerShell 2.0 commands and scripts

-

If you want to run a single command or a script file that is compatible with PowerShell 2.0, you can use one of the following methods:

- Use the -Command parameter of powershell.exe together with -Version 2 to run a single command or a short pipeline.
- Use the -File parameter of powershell.exe together with -Version 2 to run a script file, as shown in the sketch below.
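A minimal sketch of both forms; the script path is only a placeholder:

powershell.exe -Version 2 -Command "Get-Service | Where-Object { $_.Status -eq 'Running' }"
powershell.exe -Version 2 -File C:\Scripts\legacy-report.ps1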

How to start a remote session or a background job with the PowerShell 2.0 Engine

-

If you want to start a remote session with another computer that has PowerShell 2.0 installed, you can use the -ConfigurationName parameter of the New-PSSession or Invoke-Command cmdlet and point it at a session configuration that runs the 2.0 engine. For example, you can use the following command to start a remote session with another computer named Server01 using the PowerShell 2.0 Engine:

New-PSSession -ComputerName Server01 -ConfigurationName Microsoft.PowerShell.2.0

To run a background job under the PowerShell 2.0 Engine, use the -PSVersion parameter of the Start-Job cmdlet:

Start-Job -ScriptBlock { Get-Process } -PSVersion 2.0
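When the job finishes, you can list it and collect its output from the same session:

Get-Job
Get-Job | Receive-Job -Keep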

Conclusion

-

In this article, we have shown you how to download and install PowerShell 2.0 on different versions of Windows, how to use PowerShell 2.0 commands and scripts, and some tips and recommendations for using PowerShell 2.0. We hope that this article has been helpful and informative for you. Here are some FAQs that you might have about PowerShell 2.0:

-

FAQs

-

What are the system requirements for PowerShell 2.0?

-

The system requirements for PowerShell 2.0 depend on the version of Windows that you have. Here are the minimum system requirements for each version of Windows that supports PowerShell 2.0:

- - - - - - - - - - - - - - - - - - - - - -
| Windows Version | Minimum System Requirements |
| --- | --- |
| Windows 8.1 and Windows 8 | 1 GHz processor, 1 GB RAM, 16 GB available disk space, DirectX 9 graphics device with WDDM 1.0 or higher driver |
| Windows Server 2012 R2 and Windows Server 2012 | 1.4 GHz processor, 512 MB RAM, 32 GB available disk space |
| Windows Server 2008 R2 and Windows Vista | 1 GHz processor, 512 MB RAM, 15 GB available disk space, Super VGA (800 x 600) or higher-resolution monitor |
| Windows Server 2003 and Windows XP | 233 MHz processor, 64 MB RAM, 1.5 GB available disk space, Super VGA (800 x 600) or higher-resolution monitor |
-

Is PowerShell 2.0 compatible with newer versions of PowerShell?

-

PowerShell 2.0 is generally compatible with newer versions of PowerShell, such as PowerShell 3.0, PowerShell 4.0, PowerShell 5.0, PowerShell 5.1, and PowerShell Core. However, there might be some differences or limitations in the functionality, syntax, or behavior of some commands or features between different versions of PowerShell. For example, some cmdlets or parameters that are available in newer versions of PowerShell might not be available or work differently in PowerShell 2.0. Therefore, it is recommended that you test your commands or scripts before running them on different versions of PowerShell to ensure that they work as expected.

-

Is PowerShell 2.0 deprecated or insecure?

-

PowerShell 2.0 still runs on the systems listed above, but Microsoft officially deprecated the Windows PowerShell 2.0 Engine in 2017, and it does not include the script logging and other security features that were added in later versions, which is why attackers sometimes force scripts to run under it in so-called downgrade attacks. It also depends on the older .NET Framework 2.0/3.5 runtime. Therefore, it is recommended that you upgrade to a newer version of PowerShell and .NET Framework if possible, apply the latest security updates and best practices for your system, and keep the 2.0 Engine turned off unless an older script or host program genuinely requires it.

-

Where can I find more information or help about PowerShell 2.0?

-

If you want to learn more about PowerShell 2.0, you can refer to the following resources:

- The built-in help system: run Get-Help followed by a cmdlet name, or Get-Help about_* to browse the conceptual help topics, from any PowerShell prompt.
- The official Windows PowerShell documentation on Microsoft Learn, which covers installing, starting, and troubleshooting the PowerShell 2.0 Engine.

How can I uninstall or disable PowerShell 2.0?

-

If you want to uninstall or disable PowerShell 2.0, you can use one of the following methods:

- On Windows 8.1 and Windows 8, open Turn Windows features on or off and clear the check box for Windows PowerShell 2.0 Engine, as described earlier in this article.
- On Windows Server 2012 R2 and Windows Server 2012, remove the feature in Server Manager or with the Uninstall-WindowsFeature cmdlet shown above.
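On a client edition of Windows you can also do this from an elevated PowerShell prompt with the DISM cmdlets; the feature name below is an assumption based on recent Windows releases, so list the installed optional features first if it does not match:

Get-WindowsOptionalFeature -Online | Where-Object { $_.FeatureName -like '*PowerShellV2*' }
Disable-WindowsOptionalFeature -Online -FeatureName MicrosoftWindowsPowerShellV2Root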

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Urdu Subtitles for Game of Thrones Season 2 Episode 5 The Ghost of Harrenhal.md b/spaces/1phancelerku/anime-remove-background/Download Urdu Subtitles for Game of Thrones Season 2 Episode 5 The Ghost of Harrenhal.md deleted file mode 100644 index 8993640305abc614362b3d4d93c1916ac1596204..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Urdu Subtitles for Game of Thrones Season 2 Episode 5 The Ghost of Harrenhal.md +++ /dev/null @@ -1,201 +0,0 @@ - -

How to Watch Game of Thrones Season 2 Episode 5 with Urdu Subtitles

-

Game of Thrones is one of the most popular and acclaimed TV shows in history. Based on the fantasy novels by George R.R. Martin, it tells the story of a medieval world where several noble families vie for control over the Iron Throne, while an ancient threat looms beyond a massive wall in the north. The show is known for its complex characters, intricate plots, stunning visuals, and shocking twists.

-

game of thrones season 2 episode 5 urdu subtitles download


Download Zip ::: https://jinyurl.com/2uNKWZ



-

Season 2 Episode 5, titled "The Ghost of Harrenhal", is one of the most pivotal episodes in the series. It features several major events that change the course of the war for the throne, as well as some intriguing developments in other parts of the world. In this article, we will give you a brief recap of what happens in this episode, and then show you how to watch it with Urdu subtitles.

-

Why would you want to watch Game of Thrones with Urdu subtitles? Well, there are several reasons. Maybe you are a fan of the Urdu language and culture, and you want to enjoy the show in a different way. Maybe you are learning Urdu, and you want to improve your skills by watching a high-quality show. Maybe you have trouble understanding some accents or dialogues in English, and you want to make sure you don't miss anything important. Whatever your reason is, we have got you covered. In this article, we will tell you how to download Urdu subtitles for Game of Thrones Season 2 Episode 5, and how to watch it online or offline with the subtitles. But first, let's recap what happens in this episode.

-

The Ghost of Harrenhal Recap

-

"The Ghost of Harrenhal" is the fifth episode of the second season of Game of Thrones. It aired on April 29, 2012, and was written by David Benioff and D.B. Weiss, and directed by David Petrarca. The episode has a runtime of 55 minutes, and has a rating of 8.8 out of 10 on IMDb. Here are the main events that take place in this episode:

-


-

In the Stormlands

-

The episode begins with a shocking scene: the assassination of Renly Baratheon, one of the claimants to the Iron Throne, by a shadowy figure that resembles his brother Stannis. The shadow is actually a creature conjured by Melisandre, a red priestess who serves Stannis and believes him to be the chosen one of her god. The murder is witnessed by Catelyn Stark, the widow of Ned Stark who was executed by King Joffrey, and Brienne of Tarth, a female knight who swore loyalty to Renly. They are accused of the crime by Renly's guards, but they manage to escape with the help of Loras Tyrell, Renly's lover and ally.

-

With Renly dead, most of his bannermen switch their allegiance to Stannis, who now has the largest army in Westeros. However, some of them remain loyal to the Tyrells, who are not willing to bend the knee to Stannis. Stannis offers to make Loras his heir if he joins him, but Loras refuses. He also rejects Catelyn's plea to join forces with Robb Stark, her son and the King in the North, who is fighting against Joffrey. Stannis then prepares to march on King's Landing, the capital of the Seven Kingdoms, where Joffrey resides.

-

Brienne and Catelyn decide to leave the Stormlands and head north. Brienne swears an oath to serve Catelyn and protect her. She also vows to avenge Renly's death by killing Stannis.

-

In King's Landing

-

In the capital, Tyrion Lannister, the witty and clever brother of Cersei Lannister, Joffrey's mother and regent, is trying to keep the city safe and stable as the Hand of the King. He discovers that Cersei has ordered the alchemists to produce large quantities of wildfire, a highly flammable and explosive substance that can burn anything. Cersei plans to use it as a weapon against Stannis' fleet when he attacks the city. Tyrion is alarmed by this idea, as he knows that wildfire is very dangerous and unpredictable. He decides to take control of the wildfire production and distribution, and tells Cersei that he will use it in a smarter way.

-

Tyrion also has to deal with the growing unrest and discontent among the people of King's Landing, who are suffering from hunger, poverty, and fear. He tries to appease them by sending Princess Myrcella Baratheon, Joffrey's younger sister, to Dorne, a southern kingdom that is allied with the Lannisters. He hopes that this will secure their friendship and prevent them from joining Stannis or Robb. He also hopes that Myrcella will be safer and happier in Dorne than in King's Landing.

-

However, his plan backfires when he escorts Myrcella to the ship that will take her to Dorne. The people of King's Landing riot and attack him and his entourage, throwing stones and insults at them. They also target Joffrey, who responds by ordering his guards to kill them all. A bloody chaos ensues, in which several people are killed or injured, including some of Tyrion's allies. Tyrion manages to survive and reach the safety of the Red Keep, the royal castle. He confronts Joffrey for his cruelty and stupidity, and slaps him in front of everyone.

-

In Qarth

-

Meanwhile, across the Narrow Sea in Essos, Daenerys Targaryen, the last surviving member of the Targaryen dynasty that ruled Westeros before Robert Baratheon overthrew them, is trying to find allies and resources for her quest to reclaim the Iron Throne. She has three young dragons, the only ones in existence, but they are still too small and weak to be used in battle. She also has a small band of loyal followers, including Jorah Mormont, a former knight who serves as her adviser and protector, and her bloodriders, a group of Dothraki warriors who swore to follow her after the death of her husband Khal Drogo.

-

Daenerys and her followers have been welcomed in Qarth, a wealthy and exotic city in the east, by Xaro Xhoan Daxos, a powerful merchant and a member of the Thirteen, the rulers of Qarth. Xaro offers Daenerys his hospitality and his friendship, but he also has ulterior motives. He proposes to marry Daenerys and give her half of his wealth, in exchange for one of her dragons. Daenerys refuses, as she considers her dragons to be her children and her only hope.

-

Daenerys also meets two mysterious characters in Qarth: Quaithe, a masked woman who claims to be a shadowbinder from Asshai, a dark and mysterious land in the far east; and Pyat Pree, a bald and blue-lipped warlock who invites Daenerys to visit the House of the Undying, the headquarters of his order. Quaithe warns Daenerys to beware of those who seek to use or harm her, and tells her that she must go to Asshai to learn the truth about her destiny. Pyat Pree promises Daenerys that she will see wonders and visions in the House of the Undying, and that he has something that belongs to her.

-

At the end of the episode, Daenerys discovers that Pyat Pree was telling the truth: he has stolen her dragons and taken them to the House of the Undying. He lured them away from their cage with a decoy, and killed most of Daenerys' guards in the process. Daenerys is furious and distraught, and vows to get her dragons back.

-

Beyond the Wall

-

Finally, in the frozen lands beyond the Wall, a massive barrier of ice that separates Westeros from the wild lands in the north, Jon Snow, the bastard son of Ned Stark who joined the Night's Watch, a sworn brotherhood that guards the Wall and protects the realm from the dangers beyond, is on a dangerous mission. He is part of a small group of rangers led by Qhorin Halfhand, a legendary warrior who is respected and feared by both his allies and enemies. Their goal is to find and kill Mance Rayder, a former member of the Night's Watch who deserted and became the King-Beyond-the-Wall, uniting the wildlings under his command.

-

On their way, they encounter a group of wildlings led by Ygritte, a fiery-haired woman who catches Jon's eye. They manage to kill or capture most of them, except for Ygritte, who is taken prisoner by Jon. Qhorin orders Jon to execute her, but Jon hesitates. He does not want to kill an unarmed woman, especially one that he finds attractive. He tries to do it anyway, but Ygritte escapes. Jon chases her through the snow, but loses sight of his comrades. He catches up with Ygritte, but she tricks him into falling into a trap. She then taunts him for being a virgin and a crow (a derogatory term for members of the Night's Watch), and tells him that he knows nothing about the world.

-

Meanwhile, in Winterfell, the ancestral home of the Starks in the north, Bran Stark, Ned's youngest son who was crippled after being pushed from a tower by Jaime Lannister, Cersei's brother and lover, is having strange dreams. He dreams that he is his direwolf Summer, running through the woods and hunting. He also dreams that he meets Jojen Reed, a boy who claims to have similar dreams and abilities. Jojen tells Bran that he is a warg, someone who can enter the minds of animals and control them. He also tells him that he has "the sight", which allows him to see past and future events. He warns Bran that he is in danger, and that he must find "the three-eyed raven", a mysterious figure that appears in his dreams.

-

In Pyke, the seat of House Greyjoy on the Iron Islands, an archipelago off the west coast of Westeros, Theon Greyjoy, Ned's former ward who betrayed him and joined his father Balon Greyjoy in his rebellion against the Lannisters, is preparing to leave with his sister Yara Greyjoy and her fleet. He has been given the task of raiding the coast of the north, while Robb Stark is away fighting in the south. He hopes to prove himself to his father and his people, who have always looked down on him for being raised by the Starks. He also hopes to impress Yara, who is a skilled and respected captain and warrior, and who mocks him for being weak and foolish.

-

How to Download Urdu Subtitles for Game of Thrones Season 2 Episode 5

-

Now that you have a clear idea of what happens in "The Ghost of Harrenhal", you might be wondering how to watch it with Urdu subtitles. Subtitles are a great way to enhance your viewing experience, especially if you are not a native speaker of English, or if you want to learn a new language. Subtitles can help you improve your vocabulary, grammar, pronunciation, and comprehension skills, as well as expose you to different cultures and expressions. They can also help you enjoy the show more, as you won't miss any important details or dialogues that might be hard to catch or understand otherwise.

-

So, how can you get Urdu subtitles for Game of Thrones Season 2 Episode 5? Well, there are several sources that offer them, both free and paid. However, not all of them are reliable or accurate. Some of them might have poor quality, incorrect translations, missing or delayed lines, or even malware or viruses. Therefore, you need to be careful and choose the best source for your needs. Here are some of the factors that you should consider when looking for Urdu subtitles for Game of Thrones Season 2 Episode 5:

- Quality: The subtitles should be clear, readable, and synchronized with the video and audio. They should also match the tone, style, and context of the show.
- Accuracy: The subtitles should convey the meaning and intention of the original dialogue, without adding or omitting anything. They should also respect the grammar, spelling, and punctuation rules of Urdu.
- Availability: The subtitles should be easy to find and download, without requiring any registration or payment. They should also be compatible with your device and media player.
- Legality: The subtitles should be legal and authorized by the creators or owners of the show. They should not violate any copyright or intellectual property laws.

Based on these criteria, we have compiled a list of some of the best websites that offer Urdu subtitles for Game of Thrones Season 2 Episode 5. Here they are:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Website | Quality | Accuracy | Availability | Legality |
| --- | --- | --- | --- | --- |
| [isubdb.com] | High | High | Free and easy | Legal |
| [subscene.com] | High | High | Free and easy | Legal |
| [opensubtitles.org] | Medium | Medium | Free but requires registration | Legal |
| [tvsubtitles.net] | Low | Low | Free but slow and unreliable | Illegal |
-

As you can see from the table above, our recommendation is to use [isubdb.com] as your source of Urdu subtitles for Game of Thrones Season 2 Episode 5. This website has high-quality and accurate subtitles that are free and easy to download. It also has a large collection of subtitles for other episodes and seasons of Game of Thrones, as well as other shows and movies. It is also legal and safe to use.

-

To download Urdu subtitles from [isubdb.com], all you have to do is follow these simple steps:

1. Go to [isubdb.com] in your browser.
2. Type "Game of Thrones" in the search box.
3. Select "Game of Thrones - Season 2" from the results.
4. Scroll down to "Episode 5 - The Ghost of Harrenhal".
5. Click on "Urdu" under "Subtitles".
6. Click on "Download" next to the subtitle file that you want.
7. Save the file on your device.

You have successfully downloaded the Urdu subtitles for Game of Thrones Season 2 Episode 5. Now, you can watch the episode with the subtitles on your device. But how do you do that? There are two options: online streaming or offline downloading. Let's see what they are and how they work.
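One practical tip before moving on: most desktop players load the subtitles automatically if the .srt file has the same base name as the video and sits in the same folder. A quick rename does the trick; both file names below are placeholders, the first line is for the Windows Command Prompt and the second for macOS or Linux:

ren "got-s02e05-urdu.srt" "Game.of.Thrones.S02E05.srt"
mv "got-s02e05-urdu.srt" "Game.of.Thrones.S02E05.srt"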

How to Watch Game of Thrones Season 2 Episode 5 with Urdu Subtitles Online or Offline

-

Online streaming is the option of watching the episode on a platform that allows you to stream it over the internet, without having to download it on your device. This option is convenient and fast, as you can watch the episode anytime and anywhere, as long as you have a stable internet connection. However, this option also has some drawbacks, such as requiring a subscription or payment, consuming a lot of data, or being subject to geo-restrictions or censorship.

-

Offline downloading is the option of downloading the episode on your device, and then watching it with a media player that supports subtitles. This option is more flexible and reliable, as you can watch the episode offline, without worrying about internet issues or interruptions. However, this option also has some challenges, such as taking up a lot of space, exposing you to malware or viruses, or violating legal or ethical rules.

-

So, which option should you choose? Well, that depends on your preferences and circumstances. To help you decide, we have compared some of the best platforms and methods for online streaming and offline downloading of Game of Thrones Season 2 Episode 5 with Urdu subtitles. Here they are:

-

Online Streaming Options

-

There are many platforms that allow you to stream Game of Thrones online, with or without subtitles. However, not all of them are available or accessible in every country or region. Therefore, you need to check the availability and compatibility of the platform before choosing it. Here are some of the most popular and reliable platforms that offer online streaming of Game of Thrones Season 2 Episode 5 with Urdu subtitles:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Platform | Features | Price | Compatibility |
| --- | --- | --- | --- |
| HBO Max | The official and legal platform for streaming Game of Thrones; high-quality video and audio; supports multiple languages and subtitles; offers other HBO shows and movies; allows offline downloading on mobile devices | $14.99 per month | Available in the US and some Latin American countries; compatible with most devices and browsers |
| Netflix | The most popular and widely used streaming platform; high-quality video and audio; supports multiple languages and subtitles; offers a large variety of shows and movies; allows offline downloading on mobile devices | $8.99 to $17.99 per month depending on the plan | Available in most countries except China, Syria, North Korea, and Crimea; compatible with most devices and browsers |
| Amazon Prime Video | The streaming platform of Amazon; high-quality video and audio; supports multiple languages and subtitles; offers other Amazon shows and movies; allows offline downloading on mobile devices | $8.99 per month or $119 per year for Prime membership | Available in most countries except China, Iran, North Korea, Syria, and Crimea; compatible with most devices and browsers |
-

As you can see from the table above, our recommendation is to use HBO Max as your platform for online streaming of Game of Thrones Season 2 Episode 5 with Urdu subtitles. This is because HBO Max is the official and legal platform for streaming Game of Thrones, and it offers high-quality video and audio, as well as multiple languages and subtitles. It also offers other HBO shows and movies that you might enjoy, such as Westworld, The Sopranos, The Wire, etc. It also allows offline downloading on mobile devices, which is convenient if you want to watch the episode later.

-

To stream Game of Thrones Season 2 Episode 5 with Urdu subtitles on HBO Max, all you have to do is follow these simple steps:

1. Go to [hbomax.com] in your browser.
2. Sign up for an account or log in if you already have one.
3. Choose your plan and payment method.
4. Search for "Game of Thrones" in the search box.
5. Select "Game of Thrones - Season 2" from the results.
6. Scroll down to "Episode 5 - The Ghost of Harrenhal" and click on "Play".
7. Click on the settings icon at the bottom right corner of the screen.
8. Click on "Subtitles", and then click on "Urdu".
9. Enjoy watching the episode with Urdu subtitles.

Offline Downloading Options

-

If you prefer to download the episode on your device and watch it offline, you have some other options as well. However, you need to be aware of the risks and challenges that come with this option, such as malware, viruses, legal issues, or ethical dilemmas. Therefore, you need to be careful and responsible when choosing this option. Here are some of the most common and effective methods for offline downloading of Game of Thrones Season 2 Episode 5 with Urdu subtitles:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Method | Speed | Security | Legality |
| --- | --- | --- | --- |
| Torrenting | Fast | Risky | Illegal |
| Direct Downloading | Slow | Safer | Illegal |
| DVD/Blu-ray Ripping | Medium | Safe | Legal |
-

As you can see from the table above, our recommendation is to use DVD/Blu-ray ripping as your method for offline downloading of Game of Thrones Season 2 Episode 5 with Urdu subtitles. This is because DVD/Blu-ray ripping is the only legal and safe method among the three, as it does not involve downloading or sharing pirated content. It also offers decent speed and quality, as well as the option to choose your preferred language and subtitles. However, this method also requires that you own or buy a physical copy of the episode on DVD or Blu-ray, which might be expensive or hard to find.

-

To rip Game of Thrones Season 2 Episode 5 with Urdu subtitles from DVD or Blu-ray, all you have to do is follow these simple steps:

1. Insert the DVD or Blu-ray disc into your computer's drive.
2. Download and install a DVD/Blu-ray ripping program, such as [HandBrake] or [MakeMKV].
3. Open the software and select the disc as the source.
4. Choose the output format and settings that suit your device and preferences.
5. Select "Urdu" as the subtitle language.
6. Click on "Start" or "Rip" to begin the process.
7. Wait for the process to finish and save the file on your device.

You have successfully ripped Game of Thrones Season 2 Episode 5 with Urdu subtitles from DVD or Blu-ray. Now, you can watch the episode with a media player that supports subtitles, such as [VLC] or [KMPlayer].
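If you launch playback from a terminal, VLC can also be pointed at an external subtitle file directly; both file names below are placeholders:

vlc "Game.of.Thrones.S02E05.mkv" --sub-file "Game.of.Thrones.S02E05.urdu.srt"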

Conclusion

-

In this article, we have shown you how to watch Game of Thrones Season 2 Episode 5 with Urdu subtitles. We have given you a brief recap of what happens in this episode, and then explained how to download Urdu subtitles from various sources. We have also compared some of the best platforms and methods for online streaming and offline downloading of the episode with Urdu subtitles. We hope that you have found this article helpful and informative, and that you will enjoy watching this episode with Urdu subtitles.

-

If you are a fan of Game of Thrones, you might also want to check out our other articles on how to watch other episodes and seasons of the show with Urdu subtitles. You might also want to share your feedback and opinions on this episode and the show in general with us and other readers. You can do so by leaving a comment below or by contacting us through our website or social media channels.

-

Thank you for reading this article, and happy watching!

-

Frequently Asked Questions (FAQs)

-

Here are some of the most common questions that people ask about watching Game of Thrones Season 2 Episode 5 with Urdu subtitles:

-

Q: Where can I watch Game of Thrones Season 2 Episode 5 with Urdu subtitles for free?

-

A: There are some websites that offer free streaming or downloading of Game of Thrones Season 2 Episode 5 with Urdu subtitles, such as [isubdb.com], [subscene.com], or [opensubtitles.org]. However, these websites might not be legal or safe to use, as they might contain pirated content or malware. Therefore, we recommend that you use a paid or official platform for streaming or downloading the episode, such as HBO Max, Netflix, or Amazon Prime Video.

-

Q: How can I watch Game of Thrones Season 2 Episode 5 with Urdu subtitles on my TV?

-

A: There are several ways to watch Game of Thrones Season 2 Episode 5 with Urdu subtitles on your TV. One way is to connect your computer or laptop to your TV using an HDMI cable or a wireless connection. Another way is to use a streaming device, such as a Roku, Chromecast, Apple TV, or Fire TV, that supports the platform that you are using to stream the episode, such as HBO Max, Netflix, or Amazon Prime Video. A third way is to use a smart TV that has the platform that you are using to stream the episode built-in or available as an app. In any case, you need to make sure that the platform that you are using supports Urdu subtitles, and that you enable them before or during watching the episode.

-

Q: How can I watch Game of Thrones Season 2 Episode 5 with Urdu subtitles on my phone or tablet?

-

A: There are also several ways to watch Game of Thrones Season 2 Episode 5 with Urdu subtitles on your phone or tablet. One way is to use the browser on your device to access the website that offers online streaming or downloading of the episode with Urdu subtitles, such as [isubdb.com], [subscene.com], or [opensubtitles.org]. However, this way might not be very convenient or comfortable, as the website might not be optimized for mobile devices, and the subtitles might not be very clear or readable. Another way is to use the app of the platform that you are using to stream or download the episode, such as HBO Max, Netflix, or Amazon Prime Video. This way is more convenient and comfortable, as the app is designed for mobile devices, and the subtitles are more clear and readable. However, this way requires that you have a subscription or payment for the platform, and that you have enough space and data on your device. A third way is to download the episode and the subtitles on your computer or laptop, and then transfer them to your device using a USB cable or a wireless connection. This way is more flexible and reliable, as you can watch the episode offline, without worrying about internet issues or interruptions. However, this way also requires that you have enough space and data on your device, and that you use a media player that supports subtitles, such as [VLC] or [KMPlayer].

-

Q: How can I watch Game of Thrones Season 2 Episode 5 with Urdu subtitles in HD quality?

-

A: To watch Game of Thrones Season 2 Episode 5 with Urdu subtitles in HD quality, you need to make sure that both the video and the subtitles are in HD quality. The video quality depends on the source and the platform that you are using to stream or download the episode. The subtitles quality depends on the source and the format that you are using to download or enable them. Generally speaking, the higher the quality of the video and the subtitles, the larger the file size and the more data they consume. Therefore, you need to balance between quality and speed when choosing your source and platform. For example, if you want to stream the episode in HD quality with Urdu subtitles online, you might want to use HBO Max, Netflix, or Amazon Prime Video, as they offer high-quality video and audio, as well as multiple languages and subtitles. However, if you want to download the episode in HD quality with Urdu subtitles offline, you might want to use torrenting or direct downloading from a reliable website, such as [isubdb.com], [subscene.com], or [opensubtitles.org], as they offer high-quality video and subtitles files.

-

Q: How can I watch Game of Thrones Season 2 Episode 5 with Urdu subtitles with my friends?

-

A: If you want to watch Game of Thrones Season 2 Episode 5 with Urdu subtitles with your friends, you have some options as well. One option is to watch it together in person, either at your place or at their place. You can use any of the methods or platforms that we have mentioned above to watch the episode with Urdu subtitles on your TV, computer, laptop, phone, or tablet. You can also use speakers or headphones to enhance the sound quality. Another option is to watch it together online, using a platform or an app that allows you to watch videos with your friends remotely, such as [Watch2Gether], [Netflix Party], or [Scener]. These platforms or apps let you create a private room where you can invite your friends and watch the episode with Urdu subtitles synchronously. You can also chat and comment with your friends while watching the episode. However, these platforms or apps might require that you and your friends have a subscription or payment for the platform that you are using to stream the episode, such as HBO Max, Netflix, or Amazon Prime Video.

-

Whichever option you choose, watching Game of Thrones Season 2 Episode 5 with Urdu subtitles with your friends can be a fun and enjoyable experience. You can share your thoughts and feelings about the episode, discuss the characters and the plot, and have a good time together.

-

-

This is the end of the article. I hope that you have learned something new and useful from this article, and that you have enjoyed reading it. If you have any questions, comments, or suggestions about this article or the topic of watching Game of Thrones Season 2 Episode 5 with Urdu subtitles, please feel free to contact me through my website or social media channels. I would love to hear from you and help you with anything that you need. Thank you for your time and attention, and have a great day!

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Drag Racing Streets Mod Apk A Physics-Based Racing Game with Unlimited Money and Customization.md b/spaces/1phancelerku/anime-remove-background/Drag Racing Streets Mod Apk A Physics-Based Racing Game with Unlimited Money and Customization.md deleted file mode 100644 index c62461c2db6085a54ad8e48eacbda21d4ecba254..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Drag Racing Streets Mod Apk A Physics-Based Racing Game with Unlimited Money and Customization.md +++ /dev/null @@ -1,96 +0,0 @@ -
-

Drag Racing: Streets Mod APK - Unlimited Money and Fun

-

Do you love drag racing games? Do you want to experience the thrill of driving fast cars on realistic streets? Do you want to customize your own car and compete with other players online? If you answered yes to any of these questions, then you should try Drag Racing: Streets, a popular racing game for Android devices. And if you want to make the game even more fun and exciting, you should download Drag Racing: Streets Mod APK, a modified version of the game that gives you unlimited money, coins, levels, cars, and more. In this article, we will tell you everything you need to know about Drag Racing: Streets and its mod apk version.

-

drag racing streets mod apk unlimited money 2.0 7


Download File ————— https://jinyurl.com/2uNQV5



-

What is Drag Racing: Streets?

-

Drag Racing: Streets is a racing game developed by Square, a Russian studio that specializes in realistic car simulation games. The game lets you create your own car from scratch, choosing from hundreds of parts and options. You can also customize your car's appearance, performance, tuning, and upgrades. You can then take your car to the streets and race against real players from around the world in various modes and events. You can also build your own garage and collect different cars.

-

Features of Drag Racing: Streets

-

Drag Racing: Streets has many features that make it one of the best drag racing games on the market. Here are some of them:

-

Customize your car

-

You can create your own car from scratch, choosing from hundreds of parts and options. You can change the engine, transmission, suspension, brakes, tires, wheels, body kits, paint, stickers, and more. You can also adjust the settings of your car's performance, such as power, torque, weight distribution, gear ratios, boost pressure, nitrous oxide injection, etc. You can make your car look and perform exactly how you want it.

-

Race against real players

-

You can race against real players from around the world in various modes and events. You can join tournaments, championships, leagues, seasons, daily races, etc. You can also challenge your friends or random opponents in one-on-one duels. You can show off your skills and win prizes and rewards.

-

Build your own garage

-

You can build your own garage and collect different cars. You can buy new cars or win them in races or events. You can also sell or trade your cars with other players. You can have up to 16 cars in your garage at a time.

-


-

Why download Drag Racing: Streets Mod APK?

-

Drag Racing: Streets is a fun and addictive game, but it can also be challenging and frustrating at times. You may need a lot of money and coins to buy new cars, parts, upgrades, etc. You may also need to unlock new levels and cars to access more features and modes. And you may face ads and pop-ups that interrupt your gameplay.

-

That's why you should download Drag Racing: Streets Mod APK, a modified version of the game that gives you unlimited money, coins, levels, cars, and more. With this mod apk version, you can enjoy the game without any limitations or restrictions. You can buy anything you want, unlock everything you need, and play without any ads or interruptions.

-

Unlimited money and coins

-

With Drag Racing: Streets Mod APK, you will get unlimited money and coins in your account. You can use them to buy new cars, parts, upgrades, etc.

Unlocked levels and cars

-

With Drag Racing: Streets Mod APK, you will get all the levels and cars unlocked in the game. You can access any mode or event you want, and choose any car you like. You can also try out different cars and see how they perform on the streets.

-

Free and safe to use

-

Drag Racing: Streets Mod APK is free and safe to use. You don't need to root your device or install any other apps to use it. You just need to download the mod apk file from a trusted source and install it on your device. You can also update the game regularly without losing your mod features.

-

How to download and install Drag Racing: Streets Mod APK?

-

If you want to download and install Drag Racing: Streets Mod APK, you need to follow these simple steps:

-

Step 1: Download the mod apk file

-

You can download the mod apk file from this link: Drag Racing: Streets Mod APK Download. The file size is about 300 MB, so make sure you have enough space on your device. You can also scan the file with an antivirus program before opening it.

-

Step 2: Enable unknown sources

-

Before you can install the mod apk file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

-

Step 3: Install the mod apk file

-

After you have enabled unknown sources, you can install the mod apk file. To do this, locate the file in your downloads folder and tap on it. You will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation process to finish.

-

Step 4: Enjoy the game

-

Once the installation is done, you can open the game and enjoy it. You will see that you have unlimited money, coins, levels, cars, and more. You can start playing the game and have fun.

-

Conclusion

-

Drag Racing: Streets is a great racing game that lets you create your own car and race against real players on realistic streets. It has many features that make it fun and addictive. However, if you want to make the game even more enjoyable, you should download Drag Racing: Streets Mod APK, a modified version of the game that gives you unlimited money, coins, levels, cars, and more. With this mod apk version, you can buy anything you want, unlock everything you need, and play without any ads or interruptions. You can also download and install the mod apk easily and safely by following our guide.

-

If you are a fan of drag racing games, you should not miss Drag Racing: Streets Mod APK. Download it now and experience the thrill of driving fast cars on realistic streets.

-

FAQs

-

Here are some frequently asked questions about Drag Racing: Streets Mod APK:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Action RPG and Free Shopping with Pixel Blade M VIP Mod APK.md b/spaces/1phancelerku/anime-remove-background/Enjoy Action RPG and Free Shopping with Pixel Blade M VIP Mod APK.md deleted file mode 100644 index ce843f1a06cb0e20be758a1f256930d4af8e129e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy Action RPG and Free Shopping with Pixel Blade M VIP Mod APK.md +++ /dev/null @@ -1,117 +0,0 @@ -
-

Pixel Blade M VIP Mod APK Free Shopping: A Review

-

If you are looking for a fun and exciting game that combines pixel-style graphics and hack-and-slash action, then you should check out Pixel Blade M VIP. This is a cubic action RPG game that lets you play as the last pixel hero in PIXEL WORLD. You can collect various weapons by hunting monsters in the dungeon and save the world from evil forces. But what if you want to enjoy the game without spending real money or watching annoying ads? Well, there is a solution for that: Pixel Blade M VIP Mod APK Free Shopping. This is a modified version of the game that gives you unlimited money and gems, free shopping for weapons and items, no ads, and no root required. In this article, we will review this mod apk and tell you why you should try it out.

-

pixel blade m vip mod apk free shopping


Download File >>> https://jinyurl.com/2uNKm6



-

What is Pixel Blade M VIP?

-

Pixel Blade M VIP is a 3D pixel fantasy hack-and-slash game developed by PixelStar Games. It is a sequel to the popular Pixel Blade game that has over 1 million downloads on Google Play. In this game, you can choose from different classes of pixel heroes, such as warrior, mage, archer, assassin, and more. You can also customize your hero's appearance, skills, and equipment. The game features various modes, such as story mode, boss mode, tower mode, raid mode, and PVP mode. You can explore different dungeons and fight against various monsters and bosses. You can also join a guild and cooperate with other players to defeat powerful enemies.

-

Features of Pixel Blade M VIP

-

Some of the features of Pixel Blade M VIP are:

- -

How to download and install Pixel Blade M VIP Mod APK

-

If you want to download and install Pixel Blade M VIP Mod APK Free Shopping, follow these steps (an optional adb-based alternative is sketched after the list):

-
    -
  1. Click on this link to download the mod apk file.
  2. Allow unknown sources on your device by going to Settings > Security > Unknown Sources.
  3. Locate the downloaded file in your file manager and tap on it to install it.
  4. Launch the game and enjoy free shopping and unlimited money.
-
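If you prefer to sideload the APK from a computer rather than tapping through the phone, the same install can be scripted over adb. This is only an illustrative sketch, not part of the original guide: it assumes adb (Android platform-tools) is installed, USB debugging is enabled on the phone, and the downloaded file is saved under a placeholder name such as pixel-blade-m-vip-mod.apk (use whatever your browser actually named the download).

```python
import subprocess
from pathlib import Path

# Placeholder path -- point this at wherever your browser saved the mod APK.
APK_PATH = Path.home() / "Downloads" / "pixel-blade-m-vip-mod.apk"


def adb(*args: str) -> None:
    """Run a single adb command and raise if it exits with an error."""
    subprocess.run(["adb", *args], check=True)


if __name__ == "__main__":
    adb("devices")                       # confirm the phone is connected and authorized
    adb("install", "-r", str(APK_PATH))  # -r keeps existing app data if the game is already installed
    print("Install finished; launch the game from the phone's app drawer.")
```

If the install fails because a Play Store copy of the game is already on the phone, uninstall that copy first: a modded build is signed with a different key, so it cannot update the original in place.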

Why you should try Pixel Blade M VIP Mod APK Free Shopping

-

There are many reasons why you should try Pixel Blade M VIP Mod APK Free Shopping. Here are some of them:

-

Unlimited money and gems

-

With this mod apk, you will get unlimited money and gems in your account. You can use them to buy any weapon or item you want without worrying about the cost. You can also upgrade your weapons and items to make them more powerful. This will help you progress faster in the game and defeat stronger enemies.

-


-

No ads and

No ads and no root required

-

Another reason why you should try Pixel Blade M VIP Mod APK Free Shopping is that it does not have any ads or require root access. Ads can be annoying and distracting when you are playing a game. They can also consume your data and battery. With this mod apk, you can enjoy the game without any interruptions or pop-ups. Moreover, you do not need to root your device to use this mod apk. Rooting can be risky and void your warranty. It can also expose your device to malware and viruses. With this mod apk, you can play the game safely and smoothly.

-

Enjoy the pixel-style graphics and action-packed gameplay

-

The last reason why you should try Pixel Blade M VIP Mod APK Free Shopping is that it has amazing pixel-style graphics and action-packed gameplay. If you are a fan of retro games and pixel art, you will love this game. The game has colorful and detailed graphics that create a nostalgic atmosphere. The game also has fast-paced and thrilling gameplay that will keep you hooked. You can slash, hack, and shoot your way through various dungeons and enemies. You can also use different skills and combos to unleash powerful attacks. The game is fun and addictive for anyone who loves action RPG games.

-

Pros and cons of Pixel Blade M VIP Mod APK Free Shopping

-

Like any other mod apk, Pixel Blade M VIP Mod APK Free Shopping has its pros and cons. Here are some of them:

-

Pros

- Unlimited money and gems from the start
- Free shopping for weapons and items
- No ads or interruptions
- No root required

Cons

- Possible compatibility issues, glitches, or bugs
- No guarantee of official updates
- Not an official release, so you use it at your own risk

Conclusion

-

Pixel Blade M VIP Mod APK Free Shopping is a modified version of Pixel Blade M VIP that gives you unlimited money and gems, free shopping for weapons and items, no ads, and no root required. It is a great way to enjoy the pixel-style graphics and action-packed gameplay of Pixel Blade M VIP without spending real money or watching annoying ads. However, it also has some drawbacks, such as compatibility issues, glitches, bugs, or lack of updates. Therefore, you should use this mod apk at your own risk and discretion.

-

FAQs

-

-
-
\ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py deleted file mode 100644 index 5f78337a3d1f9eb6e9145eb5093618796c6842d2..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/A00001/bingothoo/src/components/ui/select.tsx b/spaces/A00001/bingothoo/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/A666sxr/Genshin_TTS/transforms.py b/spaces/A666sxr/Genshin_TTS/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/A666sxr/Genshin_TTS/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def 
piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., 
-1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/AIWaves/Software_Company/src/agents/State.py b/spaces/AIWaves/Software_Company/src/agents/State.py deleted file mode 100644 index fa4b050eb09fba46a9a9431f39ac281d2abca016..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Software_Company/src/agents/State.py +++ /dev/null @@ -1,142 +0,0 @@ -from Component import * - - -class State: - """ - Sub-scenes of role activities, responsible for storing the tasks that each role needs to do - """ - def __init__(self, **kwargs): - self.next_states = {} - self.name = kwargs["name"] - - self.environment_prompt = ( - kwargs["environment_prompt"] if "environment_prompt" in kwargs else "" - ) - - self.roles = kwargs["roles"] if "roles" in kwargs else 
(list(kwargs["agent_states"].keys()) if "agent_states" in kwargs else [0]) - if len(self.roles) == 0: - self.roles = [0] - self.begin_role = ( - kwargs["begin_role"] if "begin_role" in kwargs else self.roles[0] - ) - self.begin_query = kwargs["begin_query"] if "begin_query" in kwargs else None - - self.is_begin = True - - self.summary_prompt = ( - kwargs["summary_prompt"] if "summary_prompt" in kwargs else None - ) - self.current_role = self.begin_role - self.components = ( - self.init_components(kwargs["agent_states"]) - if "agent_states" in kwargs - else {} - ) - self.index = ( - self.roles.index(self.begin_role) if self.begin_role in self.roles else 0 - ) - self.chat_nums = 0 - - def init_components(self, agent_states_dict: dict): - agent_states = {} - for role, components in agent_states_dict.items(): - component_dict = {} - for component, component_args in components.items(): - if component: - # "role" "style" - if component == "style": - component_dict["style"] = StyleComponent(component_args["role"]) - - # "task" - elif component == "task": - component_dict["task"] = TaskComponent(component_args["task"]) - - # "rule" - elif component == "rule": - component_dict["rule"] = RuleComponent(component_args["rule"]) - - # "demonstration" - elif component == "demonstrations": - component_dict["demonstrations"] = DemonstrationComponent( - component_args["demonstrations"] - ) - - # "output" - elif component == "output": - component_dict["output"] = OutputComponent( - component_args["output"] - ) - - elif component == "last": - component_dict["last"] = LastComponent( - component_args["last_prompt"] - ) - - # "demonstrations" - elif component == "cot": - component_dict["cot"] = CoTComponent( - component_args["demonstrations"] - ) - elif component == "CustomizeComponent": - component_dict["CustomizeComponent"] = CustomizeComponent( - component_args["template"], component_args["keywords"] - ) - - elif component == "system" : - component_dict["system"] = SystemComponent( - component_args["system_prompt"] - ) - - # =================================================================================# - - # "output" - elif component == "StaticComponent": - component_dict["StaticComponent"] = StaticComponent( - component_args["output"] - ) - - # "top_k" "type" "knowledge_base" "system_prompt" "last_prompt" - elif component == "KnowledgeBaseComponent": - component_dict["tool"] = KnowledgeBaseComponent( - component_args["top_k"], - component_args["type"], - component_args["knowledge_path"], - ) - - elif component == "CategoryRequirementsComponent": - component_dict[ - "CategoryRequirementsComponent" - ] = CategoryRequirementsComponent( - component_args["information_path"] - ) - - elif component == "FunctionComponent": - component_dict["FunctionComponent"] = FunctionComponent(component_args[""]) - # "short_memory_extract_words" "long_memory_extract_words" "system_prompt" "last_prompt" - elif component == "ExtractComponent": - component_dict["ExtractComponent"] = ExtractComponent( - component_args["extract_words"], - component_args["system_prompt"], - component_args["last_prompt"], - ) - elif component == "WebSearchComponent": - component_dict["WebSearchComponent"] = WebSearchComponent( - component_args["engine_name"], component_args["api"] - ) - elif component == "WebCrawlComponent": - component_dict["WebCrawlComponent"] = WebCrawlComponent( - component_args["name"] - ) - - elif component == "CodeComponent": - component_dict["CodeComponent"] = CodeComponent( - component_args["file_name"], 
component_args["keyword"] - ) - - # ==================================================== - else: - continue - - agent_states[role] = component_dict - - return agent_states diff --git a/spaces/Abhilashvj/planogram-compliance/segment/train.py b/spaces/Abhilashvj/planogram-compliance/segment/train.py deleted file mode 100644 index e663e2c6e286259bf126399b495b1bfe508b592d..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/segment/train.py +++ /dev/null @@ -1,1104 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 segment model on a segment dataset -Models and datasets download automatically from the latest YOLOv5 release. - -Usage - Single-GPU training: - $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) - $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 - -Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data -""" - -import argparse -import math -import os -import random -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import yaml -from torch.optim import lr_scheduler -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import segment.val as validate # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import SegmentationModel -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.downloads import attempt_download, is_url -from utils.general import ( - LOGGER, - TQDM_BAR_FORMAT, - check_amp, - check_dataset, - check_file, - check_git_info, - check_git_status, - check_img_size, - check_requirements, - check_suffix, - check_yaml, - colorstr, - get_latest_run, - increment_path, - init_seeds, - intersect_dicts, - labels_to_class_weights, - labels_to_image_weights, - one_cycle, - print_args, - print_mutation, - strip_optimizer, - yaml_save, -) -from utils.loggers import GenericLogger -from utils.plots import plot_evolve, plot_labels -from utils.segment.dataloaders import create_dataloader -from utils.segment.loss import ComputeLoss -from utils.segment.metrics import KEYS, fitness -from utils.segment.plots import plot_images_and_masks, plot_results_with_masks -from utils.torch_utils import ( - EarlyStopping, - ModelEMA, - de_parallel, - select_device, - smart_DDP, - smart_optimizer, - smart_resume, - torch_distributed_zero_first, -) - -LOCAL_RANK = int( - os.getenv("LOCAL_RANK", -1) -) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv("RANK", -1)) -WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) -GIT_INFO = check_git_info() - - -def train( - hyp, opt, device, callbacks -): # hyp is path/to/hyp.yaml or hyp dictionary - ( - save_dir, - epochs, - batch_size, - weights, - single_cls, - evolve, 
- data, - cfg, - resume, - noval, - nosave, - workers, - freeze, - mask_ratio, - ) = ( - Path(opt.save_dir), - opt.epochs, - opt.batch_size, - opt.weights, - opt.single_cls, - opt.evolve, - opt.data, - opt.cfg, - opt.resume, - opt.noval, - opt.nosave, - opt.workers, - opt.freeze, - opt.mask_ratio, - ) - # callbacks.run('on_pretrain_routine_start') - - # Directories - w = save_dir / "weights" # weights dir - (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / "last.pt", w / "best.pt" - - # Hyperparameters - if isinstance(hyp, str): - with open(hyp, errors="ignore") as f: - hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info( - colorstr("hyperparameters: ") - + ", ".join(f"{k}={v}" for k, v in hyp.items()) - ) - opt.hyp = hyp.copy() # for saving hyps to checkpoints - - # Save run settings - if not evolve: - yaml_save(save_dir / "hyp.yaml", hyp) - yaml_save(save_dir / "opt.yaml", vars(opt)) - - # Loggers - data_dict = None - if RANK in {-1, 0}: - logger = GenericLogger(opt=opt, console_logger=LOGGER) - - # Config - plots = not evolve and not opt.noplots # create plots - overlap = not opt.no_overlap - cuda = device.type != "cpu" - init_seeds(opt.seed + 1 + RANK, deterministic=True) - with torch_distributed_zero_first(LOCAL_RANK): - data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict["train"], data_dict["val"] - nc = 1 if single_cls else int(data_dict["nc"]) # number of classes - names = ( - {0: "item"} - if single_cls and len(data_dict["names"]) != 1 - else data_dict["names"] - ) # class names - is_coco = isinstance(val_path, str) and val_path.endswith( - "coco/val2017.txt" - ) # COCO dataset - - # Model - check_suffix(weights, ".pt") # check weights - pretrained = weights.endswith(".pt") - if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download( - weights - ) # download if not found locally - ckpt = torch.load( - weights, map_location="cpu" - ) # load checkpoint to CPU to avoid CUDA memory leak - model = SegmentationModel( - cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors") - ).to(device) - exclude = ( - ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] - ) # exclude keys - csd = ( - ckpt["model"].float().state_dict() - ) # checkpoint state_dict as FP32 - csd = intersect_dicts( - csd, model.state_dict(), exclude=exclude - ) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info( - f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}" - ) # report - else: - model = SegmentationModel( - cfg, ch=3, nc=nc, anchors=hyp.get("anchors") - ).to( - device - ) # create - amp = check_amp(model) # check AMP - - # Freeze - freeze = [ - f"model.{x}." 
- for x in (freeze if len(freeze) > 1 else range(freeze[0])) - ] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) - if any(x in k for x in freeze): - LOGGER.info(f"freezing {k}") - v.requires_grad = False - - # Image size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size( - opt.imgsz, gs, floor=gs * 2 - ) # verify imgsz is gs-multiple - - # Batch size - if ( - RANK == -1 and batch_size == -1 - ): # single-GPU only, estimate best batch size - batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) - # loggers.on_params_update({"batch_size": batch_size}) - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max( - round(nbs / batch_size), 1 - ) # accumulate loss before optimizing - hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer( - model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"] - ) - - # Scheduler - if opt.cos_lr: - lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf'] - else: - lf = ( - lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] - ) # linear - scheduler = lr_scheduler.LambdaLR( - optimizer, lr_lambda=lf - ) # plot_lr_scheduler(optimizer, scheduler, epochs) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # Resume - best_fitness, start_epoch = 0.0, 0 - if pretrained: - if resume: - best_fitness, start_epoch, epochs = smart_resume( - ckpt, optimizer, ema, weights, epochs, resume - ) - del ckpt, csd - - # DP mode - if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning( - "WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n" - "See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started." - ) - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and RANK != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info("Using SyncBatchNorm()") - - # Trainloader - train_loader, dataset = create_dataloader( - train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == "val" else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr("train: "), - shuffle=True, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - ) - labels = np.concatenate(dataset.labels, 0) - mlc = int(labels[:, 0].max()) # max label class - assert ( - mlc < nc - ), f"Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}" - - # Process 0 - if RANK in {-1, 0}: - val_loader = create_dataloader( - val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - mask_downsample_ratio=mask_ratio, - overlap_mask=overlap, - prefix=colorstr("val: "), - )[0] - - if not resume: - if not opt.noautoanchor: - check_anchors( - dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz - ) # run AutoAnchor - model.half().float() # pre-reduce anchor precision - - if plots: - plot_labels(labels, names, save_dir) - # callbacks.run('on_pretrain_routine_end', labels, names) - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Model attributes - nl = ( - de_parallel(model).model[-1].nl - ) # number of detection layers (to scale hyps) - hyp["box"] *= 3 / nl # scale to layers - hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers - hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp["label_smoothing"] = opt.label_smoothing - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.class_weights = ( - labels_to_class_weights(dataset.labels, nc).to(device) * nc - ) # attach class weights - model.names = names - - # Start training - t0 = time.time() - nb = len(train_loader) # number of batches - nw = max( - round(hyp["warmup_epochs"] * nb), 100 - ) # number of warmup iterations, max(3 epochs, 100 iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - last_opt_step = -1 - maps = np.zeros(nc) # mAP per class - results = ( - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper, stop = EarlyStopping(patience=opt.patience), False - compute_loss = ComputeLoss(model, overlap=overlap) # init loss class - # callbacks.run('on_train_start') - LOGGER.info( - f"Image sizes {imgsz} train, {imgsz} val\n" - f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n" - f"Logging results to {colorstr('bold', save_dir)}\n" - f"Starting training for {epochs} epochs..." 
- ) - for epoch in range( - start_epoch, epochs - ): # epoch ------------------------------------------------------------------ - # callbacks.run('on_train_epoch_start') - model.train() - - # Update image weights (optional, single-GPU only) - if opt.image_weights: - cw = ( - model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc - ) # class weights - iw = labels_to_image_weights( - dataset.labels, nc=nc, class_weights=cw - ) # image weights - dataset.indices = random.choices( - range(dataset.n), weights=iw, k=dataset.n - ) # rand weighted idx - - # Update mosaic border (optional) - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(4, device=device) # mean losses - if RANK != -1: - train_loader.sampler.set_epoch(epoch) - pbar = enumerate(train_loader) - LOGGER.info( - ("\n" + "%11s" * 8) - % ( - "Epoch", - "GPU_mem", - "box_loss", - "seg_loss", - "obj_loss", - "cls_loss", - "Instances", - "Size", - ) - ) - if RANK in {-1, 0}: - pbar = tqdm( - pbar, total=nb, bar_format=TQDM_BAR_FORMAT - ) # progress bar - optimizer.zero_grad() - for i, ( - imgs, - targets, - paths, - _, - masks, - ) in ( - pbar - ): # batch ------------------------------------------------------ - # callbacks.run('on_train_batch_start') - ni = ( - i + nb * epoch - ) # number integrated batches (since train start) - imgs = ( - imgs.to(device, non_blocking=True).float() / 255 - ) # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= nw: - xi = [0, nw] # x interp - # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max( - 1, np.interp(ni, xi, [1, nbs / batch_size]).round() - ) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x["lr"] = np.interp( - ni, - xi, - [ - hyp["warmup_bias_lr"] if j == 0 else 0.0, - x["initial_lr"] * lf(epoch), - ], - ) - if "momentum" in x: - x["momentum"] = np.interp( - ni, xi, [hyp["warmup_momentum"], hyp["momentum"]] - ) - - # Multi-scale - if opt.multi_scale: - sz = ( - random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs - ) # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [ - math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] - ] # new shape (stretched to gs-multiple) - imgs = nn.functional.interpolate( - imgs, size=ns, mode="bilinear", align_corners=False - ) - - # Forward - with torch.cuda.amp.autocast(amp): - pred = model(imgs) # forward - loss, loss_items = compute_loss( - pred, targets.to(device), masks=masks.to(device).float() - ) - if RANK != -1: - loss *= WORLD_SIZE # gradient averaged between devices in DDP mode - if opt.quad: - loss *= 4.0 - - # Backward - scaler.scale(loss).backward() - - # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html - if ni - last_opt_step >= accumulate: - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_( - model.parameters(), max_norm=10.0 - ) # clip gradients - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - last_opt_step = ni - - # Log - if RANK in {-1, 0}: - mloss = (mloss * i + loss_items) / ( - i + 1 - ) # update mean losses - mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) - pbar.set_description( - ("%11s" * 2 + "%11.4g" * 6) - % ( - f"{epoch}/{epochs - 1}", - mem, - *mloss, - targets.shape[0], - imgs.shape[-1], - ) - ) - # 
callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) - # if callbacks.stop_training: - # return - - # Mosaic plots - if plots: - if ni < 3: - plot_images_and_masks( - imgs, - targets, - masks, - paths, - save_dir / f"train_batch{ni}.jpg", - ) - if ni == 10: - files = sorted(save_dir.glob("train*.jpg")) - logger.log_images(files, "Mosaics", epoch) - # end batch ------------------------------------------------------------------------------------------------ - - # Scheduler - lr = [x["lr"] for x in optimizer.param_groups] # for loggers - scheduler.step() - - if RANK in {-1, 0}: - # mAP - # callbacks.run('on_train_epoch_end', epoch=epoch) - ema.update_attr( - model, - include=[ - "yaml", - "nc", - "hyp", - "names", - "stride", - "class_weights", - ], - ) - final_epoch = (epoch + 1 == epochs) or stopper.possible_stop - if not noval or final_epoch: # Calculate mAP - results, maps, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss, - mask_downsample_ratio=mask_ratio, - overlap=overlap, - ) - - # Update best mAP - fi = fitness( - np.array(results).reshape(1, -1) - ) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - stop = stopper(epoch=epoch, fitness=fi) # early stop check - if fi > best_fitness: - best_fitness = fi - log_vals = list(mloss) + list(results) + lr - # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) - # Log val metrics and media - metrics_dict = dict(zip(KEYS, log_vals)) - logger.log_metrics(metrics_dict, epoch) - - # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = { - "epoch": epoch, - "best_fitness": best_fitness, - "model": deepcopy(de_parallel(model)).half(), - "ema": deepcopy(ema.ema).half(), - "updates": ema.updates, - "optimizer": optimizer.state_dict(), - "opt": vars(opt), - "git": GIT_INFO, # {remote, branch, commit} if a git repo - "date": datetime.now().isoformat(), - } - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f"epoch{epoch}.pt") - logger.log_model(w / f"epoch{epoch}.pt") - del ckpt - # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - - # EarlyStopping - if RANK != -1: # if DDP training - broadcast_list = [stop if RANK == 0 else None] - dist.broadcast_object_list( - broadcast_list, 0 - ) # broadcast 'stop' to all ranks - if RANK != 0: - stop = broadcast_list[0] - if stop: - break # must break all DDP ranks - - # end epoch ---------------------------------------------------------------------------------------------------- - # end training ----------------------------------------------------------------------------------------------------- - if RANK in {-1, 0}: - LOGGER.info( - f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours." 
- ) - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if f is best: - LOGGER.info(f"\nValidating {f}...") - results, _, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 - if is_coco - else 0.60, # best pycocotools at iou 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=plots, - callbacks=callbacks, - compute_loss=compute_loss, - mask_downsample_ratio=mask_ratio, - overlap=overlap, - ) # val best model with plots - if is_coco: - # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - metrics_dict = dict( - zip(KEYS, list(mloss) + list(results) + lr) - ) - logger.log_metrics(metrics_dict, epoch) - - # callbacks.run('on_train_end', last, best, epoch, results) - # on train end callback using genericLogger - logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) - if not opt.evolve: - logger.log_model(best, epoch) - if plots: - plot_results_with_masks( - file=save_dir / "results.csv" - ) # save results.png - files = [ - "results.png", - "confusion_matrix.png", - *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")), - ] - files = [ - (save_dir / f) for f in files if (save_dir / f).exists() - ] # filter - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images( - sorted(save_dir.glob("val*.jpg")), "Validation", epoch + 1 - ) - torch.cuda.empty_cache() - return results - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument( - "--weights", - type=str, - default=ROOT / "yolov5s-seg.pt", - help="initial weights path", - ) - parser.add_argument("--cfg", type=str, default="", help="model.yaml path") - parser.add_argument( - "--data", - type=str, - default=ROOT / "data/coco128-seg.yaml", - help="dataset.yaml path", - ) - parser.add_argument( - "--hyp", - type=str, - default=ROOT / "data/hyps/hyp.scratch-low.yaml", - help="hyperparameters path", - ) - parser.add_argument( - "--epochs", type=int, default=100, help="total training epochs" - ) - parser.add_argument( - "--batch-size", - type=int, - default=16, - help="total batch size for all GPUs, -1 for autobatch", - ) - parser.add_argument( - "--imgsz", - "--img", - "--img-size", - type=int, - default=640, - help="train, val image size (pixels)", - ) - parser.add_argument( - "--rect", action="store_true", help="rectangular training" - ) - parser.add_argument( - "--resume", - nargs="?", - const=True, - default=False, - help="resume most recent training", - ) - parser.add_argument( - "--nosave", action="store_true", help="only save final checkpoint" - ) - parser.add_argument( - "--noval", action="store_true", help="only validate final epoch" - ) - parser.add_argument( - "--noautoanchor", action="store_true", help="disable AutoAnchor" - ) - parser.add_argument( - "--noplots", action="store_true", help="save no plot files" - ) - parser.add_argument( - "--evolve", - type=int, - nargs="?", - const=300, - help="evolve hyperparameters for x generations", - ) - parser.add_argument("--bucket", type=str, default="", help="gsutil bucket") - parser.add_argument( - "--cache", - type=str, - nargs="?", - const="ram", - help="image --cache ram/disk", - ) - parser.add_argument( - "--image-weights", - action="store_true", - help="use weighted image selection for training", - ) - parser.add_argument( - "--device", 
default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu" - ) - parser.add_argument( - "--multi-scale", action="store_true", help="vary img-size +/- 50%%" - ) - parser.add_argument( - "--single-cls", - action="store_true", - help="train multi-class data as single-class", - ) - parser.add_argument( - "--optimizer", - type=str, - choices=["SGD", "Adam", "AdamW"], - default="SGD", - help="optimizer", - ) - parser.add_argument( - "--sync-bn", - action="store_true", - help="use SyncBatchNorm, only available in DDP mode", - ) - parser.add_argument( - "--workers", - type=int, - default=8, - help="max dataloader workers (per RANK in DDP mode)", - ) - parser.add_argument( - "--project", - default=ROOT / "runs/train-seg", - help="save to project/name", - ) - parser.add_argument("--name", default="exp", help="save to project/name") - parser.add_argument( - "--exist-ok", - action="store_true", - help="existing project/name ok, do not increment", - ) - parser.add_argument("--quad", action="store_true", help="quad dataloader") - parser.add_argument( - "--cos-lr", action="store_true", help="cosine LR scheduler" - ) - parser.add_argument( - "--label-smoothing", - type=float, - default=0.0, - help="Label smoothing epsilon", - ) - parser.add_argument( - "--patience", - type=int, - default=100, - help="EarlyStopping patience (epochs without improvement)", - ) - parser.add_argument( - "--freeze", - nargs="+", - type=int, - default=[0], - help="Freeze layers: backbone=10, first3=0 1 2", - ) - parser.add_argument( - "--save-period", - type=int, - default=-1, - help="Save checkpoint every x epochs (disabled if < 1)", - ) - parser.add_argument( - "--seed", type=int, default=0, help="Global training seed" - ) - parser.add_argument( - "--local_rank", - type=int, - default=-1, - help="Automatic DDP Multi-GPU argument, do not modify", - ) - - # Instance Segmentation Args - parser.add_argument( - "--mask-ratio", - type=int, - default=4, - help="Downsample the truth masks to saving memory", - ) - parser.add_argument( - "--no-overlap", - action="store_true", - help="Overlap masks train faster at slightly less mAP", - ) - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt, callbacks=Callbacks()): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # Resume - if ( - opt.resume and not opt.evolve - ): # resume from specified or most recent last.pt - last = Path( - check_file(opt.resume) - if isinstance(opt.resume, str) - else get_latest_run() - ) - opt_yaml = last.parent.parent / "opt.yaml" # train options yaml - opt_data = opt.data # original dataset - if opt_yaml.is_file(): - with open(opt_yaml, errors="ignore") as f: - d = yaml.safe_load(f) - else: - d = torch.load(last, map_location="cpu")["opt"] - opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = "", str(last), True # reinstate - if is_url(opt_data): - opt.data = check_file(opt_data) # avoid HUB resume auth timeout - else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = ( - check_file(opt.data), - check_yaml(opt.cfg), - check_yaml(opt.hyp), - str(opt.weights), - str(opt.project), - ) # checks - assert len(opt.cfg) or len( - opt.weights - ), "either --cfg or --weights must be specified" - if opt.evolve: - if opt.project == str( - ROOT / "runs/train" - ): # if default project name, rename to runs/evolve - opt.project = str(ROOT / "runs/evolve") - opt.exist_ok, opt.resume = ( - opt.resume, - False, - ) # pass resume to exist_ok and disable resume 
- if opt.name == "cfg": - opt.name = Path(opt.cfg).stem # use model.yaml as name - opt.save_dir = str( - increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) - ) - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - msg = "is not compatible with YOLOv5 Multi-GPU DDP training" - assert not opt.image_weights, f"--image-weights {msg}" - assert not opt.evolve, f"--evolve {msg}" - assert ( - opt.batch_size != -1 - ), f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size" - assert ( - opt.batch_size % WORLD_SIZE == 0 - ), f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE" - assert ( - torch.cuda.device_count() > LOCAL_RANK - ), "insufficient CUDA devices for DDP command" - torch.cuda.set_device(LOCAL_RANK) - device = torch.device("cuda", LOCAL_RANK) - dist.init_process_group( - backend="nccl" if dist.is_nccl_available() else "gloo" - ) - - # Train - if not opt.evolve: - train(opt.hyp, opt, device, callbacks) - - # Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = { - "lr0": ( - 1, - 1e-5, - 1e-1, - ), # initial learning rate (SGD=1E-2, Adam=1E-3) - "lrf": ( - 1, - 0.01, - 1.0, - ), # final OneCycleLR learning rate (lr0 * lrf) - "momentum": (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - "weight_decay": (1, 0.0, 0.001), # optimizer weight decay - "warmup_epochs": (1, 0.0, 5.0), # warmup epochs (fractions ok) - "warmup_momentum": (1, 0.0, 0.95), # warmup initial momentum - "warmup_bias_lr": (1, 0.0, 0.2), # warmup initial bias lr - "box": (1, 0.02, 0.2), # box loss gain - "cls": (1, 0.2, 4.0), # cls loss gain - "cls_pw": (1, 0.5, 2.0), # cls BCELoss positive_weight - "obj": (1, 0.2, 4.0), # obj loss gain (scale with pixels) - "obj_pw": (1, 0.5, 2.0), # obj BCELoss positive_weight - "iou_t": (0, 0.1, 0.7), # IoU training threshold - "anchor_t": (1, 2.0, 8.0), # anchor-multiple threshold - "anchors": (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - "fl_gamma": ( - 0, - 0.0, - 2.0, - ), # focal loss gamma (efficientDet default gamma=1.5) - "hsv_h": (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - "hsv_s": ( - 1, - 0.0, - 0.9, - ), # image HSV-Saturation augmentation (fraction) - "hsv_v": (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - "degrees": (1, 0.0, 45.0), # image rotation (+/- deg) - "translate": (1, 0.0, 0.9), # image translation (+/- fraction) - "scale": (1, 0.0, 0.9), # image scale (+/- gain) - "shear": (1, 0.0, 10.0), # image shear (+/- deg) - "perspective": ( - 0, - 0.0, - 0.001, - ), # image perspective (+/- fraction), range 0-0.001 - "flipud": (1, 0.0, 1.0), # image flip up-down (probability) - "fliplr": (0, 0.0, 1.0), # image flip left-right (probability) - "mosaic": (1, 0.0, 1.0), # image mixup (probability) - "mixup": (1, 0.0, 1.0), # image mixup (probability) - "copy_paste": (1, 0.0, 1.0), - } # segment copy-paste (probability) - - with open(opt.hyp, errors="ignore") as f: - hyp = yaml.safe_load(f) # load hyps dict - if "anchors" not in hyp: # anchors commented in hyp.yaml - hyp["anchors"] = 3 - if opt.noautoanchor: - del hyp["anchors"], meta["anchors"] - opt.noval, opt.nosave, save_dir = ( - True, - True, - Path(opt.save_dir), - ) # only val/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - evolve_yaml, evolve_csv = ( - save_dir / "hyp_evolve.yaml", - save_dir / "evolve.csv", - ) - if opt.bucket: - os.system( - f"gsutil cp 
gs://{opt.bucket}/evolve.csv {evolve_csv}" - ) # download evolve.csv if exists - - for _ in range(opt.evolve): # generations to evolve - if ( - evolve_csv.exists() - ): # if evolve.csv exists: select best hyps and mutate - # Select parent(s) - parent = ( - "single" # parent selection method: 'single' or 'weighted' - ) - x = np.loadtxt(evolve_csv, ndmin=2, delimiter=",", skiprows=1) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() + 1e-6 # weights (sum > 0) - if parent == "single" or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[ - random.choices(range(n), weights=w)[0] - ] # weighted selection - elif parent == "weighted": - x = (x * w.reshape(n, 1)).sum( - 0 - ) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all( - v == 1 - ): # mutate until a change occurs (prevent duplicates) - v = ( - g - * (npr.random(ng) < mp) - * npr.randn(ng) - * npr.random() - * s - + 1 - ).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) - - # Plot results - plot_evolve(evolve_csv) - LOGGER.info( - f"Hyperparameter evolution finished {opt.evolve} generations\n" - f"Results saved to {colorstr('bold', save_dir)}\n" - f"Usage example: $ python train.py --hyp {evolve_yaml}" - ) - - -def run(**kwargs): - # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/auth.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/auth.ts deleted file mode 100644 index 96793da575f6f01695d714a71d45ee181b9e1964..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/server/auth.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { Issuer, BaseClient, type UserinfoResponse, TokenSet } from "openid-client"; -import { addHours, addYears } from "date-fns"; -import { - COOKIE_NAME, - OPENID_CLIENT_ID, - OPENID_CLIENT_SECRET, - OPENID_PROVIDER_URL, - OPENID_SCOPES, -} from "$env/static/private"; -import { sha256 } from "$lib/utils/sha256"; -import { z } from "zod"; -import { dev } from "$app/environment"; -import type { Cookies } from "@sveltejs/kit"; - -export interface OIDCSettings { - redirectURI: string; -} - -export interface OIDCUserInfo { - token: TokenSet; - userData: UserinfoResponse; -} - -export const requiresUser = !!OPENID_CLIENT_ID && !!OPENID_CLIENT_SECRET; - -export function refreshSessionCookie(cookies: Cookies, sessionId: string) { - cookies.set(COOKIE_NAME, sessionId, { - path: "/", - // So that it works inside the space's iframe - sameSite: dev ? 
"lax" : "none", - secure: !dev, - httpOnly: true, - expires: addYears(new Date(), 1), - }); -} - -export const authCondition = (locals: App.Locals) => { - return locals.user - ? { userId: locals.user._id } - : { sessionId: locals.sessionId, userId: { $exists: false } }; -}; - -/** - * Generates a CSRF token using the user sessionId. Note that we don't need a secret because sessionId is enough. - */ -export async function generateCsrfToken(sessionId: string, redirectUrl: string): Promise { - const data = { - expiration: addHours(new Date(), 1).getTime(), - redirectUrl, - }; - - return Buffer.from( - JSON.stringify({ - data, - signature: await sha256(JSON.stringify(data) + "##" + sessionId), - }) - ).toString("base64"); -} - -async function getOIDCClient(settings: OIDCSettings): Promise { - const issuer = await Issuer.discover(OPENID_PROVIDER_URL); - return new issuer.Client({ - client_id: OPENID_CLIENT_ID, - client_secret: OPENID_CLIENT_SECRET, - redirect_uris: [settings.redirectURI], - response_types: ["code"], - }); -} - -export async function getOIDCAuthorizationUrl( - settings: OIDCSettings, - params: { sessionId: string } -): Promise { - const client = await getOIDCClient(settings); - const csrfToken = await generateCsrfToken(params.sessionId, settings.redirectURI); - const url = client.authorizationUrl({ - scope: OPENID_SCOPES, - state: csrfToken, - }); - - return url; -} - -export async function getOIDCUserData(settings: OIDCSettings, code: string): Promise { - const client = await getOIDCClient(settings); - const token = await client.callback(settings.redirectURI, { code }); - const userData = await client.userinfo(token); - - return { token, userData }; -} - -export async function validateAndParseCsrfToken( - token: string, - sessionId: string -): Promise<{ - /** This is the redirect url that was passed to the OIDC provider */ - redirectUrl: string; -} | null> { - try { - const { data, signature } = z - .object({ - data: z.object({ - expiration: z.number().int(), - redirectUrl: z.string().url(), - }), - signature: z.string().length(64), - }) - .parse(JSON.parse(token)); - const reconstructSign = await sha256(JSON.stringify(data) + "##" + sessionId); - - if (data.expiration > Date.now() && signature === reconstructSign) { - return { redirectUrl: data.redirectUrl }; - } - } catch (e) { - console.error(e); - } - return null; -} diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wuguokai.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wuguokai.py deleted file mode 100644 index 311131cf32c3d603761423664bf4552d061e51a6..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wuguokai.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -import random - -import requests - -from ...typing import Any, CreateResult -from ..base_provider import BaseProvider, format_prompt - - -class Wuguokai(BaseProvider): - url = 'https://chat.wuguokai.xyz' - supports_gpt_35_turbo = True - working = False - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs: Any, - ) -> CreateResult: - headers = { - 'authority': 'ai-api.wuguokai.xyz', - 'accept': 'application/json, text/plain, */*', - 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', - 'content-type': 'application/json', - 'origin': 'https://chat.wuguokai.xyz', - 'referer': 'https://chat.wuguokai.xyz/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 
'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - } - data ={ - "prompt": format_prompt(messages), - "options": {}, - "userId": f"#/chat/{random.randint(1,99999999)}", - "usingContext": True - } - response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) - _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试") - if response.status_code == 200: - if len(_split) > 1: - yield _split[1].strip() - else: - yield _split[0].strip() - else: - raise Exception(f"Error: {response.status_code} {response.reason}") - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool") - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/Aer0xander/sd-to-diffusers/hf_utils.py b/spaces/Aer0xander/sd-to-diffusers/hf_utils.py deleted file mode 100644 index 269eb5e9755b1af33e677977cb657f34689e11d5..0000000000000000000000000000000000000000 --- a/spaces/Aer0xander/sd-to-diffusers/hf_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -from huggingface_hub import get_hf_file_metadata, hf_hub_url, hf_hub_download, scan_cache_dir, whoami, list_models - - -def get_my_model_names(token): - - try: - author = whoami(token=token) - model_infos = list_models(author=author["name"], use_auth_token=token) - return [model.modelId for model in model_infos], None - - except Exception as e: - return [], e - -def download_file(repo_id: str, filename: str, token: str): - """Download a file from a repo on the Hugging Face Hub. - - Returns: - file_path (:obj:`str`): The path to the downloaded file. - revision (:obj:`str`): The commit hash of the file. - """ - - md = get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename=filename), token=token) - revision = md.commit_hash - - file_path = hf_hub_download(repo_id=repo_id, filename=filename, revision=revision, token=token) - - return file_path, revision - -def delete_file(revision: str): - """Delete a file from local cache. - - Args: - revision (:obj:`str`): The commit hash of the file. 
- Returns: - None - """ - scan_cache_dir().delete_revisions(revision).execute() - -def get_pr_url(api, repo_id, title): - try: - discussions = api.get_repo_discussions(repo_id=repo_id) - except Exception: - return None - for discussion in discussions: - if ( - discussion.status == "open" - and discussion.is_pull_request - and discussion.title == title - ): - return f"https://huggingface.co/{repo_id}/discussions/{discussion.num}" \ No newline at end of file diff --git a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/longcode/prod_cons.h b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/longcode/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/longcode/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.cpp b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.cpp deleted file mode 100644 index 4e253d1f3ffe84e54e667bf61a45dfe66264a73c..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.cpp +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "filtered_lrelu.h" - -//------------------------------------------------------------------------ - -static std::tuple filtered_lrelu( - torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, - int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); - TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); - TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); - TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); - TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); - TORCH_CHECK(fu.numel() > 0, "fu is empty"); - TORCH_CHECK(fd.numel() > 0, "fd is empty"); - TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); - TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); - - // Figure out how much shared memory is available on the device. - int maxSharedBytes = 0; - AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); - int sharedKB = maxSharedBytes >> 10; - - // Populate enough launch parameters to check if a CUDA kernel exists. - filtered_lrelu_kernel_params p; - p.up = up; - p.down = down; - p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? 
(int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. - p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); - filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); - if (!test_spec.exec) - { - // No kernel found - return empty tensors and indicate missing kernel with return code of -1. - return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); - } - - // Input/output element size. - int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; - - // Input sizes. - int64_t xw = (int)x.size(3); - int64_t xh = (int)x.size(2); - int64_t fut_w = (int)fu.size(-1) - 1; - int64_t fut_h = (int)fu.size(0) - 1; - int64_t fdt_w = (int)fd.size(-1) - 1; - int64_t fdt_h = (int)fd.size(0) - 1; - - // Logical size of upsampled buffer. - int64_t cw = xw * up + (px0 + px1) - fut_w; - int64_t ch = xh * up + (py0 + py1) - fut_h; - TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); - TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); - - // Compute output size and allocate. - int64_t yw = (cw - fdt_w + (down - 1)) / down; - int64_t yh = (ch - fdt_h + (down - 1)) / down; - TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); - TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); - - // Allocate sign tensor. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - int64_t sw_active = 0; // Active width of sign tensor. - if (writeSigns) - { - sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. - int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. - int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. - TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); - s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - else if (readSigns) - sw_active = s.size(3) << 2; - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); - } - - // Populate rest of CUDA kernel parameters. - p.x = x.data_ptr(); - p.y = y.data_ptr(); - p.b = b.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.fu = fu.data_ptr(); - p.fd = fd.data_ptr(); - p.pad0 = make_int2(px0, py0); - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.flip = (flip_filters) ? 1 : 0; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. - p.sOfs = make_int2(sx, sy); - p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. - - // x, y, b strides are in bytes. 
- p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); - p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); - p.bStride = sz * b.stride(0); - - // fu, fd strides are in elements. - p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); - p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); - - // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. - bool index64b = false; - if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; - if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; - if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; - if (s.numel() > INT_MAX) index64b = true; - - // Choose CUDA kernel. - filtered_lrelu_kernel_spec spec = { 0 }; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] - { - if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. - { - // Choose kernel based on index type, datatype and sign read/write modes. - if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - } - }); - TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = spec.numWarps * 32; - int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; - int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; - int gz = p.yShape.z * p.yShape.w; - - // Repeat multiple horizontal tiles in a CTA? - if (spec.xrep) - { - p.tilesXrep = spec.xrep; - p.tilesXdim = gx; - - gx = (gx + p.tilesXrep - 1) / p.tilesXrep; - std::swap(gx, gy); - } - else - { - p.tilesXrep = 0; - p.tilesXdim = 0; - } - - // Launch filter setup kernel. - AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); - - // Copy kernels to constant memory. 
- if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - - // Set cache and shared memory configurations for main kernel. - AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); - if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? - AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); - AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); - - // Launch main kernel. - const int maxSubGz = 65535; // CUDA maximum for block z dimension. - for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. - { - p.blockZofs = zofs; - int subGz = std::min(maxSubGz, gz - zofs); - AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); - } - - // Done. - return std::make_tuple(y, so, 0); -} - -//------------------------------------------------------------------------ - -static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); - - // Output signs if we don't have sign input. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - if (writeSigns) - { - int64_t sw = x.size(3); - sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. - s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); - } - - // Initialize CUDA kernel parameters. - filtered_lrelu_act_kernel_params p; - p.x = x.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. - p.sOfs = make_int2(sx, sy); - - // Choose CUDA kernel. 
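The sign buffers above are padded with the `(x + 15) & ~15` idiom, which rounds a size up to the next multiple of 16 so that rows stay aligned for coalesced access. A tiny editor-added illustration of that idiom follows; it is language-agnostic and shown here in Python only for brevity:

```python
# Editor-added illustration: rounding a width up to the next multiple of 16
# with a bit mask, as done for the packed sign buffers above.
def round_up_16(x: int) -> int:
    return (x + 15) & ~15

assert round_up_16(0) == 0
assert round_up_16(1) == 16
assert round_up_16(16) == 16
assert round_up_16(17) == 32
# This works because 16 is a power of two: adding 15 carries into the 16s
# place whenever any of the low four bits are set, and ~15 clears those bits.
```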
- void* func = 0; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] - { - if (writeSigns) - func = choose_filtered_lrelu_act_kernel(); - else if (readSigns) - func = choose_filtered_lrelu_act_kernel(); - else - func = choose_filtered_lrelu_act_kernel(); - }); - TORCH_CHECK(func, "internal error - CUDA kernel not found"); - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = 128; // 4 warps per block. - - // Logical size of launch = writeSigns ? p.s : p.x - uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; - uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; - uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. - gx = (gx - 1) / bx + 1; - - // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. - const uint32_t gmax = 65535; - gy = std::min(gy, gmax); - gz = std::min(gz, gmax); - - // Launch. - AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); - return so; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. - m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. -} - -//------------------------------------------------------------------------ \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_ve.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_ve.py deleted file mode 100644 index 339edfbb02eb6ac0f79b3969004418bb29e212b5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_ve.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch - -import math -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput, randn_tensor -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -@dataclass -class SdeVeOutput(BaseOutput): - """ - Output class for the ScoreSdeVeScheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps. 
- """ - - prev_sample: torch.FloatTensor - prev_sample_mean: torch.FloatTensor - - -class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): - """ - The variance exploding stochastic differential equation (SDE) scheduler. - - For more information, see the original paper: https://arxiv.org/abs/2011.13456 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - snr (`float`): - coefficient weighting the step from the model_output sample (from the network) to the random noise. - sigma_min (`float`): - initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the - distribution of the data. - sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model. - sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to - epsilon. - correct_steps (`int`): number of correction steps performed on a produced sample. - """ - - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 2000, - snr: float = 0.15, - sigma_min: float = 0.01, - sigma_max: float = 1348.0, - sampling_eps: float = 1e-5, - correct_steps: int = 1, - ): - # standard deviation of the initial noise distribution - self.init_noise_sigma = sigma_max - - # setable values - self.timesteps = None - - self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) - - def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def set_timesteps( - self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None - ): - """ - Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sampling_eps (`float`, optional): - final timestep value (overrides value given at Scheduler instantiation). - - """ - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - - self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) - - def set_sigmas( - self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None - ): - """ - Sets the noise scales used for the diffusion chain. Supporting function to be run before inference. - - The sigmas control the weight of the `drift` and `diffusion` components of sample update. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sigma_min (`float`, optional): - initial noise scale value (overrides value given at Scheduler instantiation). 
- sigma_max (`float`, optional): - final noise scale value (overrides value given at Scheduler instantiation). - sampling_eps (`float`, optional): - final timestep value (overrides value given at Scheduler instantiation). - - """ - sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min - sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - if self.timesteps is None: - self.set_timesteps(num_inference_steps, sampling_eps) - - self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) - self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) - self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) - - def get_adjacent_sigma(self, timesteps, t): - return torch.where( - timesteps == 0, - torch.zeros_like(t.to(timesteps.device)), - self.discrete_sigmas[timesteps - 1].to(timesteps.device), - ) - - def step_pred( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[SdeVeOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - timestep = timestep * torch.ones( - sample.shape[0], device=sample.device - ) # torch.repeat_interleave(timestep, sample.shape[0]) - timesteps = (timestep * (len(self.timesteps) - 1)).long() - - # mps requires indices to be in the same device, so we use cpu as is the default with cuda - timesteps = timesteps.to(self.discrete_sigmas.device) - - sigma = self.discrete_sigmas[timesteps].to(sample.device) - adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) - drift = torch.zeros_like(sample) - diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 - - # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) - # also equation 47 shows the analog from SDE models to ancestral sampling methods - diffusion = diffusion.flatten() - while len(diffusion.shape) < len(sample.shape): - diffusion = diffusion.unsqueeze(-1) - drift = drift - diffusion**2 * model_output - - # equation 6: sample noise for the diffusion term of - noise = randn_tensor( - sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype - ) - prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep - # TODO is the variable diffusion the correct scaling term for the noise? 
- prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g - - if not return_dict: - return (prev_sample, prev_sample_mean) - - return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) - - def step_correct( - self, - model_output: torch.FloatTensor, - sample: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Correct the predicted sample based on the output model_output of the network. This is often run repeatedly - after making the prediction for the previous timestep. - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" - # sample noise for correction - noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device) - - # compute step size from the model_output, the noise, and the snr - grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() - noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() - step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 - step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) - # self.repeat_scalar(step_size, sample.shape[0]) - - # compute corrected sample: model_output term and noise term - step_size = step_size.flatten() - while len(step_size.shape) < len(sample.shape): - step_size = step_size.unsqueeze(-1) - prev_sample_mean = sample + step_size * model_output - prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - timesteps = timesteps.to(original_samples.device) - sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps] - noise = ( - noise * sigmas[:, None, None, None] - if noise is not None - else torch.randn_like(original_samples) * sigmas[:, None, None, None] - ) - noisy_samples = noise + original_samples - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index e742d9a5ec2b6addf829cb802de27ea1afd53301..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 
'./deeplabv3_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py deleted file mode 100644 index 600b701a7194ead496cc924bee897b6096e1c7ca..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - backbone=dict(stem_channels=128), - decode_head=dict(num_classes=150), - auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 1549a4d5bf10cd3fd6e3bd57bf7a48e7e5e1ede8..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/scatter_gather.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/scatter_gather.py deleted file mode 100644 index 900ff88566f8f14830590459dc4fd16d4b382e47..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/scatter_gather.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch.nn.parallel._functions import Scatter as OrigScatter - -from ._functions import Scatter -from .data_container import DataContainer - - -def scatter(inputs, target_gpus, dim=0): - """Scatter inputs to target gpus. - - The only difference from original :func:`scatter` is to add support for - :type:`~mmcv.parallel.DataContainer`. - """ - - def scatter_map(obj): - if isinstance(obj, torch.Tensor): - if target_gpus != [-1]: - return OrigScatter.apply(target_gpus, None, dim, obj) - else: - # for CPU inference we use self-implemented scatter - return Scatter.forward(target_gpus, obj) - if isinstance(obj, DataContainer): - if obj.cpu_only: - return obj.data - else: - return Scatter.forward(target_gpus, obj.data) - if isinstance(obj, tuple) and len(obj) > 0: - return list(zip(*map(scatter_map, obj))) - if isinstance(obj, list) and len(obj) > 0: - out = list(map(list, zip(*map(scatter_map, obj)))) - return out - if isinstance(obj, dict) and len(obj) > 0: - out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) - return out - return [obj for targets in target_gpus] - - # After scatter_map is called, a scatter_map cell will exist. This cell - # has a reference to the actual function scatter_map, which has references - # to a closure that has a reference to the scatter_map cell (because the - # fn is recursive). 
To avoid this reference cycle, we set the function to - # None, clearing the cell - try: - return scatter_map(inputs) - finally: - scatter_map = None - - -def scatter_kwargs(inputs, kwargs, target_gpus, dim=0): - """Scatter with support for kwargs dictionary.""" - inputs = scatter(inputs, target_gpus, dim) if inputs else [] - kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] - if len(inputs) < len(kwargs): - inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) - elif len(kwargs) < len(inputs): - kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) - inputs = tuple(inputs) - kwargs = tuple(kwargs) - return inputs, kwargs diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/cldm/hack.py b/spaces/Anonymous-sub/Rerender/ControlNet/cldm/hack.py deleted file mode 100644 index 454361e9d036cd1a6a79122c2fd16b489e4767b1..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/cldm/hack.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import einops - -import ldm.modules.encoders.modules -import ldm.modules.attention - -from transformers import logging -from ldm.modules.attention import default - - -def disable_verbosity(): - logging.set_verbosity_error() - print('logging improved.') - return - - -def enable_sliced_attention(): - ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward - print('Enabled sliced_attention.') - return - - -def hack_everything(clip_skip=0): - disable_verbosity() - ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward - ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip - print('Enabled clip hacks.') - return - - -# Written by Lvmin -def _hacked_clip_forward(self, text): - PAD = self.tokenizer.pad_token_id - EOS = self.tokenizer.eos_token_id - BOS = self.tokenizer.bos_token_id - - def tokenize(t): - return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] - - def transformer_encode(t): - if self.clip_skip > 1: - rt = self.transformer(input_ids=t, output_hidden_states=True) - return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) - else: - return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state - - def split(x): - return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] - - def pad(x, p, i): - return x[:i] if len(x) >= i else x + [p] * (i - len(x)) - - raw_tokens_list = tokenize(text) - tokens_list = [] - - for raw_tokens in raw_tokens_list: - raw_tokens_123 = split(raw_tokens) - raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] - raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] - tokens_list.append(raw_tokens_123) - - tokens_list = torch.IntTensor(tokens_list).to(self.device) - - feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') - y = transformer_encode(feed) - z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) - - return z - - -# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py -def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - limit = k.shape[0] - att_step = 1 - q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) - k_chunks = list(torch.tensor_split(k, limit // att_step, 
dim=0)) - v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) - - q_chunks.reverse() - k_chunks.reverse() - v_chunks.reverse() - sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - del k, q, v - for i in range(0, limit, att_step): - q_buffer = q_chunks.pop() - k_buffer = k_chunks.pop() - v_buffer = v_chunks.pop() - sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale - - del k_buffer, q_buffer - # attention, what we cannot get enough of, by chunks - - sim_buffer = sim_buffer.softmax(dim=-1) - - sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) - del v_buffer - sim[i:i + att_step, :, :] = sim_buffer - - del sim_buffer - sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) - return self.to_out(sim) diff --git a/spaces/Apex-X/GODROOP/predictor.py b/spaces/Apex-X/GODROOP/predictor.py deleted file mode 100644 index 877fd725d21bddf5e788677eefbc917ddc79f52b..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/GODROOP/predictor.py +++ /dev/null @@ -1,22 +0,0 @@ -import threading -import numpy -from PIL import Image - -from roop.typing import Frame - -# Define any other necessary variables or constants here - -def predict_frame(target_frame: Frame) -> bool: - # Modify this function as needed for your specific use case, without NSFW prediction - # For example, you can implement custom image analysis or processing here - return False - -def predict_image(target_path: str) -> bool: - # Modify this function as needed for your specific use case, without NSFW prediction - # For example, you can check the image based on your application's requirements - return False - -def predict_video(target_path: str) -> bool: - # Modify this function as needed for your specific use case, without NSFW prediction - # For example, you can analyze video frames for other purposes - return False diff --git a/spaces/Artples/llama-2-7b-chat/app.py b/spaces/Artples/llama-2-7b-chat/app.py deleted file mode 100644 index 4abcd26d2297880a033103381b9efa0cdb9bb916..0000000000000000000000000000000000000000 --- a/spaces/Artples/llama-2-7b-chat/app.py +++ /dev/null @@ -1,467 +0,0 @@ -"""Run codes.""" -# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring -# ruff: noqa: E501 -import os -import platform -import random -import time -from dataclasses import asdict, dataclass -from pathlib import Path - -# from types import SimpleNamespace -import gradio as gr -import psutil -from about_time import about_time -from ctransformers import AutoModelForCausalLM -from dl_hf_model import dl_hf_model -from loguru import logger - -filename_list = [ - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin", - "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin", -] - -URL = 
"https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin" # 4.05G - -url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin" -url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin" # 7.37G -# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin" -url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin" # 6.93G -# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.binhttps://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin" # 7.87G - -url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin" # 7.37G - -_ = ( - "golay" in platform.node() - or "okteto" in platform.node() - or Path("/kaggle").exists() - # or psutil.cpu_count(logical=False) < 4 - or 1 # run 7b in hf -) - -if _: - # url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin" - url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin" # 2.87G - url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin" # 2.87G - - -prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request. - -### Instruction: {user_prompt} - -### Response: -""" - -prompt_template = """System: You are a helpful, -respectful and honest assistant. Always answer as -helpfully as possible, while being safe. Your answers -should not include any harmful, unethical, racist, -sexist, toxic, dangerous, or illegal content. Please -ensure that your responses are socially unbiased and -positive in nature. If a question does not make any -sense, or is not factually coherent, explain why instead -of answering something not correct. If you don't know -the answer to a question, please don't share false -information. -User: {prompt} -Assistant: """ - -prompt_template = """System: You are a helpful assistant. -User: {prompt} -Assistant: """ - -prompt_template = """Question: {question} -Answer: Let's work this out in a step by step way to be sure we have the right answer.""" - -prompt_template = """[INST] <> -You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step. -<> - -What NFL team won the Super Bowl in the year Justin Bieber was born? -[/INST]""" - -prompt_template = """[INST] <> -You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <> - -{question} [/INST] -""" - -prompt_template = """[INST] <> -You are a helpful assistant. 
-<> - -{question} [/INST] -""" - -_ = [elm for elm in prompt_template.splitlines() if elm.strip()] -stop_string = [elm.split(":")[0] + ":" for elm in _][-2] - -logger.debug(f"{stop_string=}") - -_ = psutil.cpu_count(logical=False) - 1 -cpu_count: int = int(_) if _ else 1 -logger.debug(f"{cpu_count=}") - -LLM = None - -try: - model_loc, file_size = dl_hf_model(url) -except Exception as exc_: - logger.error(exc_) - raise SystemExit(1) from exc_ - -LLM = AutoModelForCausalLM.from_pretrained( - model_loc, - model_type="llama", - # threads=cpu_count, -) - -logger.info(f"done load llm {model_loc=} {file_size=}G") - -os.environ["TZ"] = "Asia/Shanghai" -try: - time.tzset() # type: ignore # pylint: disable=no-member -except Exception: - # Windows - logger.warning("Windows, cant run time.tzset()") - -_ = """ -ns = SimpleNamespace( - response="", - generator=(_ for _ in []), -) -# """ - -@dataclass -class GenerationConfig: - temperature: float = 0.7 - top_k: int = 50 - top_p: float = 0.9 - repetition_penalty: float = 1.0 - max_new_tokens: int = 512 - seed: int = 42 - reset: bool = False - stream: bool = True - # threads: int = cpu_count - # stop: list[str] = field(default_factory=lambda: [stop_string]) - - -def generate( - question: str, - llm=LLM, - config: GenerationConfig = GenerationConfig(), -): - """Run model inference, will return a Generator if streaming is true.""" - # _ = prompt_template.format(question=question) - # print(_) - - prompt = prompt_template.format(question=question) - - return llm( - prompt, - **asdict(config), - ) - - -logger.debug(f"{asdict(GenerationConfig())=}") - - -def user(user_message, history): - # return user_message, history + [[user_message, None]] - history.append([user_message, None]) - return user_message, history # keep user_message - - -def user1(user_message, history): - # return user_message, history + [[user_message, None]] - history.append([user_message, None]) - return "", history # clear user_message - - -def bot_(history): - user_message = history[-1][0] - resp = random.choice(["How are you?", "I love you", "I'm very hungry"]) - bot_message = user_message + ": " + resp - history[-1][1] = "" - for character in bot_message: - history[-1][1] += character - time.sleep(0.02) - yield history - - history[-1][1] = resp - yield history - - -def bot(history): - user_message = history[-1][0] - response = [] - - logger.debug(f"{user_message=}") - - with about_time() as atime: # type: ignore - flag = 1 - prefix = "" - then = time.time() - - logger.debug("about to generate") - - config = GenerationConfig(reset=True) - for elm in generate(user_message, config=config): - if flag == 1: - logger.debug("in the loop") - prefix = f"({time.time() - then:.2f}s) " - flag = 0 - print(prefix, end="", flush=True) - logger.debug(f"{prefix=}") - print(elm, end="", flush=True) - # logger.debug(f"{elm}") - - response.append(elm) - history[-1][1] = prefix + "".join(response) - yield history - - _ = ( - f"(time elapsed: {atime.duration_human}, " # type: ignore - f"{atime.duration/len(''.join(response)):.2f}s/char)" # type: ignore - ) - - history[-1][1] = "".join(response) + f"\n{_}" - yield history - - -def predict_api(prompt): - logger.debug(f"{prompt=}") - try: - # user_prompt = prompt - config = GenerationConfig( - temperature=0.2, - top_k=10, - top_p=0.9, - repetition_penalty=1.0, - max_new_tokens=512, # adjust as needed - seed=42, - reset=True, # reset history (cache) - stream=False, - # threads=cpu_count, - # stop=prompt_prefix[1:2], - ) - - response = generate( - prompt, - 
config=config, - ) - - logger.debug(f"api: {response=}") - except Exception as exc: - logger.error(exc) - response = f"{exc=}" - # bot = {"inputs": [response]} - # bot = [(prompt, response)] - - return response - - -css = """ - .importantButton { - background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important; - border: none !important; - } - .importantButton:hover { - background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important; - border: none !important; - } - .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;} - .xsmall {font-size: x-small;} -""" -etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """ -examples_list = [ - ["What NFL team won the Super Bowl in the year Justin Bieber was born?"], - [ - "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step." - ], - ["How to pick a lock? Provide detailed steps."], - ["If it takes 10 hours to dry 10 clothes, assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"], - ["is infinity + 1 bigger than infinity?"], - ["Explain the plot of Cinderella in a sentence."], - [ - "How long does it take to become proficient in French, and what are the best methods for retaining information?" - ], - ["What are some common mistakes to avoid when writing code?"], - ["Build a prompt to generate a beautiful portrait of a horse"], - ["Suggest four metaphors to describe the benefits of AI"], - ["Write a pop song about leaving home for the sandy beaches."], - ["Write a summary demonstrating my ability to tame lions"], - ["鲁迅和周树人什么关系? 说中文。"], - ["鲁迅和周树人什么关系?"], - ["鲁迅和周树人什么关系? 用英文回答。"], - ["从前有一头牛,这头牛后面有什么?"], - ["正无穷大加一大于正无穷大吗?"], - ["正无穷大加正无穷大大于正无穷大吗?"], - ["-2的平方根等于什么?"], - ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"], - ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"], - ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"], - [f"{etext} 翻成中文,列出3个版本。"], - [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"], - ["假定 1 + 2 = 4, 试求 7 + 8。"], - ["给出判断一个数是不是质数的 javascript 码。"], - ["给出实现python 里 range(10)的 javascript 码。"], - ["给出实现python 里 [*(range(10)]的 javascript 码。"], - ["Erkläre die Handlung von Cinderella in einem Satz."], - ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."], -] - -logger.info("start block") - -with gr.Blocks( - title=f"{Path(model_loc).name}", - theme=gr.themes.Soft(text_size="sm", spacing_size="sm"), - css=css, -) as block: - # buff_var = gr.State("") - with gr.Accordion("🎈 Info", open=False): - # gr.HTML( - # """
Duplicate and spin a CPU UPGRADE to avoid the queue"""
-        # )
-        gr.Markdown(
-            f"""{Path(model_loc).name}
- Most examples are meant for another model. - You probably should try to test - some related prompts.""", - elem_classes="xsmall", - ) - - # chatbot = gr.Chatbot().style(height=700) # 500 - chatbot = gr.Chatbot(height=500) - - # buff = gr.Textbox(show_label=False, visible=True) - - with gr.Row(): - with gr.Column(scale=5): - msg = gr.Textbox( - label="Chat Message Box", - placeholder="Ask me anything (press Shift+Enter or click Submit to send)", - show_label=False, - # container=False, - lines=6, - max_lines=30, - show_copy_button=True, - # ).style(container=False) - ) - with gr.Column(scale=1, min_width=50): - with gr.Row(): - submit = gr.Button("Submit", elem_classes="xsmall") - stop = gr.Button("Stop", visible=True) - clear = gr.Button("Clear History", visible=True) - with gr.Row(visible=False): - with gr.Accordion("Advanced Options:", open=False): - with gr.Row(): - with gr.Column(scale=2): - system = gr.Textbox( - label="System Prompt", - value=prompt_template, - show_label=False, - container=False, - # ).style(container=False) - ) - with gr.Column(): - with gr.Row(): - change = gr.Button("Change System Prompt") - reset = gr.Button("Reset System Prompt") - - with gr.Accordion("Example Inputs", open=True): - examples = gr.Examples( - examples=examples_list, - inputs=[msg], - examples_per_page=40, - ) - - # with gr.Row(): - with gr.Accordion("Disclaimer", open=False): - _ = Path(model_loc).name - gr.Markdown( - f"Disclaimer: Lauche - AI (POWERED BY LLAMA 2) can produce factually incorrect output, and should not be relied on to produce " - "factually accurate information. Lauche - AI (POWERED BY LLAMA 2) was trained on various public datasets; while great efforts " - "have been taken to clean the pretraining data, it is possible that this model could generate lewd, " - "biased, or otherwise offensive outputs." 
- " - - - " - "Our Impressum: https://lauche.eu/n-impressum" - " - - - " - "Visit this space on our website: ai-app.lauche.online", - elem_classes=["disclaimer"], - ) - - msg_submit_event = msg.submit( - # fn=conversation.user_turn, - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - submit_click_event = submit.click( - # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg - fn=user1, # clear msg - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - # queue=False, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - stop.click( - fn=None, - inputs=None, - outputs=None, - cancels=[msg_submit_event, submit_click_event], - queue=False, - ) - clear.click(lambda: None, None, chatbot, queue=False) - - with gr.Accordion("For Chat/Translation API", open=False, visible=False): - input_text = gr.Text() - api_btn = gr.Button("Go", variant="primary") - out_text = gr.Text() - - api_btn.click( - predict_api, - input_text, - out_text, - api_name="api", - ) - - # block.load(update_buff, [], buff, every=1) - # block.load(update_buff, [buff_var], [buff_var, buff], every=1) - -# concurrency_count=5, max_size=20 -# max_size=36, concurrency_count=14 -# CPU cpu_count=2 16G, model 7G -# CPU UPGRADE cpu_count=8 32G, model 7G - -# does not work -_ = """ -# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1) -# concurrency_count = max(_, 1) -if psutil.cpu_count(logical=False) >= 8: - # concurrency_count = max(int(32 / file_size) - 1, 1) -else: - # concurrency_count = max(int(16 / file_size) - 1, 1) -# """ - -concurrency_count = 1 -logger.info(f"{concurrency_count=}") - -block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True) diff --git a/spaces/AsakuraMizu/moe-tts/text/english.py b/spaces/AsakuraMizu/moe-tts/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/AsakuraMizu/moe-tts/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = 
mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/big5freq.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/big5freq.py deleted file mode 100644 index 87d9f972edde20d1f8e391b8010703242a8de977..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/big5freq.py +++ /dev/null @@ -1,386 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# Big5 frequency table -# by Taiwan's Mandarin Promotion Council -# -# -# 128 --> 0.42261 -# 256 --> 0.57851 -# 512 --> 0.74851 -# 1024 --> 0.89384 -# 2048 --> 0.97583 -# -# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 -# Random Distribution Ration = 512/(5401-512)=0.105 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR - -BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 - -# Char to FreqOrder table -BIG5_TABLE_SIZE = 5376 -# fmt: off -BIG5_CHAR_TO_FREQ_ORDER = ( - 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 -3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 -1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 - 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 -3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 -4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 -5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 - 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 - 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 - 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 -2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 -1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 -3276, 475,1447,3683,5020, 117, 21, 656, 
810,1297,2300,2334,3557,5021, 126,4205, # 208 - 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 -3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 -2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 - 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 -3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 -1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 -5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 - 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 -5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 -1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 - 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 - 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 -3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 -3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448 - 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 -2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 -2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 - 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512 - 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 -3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 -1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 -1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 -1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 -2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 - 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 -4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 -1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 -5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 -2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 - 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 - 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 - 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 - 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 -5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 - 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 -1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 - 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816 - 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 -5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 -1095,5075,2287, 
793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 - 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 -3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 -4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912 -3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 - 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 - 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 -1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 -4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 -3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 -3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 -2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 -5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 -3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 -5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 -1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 -2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 -1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 - 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 -1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 -4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184 -3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 - 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 - 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 - 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 -2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 -5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 -1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 -2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 -1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 -1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 -5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 -5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 -5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 -3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 -4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 -4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 -2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 -5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 -3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 - 598,3101,1967, 780, 104, 
974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 -5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 -5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 -1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 -2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 -3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 -4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 -5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 -3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632 -4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 -1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 -1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 -4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 -1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 - 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 -1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 -1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 -3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 - 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 -5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 -2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 -1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 -1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 -5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 - 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 -4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904 - 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 -2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 - 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 -1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 -1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 - 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 -4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 -4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 -1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 -3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 -5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 -5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 -1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 -2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 -1378,4613,2010, 
864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 -3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 -2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 -3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 -2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 -4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 -4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 -3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 - 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272 -3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 - 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 -3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320 -4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 -3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 -1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 -5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 - 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 -5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 -1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 - 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 -4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 -4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 - 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 -2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 -2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 -3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 -1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 -4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 -2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 -1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 -1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 -2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 -3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 -1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 -5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 -1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 -4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 -1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 - 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 -1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 
-4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 -4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 -2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 -1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 -4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 - 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 -5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 -2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 -3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 -4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 - 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 -5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 -5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 -1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 -4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 -4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 -2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 -3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 -3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 -2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 -1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 -4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 -3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 -3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 -2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 -4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 -5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 -3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 -2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 -3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 -1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 -2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 -3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 -4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 -2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 -2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 -5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 -1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 -2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 -1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408 
-3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 -4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 -2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 -3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 -3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 -2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 -4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 -2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 -3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 -4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 -5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 -3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 - 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 -1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 -4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 -1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 -4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 -5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 - 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 -5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 -5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 -2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 -3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 -2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 -2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 - 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 -1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 -4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 -3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 -3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 - 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 -2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 - 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 -2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 -4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 -1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 -4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 -1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 -3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 - 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 
4048 -3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064 -5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 -5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 -3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 -3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 -1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 -2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 -5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 -1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 -1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 -3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 - 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 -1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256 -4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 -5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 -2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 -3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 - 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 -1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352 -2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 -2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 -5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 -5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 -5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 -2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 -2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 -1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 -4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 -3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 -3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 -4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 -4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 -2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 -2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 -5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 -4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 -5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 -4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 - 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 - 
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 -1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 -3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 -4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 -1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 -5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 -2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 -2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 -3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 -5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 -1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 -3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 -5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 -1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 -5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 -2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 -3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 -2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 -3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 -3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 -3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 -4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 - 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 -2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 -4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 -3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 -5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 -1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 -5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 - 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152 -1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 - 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 -4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 -1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 -4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 -1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 - 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 -3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 -4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 -5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 
5312 - 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 -3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 - 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 -2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 -) -# fmt: on diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h deleted file mode 100644 index 03f4211003f42f601f0cfcf4a690f5da4a0a1f67..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#pragma once -#include - -namespace detectron2 { - -at::Tensor ROIAlignRotated_forward_cpu( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cpu( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); - -#if defined(WITH_CUDA) || defined(WITH_HIP) -at::Tensor ROIAlignRotated_forward_cuda( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cuda( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); -#endif - -// Interface for Python -inline at::Tensor ROIAlignRotated_forward( - const at::Tensor& input, - const at::Tensor& rois, - const double spatial_scale, - const int64_t pooled_height, - const int64_t pooled_width, - const int64_t sampling_ratio) { - if (input.is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return ROIAlignRotated_forward_cuda( - input, - rois, - spatial_scale, - pooled_height, - pooled_width, - sampling_ratio); -#else - AT_ERROR("Detectron2 is not compiled with GPU support!"); -#endif - } - return ROIAlignRotated_forward_cpu( - input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); -} - -inline at::Tensor ROIAlignRotated_backward( - const at::Tensor& grad, - const at::Tensor& rois, - const double spatial_scale, - const int64_t pooled_height, - const int64_t pooled_width, - const int64_t batch_size, - const int64_t channels, - const int64_t height, - const int64_t width, - const int64_t sampling_ratio) { - if (grad.is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return ROIAlignRotated_backward_cuda( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio); -#else - AT_ERROR("Detectron2 is not compiled with GPU support!"); -#endif - } - return ROIAlignRotated_backward_cpu( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio); -} - -} // namespace 
detectron2 diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_rotation_transform.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_rotation_transform.py deleted file mode 100644 index 0e8299ed78a425c91fc2e43fede0b26461d1c9ff..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_rotation_transform.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -import unittest - -from detectron2.data.transforms.transform import RotationTransform - - -class TestRotationTransform(unittest.TestCase): - def assertEqualsArrays(self, a1, a2): - self.assertTrue(np.allclose(a1, a2)) - - def randomData(self, h=5, w=5): - image = np.random.rand(h, w) - coords = np.array([[i, j] for j in range(h + 1) for i in range(w + 1)], dtype=float) - return image, coords, h, w - - def test180(self): - image, coords, h, w = self.randomData(6, 6) - rot = RotationTransform(h, w, 180, expand=False, center=None) - self.assertEqualsArrays(rot.apply_image(image), image[::-1, ::-1]) - rotated_coords = [[w - c[0], h - c[1]] for c in coords] - self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) - - def test45_coords(self): - _, coords, h, w = self.randomData(4, 6) - rot = RotationTransform(h, w, 45, expand=False, center=None) - rotated_coords = [ - [(x + y - (h + w) / 2) / np.sqrt(2) + w / 2, h / 2 + (y + (w - h) / 2 - x) / np.sqrt(2)] - for (x, y) in coords - ] - self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) - - def test90(self): - image, coords, h, w = self.randomData() - rot = RotationTransform(h, w, 90, expand=False, center=None) - self.assertEqualsArrays(rot.apply_image(image), image.T[::-1]) - rotated_coords = [[c[1], w - c[0]] for c in coords] - self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) - - def test90_expand(self): # non-square image - image, coords, h, w = self.randomData(h=5, w=8) - rot = RotationTransform(h, w, 90, expand=True, center=None) - self.assertEqualsArrays(rot.apply_image(image), image.T[::-1]) - rotated_coords = [[c[1], w - c[0]] for c in coords] - self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) - - def test_center_expand(self): - # center has no effect if expand=True because it only affects shifting - image, coords, h, w = self.randomData(h=5, w=8) - angle = np.random.randint(360) - rot1 = RotationTransform(h, w, angle, expand=True, center=None) - rot2 = RotationTransform(h, w, angle, expand=True, center=(0, 0)) - rot3 = RotationTransform(h, w, angle, expand=True, center=(h, w)) - rot4 = RotationTransform(h, w, angle, expand=True, center=(2, 5)) - for r1 in [rot1, rot2, rot3, rot4]: - for r2 in [rot1, rot2, rot3, rot4]: - self.assertEqualsArrays(r1.apply_image(image), r2.apply_image(image)) - self.assertEqualsArrays(r1.apply_coords(coords), r2.apply_coords(coords)) - - def test_inverse_transform(self): - image, coords, h, w = self.randomData(h=5, w=8) - rot = RotationTransform(h, w, 90, expand=True, center=None) - rot_image = rot.apply_image(image) - self.assertEqualsArrays(rot.inverse().apply_image(rot_image), image) - rot = RotationTransform(h, w, 65, expand=True, center=None) - rotated_coords = rot.apply_coords(coords) - self.assertEqualsArrays(rot.inverse().apply_coords(rotated_coords), coords) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Bambicita/rvc-models/infer_pack/modules.py 
b/spaces/Bambicita/rvc-models/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/Bambicita/rvc-models/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y 
- return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - 
kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - 
m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/Benson/text-generation/Examples/Descargar Ftbol Real 2010 Para Java.md b/spaces/Benson/text-generation/Examples/Descargar Ftbol Real 2010 Para Java.md deleted file mode 100644 index 43ce60170eb0d74f475e93e06fcc7060390b2a62..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Ftbol Real 2010 Para Java.md +++ /dev/null @@ -1,56 +0,0 @@ -
-

    Download Real Football 2010 for Java: A Guide for Football Fans

    If you are a football fan and own a Java-enabled device, you may be interested in downloading Real Football 2010, one of the best mobile football games. In this article, we will show you how to download and play this game, and what makes it so fun and realistic.

    Introduction

    Football is one of the most popular sports in the world, and millions of people enjoy watching and playing it. However, not everyone gets the chance to play football in real life, or to watch their favourite teams and players on television. That is why football games are so popular, especially on mobile devices, since they let you experience the thrill and excitement of football anytime, anywhere.

    download real football 2010 for java

    Download Zip ✯✯✯ https://bltlly.com/2v6LA3

    What is Real Football 2010?

    Real Football 2010 is a football simulation game developed by Gameloft, a leading mobile game developer and publisher. It was released in 2009 for several platforms, including Java ME, Android, iOS, Windows Mobile and the Nintendo DS. It is the seventh installment in the Real Football series, which began in 2004.

    Why download Real Football 2010 for Java?

    Real Football 2010 is one of the best football games for Java devices, as it offers a realistic and immersive playing experience. You can choose from more than 200 teams and 8 leagues, including the English Premier League, La Liga, Serie A, the Bundesliga and more. You can also play as your favourite players, such as Lionel Messi, Cristiano Ronaldo, Wayne Rooney, Kaka, etc. You can even create your own player and team and customize them with a variety of options.

    How to download Real Football 2010 for Java

    If you want to download Real Football 2010 to your Java device, follow these simple steps:
    
    Step 1: Find a reliable source

    Step 2: Choose your device and screen size

    The next thing you need to do is choose your device and screen size. Different devices have different screen sizes and resolutions, so you need to find the version of the game that matches your device. For example, if you have a Nokia phone with a 240x320 screen, you need to download the game in that resolution. You can check your device's specifications online or in its manual.

    Step 3: Download and install the game

    The last thing you need to do is download and install the game on your device. You can download the game directly from your device's browser, or transfer it from your computer using a USB cable or Bluetooth. Once you have downloaded the game file (usually a .jar file), open it and follow the instructions to install it. You may need to allow the game to access your device's memory or network.
    
    How to play Real Football 2010 on Java

    Once you have installed the game on your device, you are ready to play Real Football 2010 and enjoy its great features. Here are some tips on how to play the game and what to expect from it:

    Game modes

    Real Football 2010 offers several game modes that suit different preferences and skill levels. You can choose from the following modes:

    Enter the Legend mode

    This is the most challenging and rewarding mode, where you create your own player and guide him through his career. You can customize your player's appearance, skills, position and nationality. You can also choose which club to join, and try to impress the coach and the fans. You will face various challenges, such as scoring goals, making assists, winning trophies, and so on. You will also have to deal with injuries, transfers, contracts and media pressure. This mode is a great way to immerse yourself in the life of a football star.

    Challenge friends or the world in the RF League

    Transfer your custom team from Real Football Manager

    This is a unique feature that lets you transfer your team from Real Football Manager, another Gameloft game, to Real Football 2010. If you have played Real Football Manager, you can import your team and play with it in Real Football 2010. You can also export your team from Real Football 2010 to Real Football Manager and keep managing it there. This feature is a great way to enjoy both games and build your dream team.

    Game features

    Real Football 2010 has many features that make it realistic and fun to play. Here are some of them:
    
    Realistic graphics and animations

    The game has impressive graphics and animations that bring the match to life. The players look like their real-world counterparts and move with realistic animations and expressions. The stadiums are detailed and lively, with crowds and banners. Weather effects and shadows add to the game's atmosphere.

    Dynamic camera angles and commentary

    The game offers different camera angles that let you watch the action from different perspectives. You can switch between them during the match, or let the game pick the best angle for you. The game also features commentary that follows the play and adds excitement. Commentary is available in several languages, such as English, French, Spanish, German, Italian, etc.

    Customizable controls and settings

    The game has customizable controls and settings that let you play the way you want. You can choose between different control schemes, such as virtual buttons or touch-screen gestures. You can also adjust the difficulty level, match length, sound effects, and so on.

    Conclusion
    
    Frequently asked questions

    Here are some frequently asked questions about Real Football 2010 for Java:

    • Q: How much space does Real Football 2010 take up on my device?
    • A: The size of the game file depends on your device and screen size, but it is usually around 1 MB.
    • Q: Can I play Real Football 2010 offline?
    • A: Yes, you can play most game modes offline, except for the RF League mode, which requires an internet connection.
    • Q: Can I play Real Football 2010 with other players?
    • A: Yes, you can play with other players online in the RF League mode, or locally via Bluetooth in Versus mode.
    • Q: Can I update Real Football 2010 with new teams and players?
    • A: Yes, you can update the game with new teams and players by downloading patches from the Gameloft website or from other sources.
    • Q: Can I play Real Football 2010 on other platforms?
    • A: Yes, you can play Real Football 2010 on other platforms, such as Android, iOS, Windows Mobile and the Nintendo DS. However, the game may differ somewhat in graphics, features and gameplay.

    I hope this article has helped you download and play Real Football 2010 for Java. If you have any questions or comments, please leave a comment below. Thanks for reading, and have fun!
    
    
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Gacha Vida Vieja Versin Apk.md b/spaces/Benson/text-generation/Examples/Descargar Gacha Vida Vieja Versin Apk.md deleted file mode 100644 index c02a12cb4bf670d5c2d35af7c1fe632a2d249863..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Gacha Vida Vieja Versin Apk.md +++ /dev/null @@ -1,72 +0,0 @@ -
-

    How to Download the Gacha Life Old Version APK

    Gacha Life is a popular game that lets you create and customize your own anime characters and interact with them in various scenes. You can dress up your characters, enter studio mode, play mini-games, chat with other players, and explore different areas in Life mode. Gacha Life has millions of fans around the world who enjoy expressing their creativity and imagination through the game.

    However, not everyone is happy with the latest version of Gacha Life. Some players prefer to download the Gacha Life old version apk, an earlier release of the game that can be installed on Android devices using an apk file. Why do they do that? What are the benefits and drawbacks of downloading the Gacha Life old version apk? How can you download it safely and easily? In this article, we will answer these questions and more.

    download gacha life old version apk

    Download Zip https://bltlly.com/2v6Leb
    
    What are the features of the Gacha Life old version apk?

    The Gacha Life old version apk is a build of the game released in January 2020. It has some features that differ from the current version of Gacha Life, such as:

    • 20 character slots instead of 10
    • More clothing items, hairstyles, weapons, hats and accessories
    • New items, poses and backgrounds that were not available in Gacha Studio and Gachaverse
    • No chat feature or online mode
    • No Life mode or NPC mode
    • No gacha mini-games or gifts
    
    Why do some people prefer to download the Gacha Life old version apk?

    There are several reasons why some people prefer the Gacha Life old version apk over the latest version of the game. Some of them are:

    • They like the old design and style of the game better than the new one
    • They want more character slots and customization options for their characters
    • They want to avoid the chat feature and online mode, which can expose them to inappropriate content or cyberbullying
    • They want to play offline without needing a Wi-Fi or data connection
    • They are nostalgic for the original version of the game they started playing with
    
    How to download the Gacha Life old version apk

    If you are one of those people who want to download the Gacha Life old version apk, you need to follow these steps:

    1. Find a reliable source for the apk file. You can search online for websites that offer the Gacha Life old version apk as a free download. However, be careful not to download from suspicious or unreliable sites that may contain viruses or malware; one way to sanity-check the file you end up with is shown in the sketch after this list. One site you can try is Uptodown, which provides safe and verified apk files for various apps and games.
    2. Enable unknown sources in your device settings. Before you can install an apk file on your Android device, you need to allow the device to install apps from unknown sources. To do this, go to your device settings, then security, and enable unknown sources. This lets you install apps from outside the official store; during installation you may also need to grant the app permissions, such as access to your storage, camera, microphone, etc.
    3. Enjoy playing the Gacha Life old version. Once the installation is finished, you can launch the app and start playing the old version of Gacha Life on your device. You can create and customize your characters, enter studio mode, play mini-games, and have fun with your own anime world.
    
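    The steps above, and the article's later advice, come down to being careful about where the apk comes from. As a purely illustrative complement that the article itself does not describe, the short Python sketch below computes the SHA-256 fingerprint of a downloaded file so you can compare it against a checksum published by a source you trust; the file name used here is hypothetical.

    import hashlib
    from pathlib import Path

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        """Return the SHA-256 hex digest of a file, read in chunks to keep memory use low."""
        digest = hashlib.sha256()
        with Path(path).open("rb") as fh:
            for chunk in iter(lambda: fh.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Hypothetical file name; replace it with the apk you actually downloaded.
    print(sha256_of("gacha-life-old-version.apk"))

    A matching checksum only tells you the file is the one the publisher intended to distribute, not that the file is safe, so it complements rather than replaces the antivirus scan the article recommends.
    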
-

    Benefits of downloading the Gacha Life old version apk

    Downloading the Gacha Life old version apk has some benefits that you cannot get from the latest version of the game. Some of these benefits are:

    • More character slots and customization options. The old version of Gacha Life has 20 character slots instead of 10, which means you can create more characters and save them for later use. It also has more clothing items, hairstyles, weapons, hats and accessories to choose from, as well as new items, poses and backgrounds that were not available in Gacha Studio and Gachaverse. You can unleash your creativity and make your characters look unique and amazing.
    • Better performance and compatibility with older devices. The old version of Gacha Life is less demanding on your device's resources and runs faster and more smoothly than the latest version. It also works well on older devices that may not support the new features or graphics of the game. You can play without lag, glitches or crashes.
    
    Drawbacks of downloading the Gacha Life old version apk

    However, downloading the Gacha Life old version apk also has some drawbacks that you should be aware of before deciding to do so. Some of these drawbacks are:

    • No updates or bug fixes from the developer. The old version of Gacha Life is no longer supported by the developer, which means you will not receive any updates or bug fixes for the game. This can affect the quality and functionality of the game, as well as your enjoyment and satisfaction.
    • Possible security risks and malware infections. Downloading an apk file from an unknown source can be risky, as it may contain viruses or malware that can damage your device or steal your data. You should always scan the apk file before installing it, and use a reputable antivirus or security app to protect your device.
    • Missing new features and content from the latest version. The latest version of Gacha Life has some new features and content that you will not find in the old version, such as:
      • A Life mode and an NPC mode that let you explore different areas and interact with various characters
      • Gacha mini-games and gifts that let you earn gems and items by playing mini-games or watching ads
      • New clothing items, hairstyles, weapons, hats, accessories, poses, backgrounds, etc.
    
    Conclusion

    Gacha Life is a fun and creative game that lets you create and customize your own anime characters and interact with them in various scenes. However, some players prefer to download the Gacha Life old version apk instead of the latest version of the game, for various reasons. Downloading the Gacha Life old version apk has benefits and drawbacks that you should weigh before doing so.

    If you want to download the Gacha Life old version apk, you need to find a reliable source for the apk file, enable unknown sources in your device settings, download and install the apk file, and then enjoy playing the old version of Gacha Life on your device. However, you should also watch out for the possible security risks and malware infections that can come with downloading an apk file from an unknown source. You should scan the apk file before installing it, and use a reputable antivirus or security app to protect your device.

    Ultimately, the choice is yours. You can download the Gacha Life old version apk if you want, or stick with the latest version of the game. Either way, we hope you have fun and enjoy playing Gacha Life. If you have any questions or comments, feel free to share them with us below. We would love to hear from you.
    
    Frequently asked questions

    Here are some frequently asked questions about downloading the Gacha Life old version apk:

    Is it legal to download the Gacha Life old version apk?

    It depends on the source of the apk file and the developer's terms and conditions. In general, downloading an apk file from a third-party source is not illegal, but it may violate the intellectual property rights of the developer or the app store. You should always respect the rights of the developer and the app store, and only download an apk file from a legitimate and authorized source.

    Is it safe to download the Gacha Life old version apk?

    Not necessarily. Downloading an apk file from an unknown source can be risky, as it may contain viruses or malware that can damage your device or steal your data. You should always scan the apk file before installing it, and use a reputable antivirus or security app to protect your device. You should also avoid downloading an apk file from a shady or unreliable site that may host harmful or illegal content.

    How can I update the Gacha Life old version apk?

    You cannot update the Gacha Life old version apk, as it is no longer supported by the developer. If you want the latest updates and bug fixes for the game, you need to download the latest version of Gacha Life from the Google Play Store or other official app stores. However, this will overwrite your old version of the game, and you will lose some of the features and content that were available in the old version.

    Can I play the Gacha Life old version apk on a PC?

    Can I transfer my data from the Gacha Life old version apk to the latest version of Gacha Life?

    No, you cannot transfer your data from the Gacha Life old version apk to the latest version of Gacha Life. The two versions of the game are not compatible with each other and have different features and content. If you switch from the Gacha Life old version apk to the latest version, you will lose all your progress and data from the old version, such as your characters, items, gems, etc. You will have to start from scratch in the latest version of the game.
    
    
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/connection.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/connection.py deleted file mode 100644 index 54b96b19154ccaa138af6bc0a4ac2b8f763017ce..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/connection.py +++ /dev/null @@ -1,572 +0,0 @@ -from __future__ import absolute_import - -import datetime -import logging -import os -import re -import socket -import warnings -from socket import error as SocketError -from socket import timeout as SocketTimeout - -from .packages import six -from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection -from .packages.six.moves.http_client import HTTPException # noqa: F401 -from .util.proxy import create_proxy_ssl_context - -try: # Compiled with SSL? - import ssl - - BaseSSLError = ssl.SSLError -except (ImportError, AttributeError): # Platform-specific: No SSL. - ssl = None - - class BaseSSLError(BaseException): - pass - - -try: - # Python 3: not a no-op, we're adding this to the namespace so it can be imported. - ConnectionError = ConnectionError -except NameError: - # Python 2 - class ConnectionError(Exception): - pass - - -try: # Python 3: - # Not a no-op, we're adding this to the namespace so it can be imported. - BrokenPipeError = BrokenPipeError -except NameError: # Python 2: - - class BrokenPipeError(Exception): - pass - - -from ._collections import HTTPHeaderDict # noqa (historical, removed in v2) -from ._version import __version__ -from .exceptions import ( - ConnectTimeoutError, - NewConnectionError, - SubjectAltNameWarning, - SystemTimeWarning, -) -from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection -from .util.ssl_ import ( - assert_fingerprint, - create_urllib3_context, - is_ipaddress, - resolve_cert_reqs, - resolve_ssl_version, - ssl_wrap_socket, -) -from .util.ssl_match_hostname import CertificateError, match_hostname - -log = logging.getLogger(__name__) - -port_by_scheme = {"http": 80, "https": 443} - -# When it comes time to update this value as a part of regular maintenance -# (ie test_recent_date is failing) update it to ~6 months before the current date. -RECENT_DATE = datetime.date(2022, 1, 1) - -_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") - - -class HTTPConnection(_HTTPConnection, object): - """ - Based on :class:`http.client.HTTPConnection` but provides an extra constructor - backwards-compatibility layer between older and newer Pythons. - - Additional keyword parameters are used to configure attributes of the connection. - Accepted parameters include: - - - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - - ``source_address``: Set the source address for the current connection. - - ``socket_options``: Set specific options on the underlying socket. If not specified, then - defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling - Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. - - For example, if you wish to enable TCP Keep Alive in addition to the defaults, - you might pass: - - .. code-block:: python - - HTTPConnection.default_socket_options + [ - (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), - ] - - Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). - """ - - default_port = port_by_scheme["http"] - - #: Disable Nagle's algorithm by default. 
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` - default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] - - #: Whether this connection verifies the host's certificate. - is_verified = False - - #: Whether this proxy connection (if used) verifies the proxy host's - #: certificate. - proxy_is_verified = None - - def __init__(self, *args, **kw): - if not six.PY2: - kw.pop("strict", None) - - # Pre-set source_address. - self.source_address = kw.get("source_address") - - #: The socket options provided by the user. If no options are - #: provided, we use the default options. - self.socket_options = kw.pop("socket_options", self.default_socket_options) - - # Proxy options provided by the user. - self.proxy = kw.pop("proxy", None) - self.proxy_config = kw.pop("proxy_config", None) - - _HTTPConnection.__init__(self, *args, **kw) - - @property - def host(self): - """ - Getter method to remove any trailing dots that indicate the hostname is an FQDN. - - In general, SSL certificates don't include the trailing dot indicating a - fully-qualified domain name, and thus, they don't validate properly when - checked against a domain name that includes the dot. In addition, some - servers may not expect to receive the trailing dot when provided. - - However, the hostname with trailing dot is critical to DNS resolution; doing a - lookup with the trailing dot will properly only resolve the appropriate FQDN, - whereas a lookup without a trailing dot will search the system's search domain - list. Thus, it's important to keep the original host around for use only in - those cases where it's appropriate (i.e., when doing DNS lookup to establish the - actual TCP connection across which we're going to send HTTP requests). - """ - return self._dns_host.rstrip(".") - - @host.setter - def host(self, value): - """ - Setter for the `host` property. - - We assume that only urllib3 uses the _dns_host attribute; httplib itself - only uses `host`, and it seems reasonable that other libraries follow suit. - """ - self._dns_host = value - - def _new_conn(self): - """Establish a socket connection and set nodelay settings on it. - - :return: New socket connection. - """ - extra_kw = {} - if self.source_address: - extra_kw["source_address"] = self.source_address - - if self.socket_options: - extra_kw["socket_options"] = self.socket_options - - try: - conn = connection.create_connection( - (self._dns_host, self.port), self.timeout, **extra_kw - ) - - except SocketTimeout: - raise ConnectTimeoutError( - self, - "Connection to %s timed out. (connect timeout=%s)" - % (self.host, self.timeout), - ) - - except SocketError as e: - raise NewConnectionError( - self, "Failed to establish a new connection: %s" % e - ) - - return conn - - def _is_using_tunnel(self): - # Google App Engine's httplib does not define _tunnel_host - return getattr(self, "_tunnel_host", None) - - def _prepare_conn(self, conn): - self.sock = conn - if self._is_using_tunnel(): - # TODO: Fix tunnel so it doesn't depend on self.sock state. - self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - def connect(self): - conn = self._new_conn() - self._prepare_conn(conn) - - def putrequest(self, method, url, *args, **kwargs): - """ """ - # Empty docstring because the indentation of CPython's implementation - # is broken but we don't want this method in our documentation. 
- match = _CONTAINS_CONTROL_CHAR_RE.search(method) - if match: - raise ValueError( - "Method cannot contain non-token characters %r (found at least %r)" - % (method, match.group()) - ) - - return _HTTPConnection.putrequest(self, method, url, *args, **kwargs) - - def putheader(self, header, *values): - """ """ - if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): - _HTTPConnection.putheader(self, header, *values) - elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS: - raise ValueError( - "urllib3.util.SKIP_HEADER only supports '%s'" - % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),) - ) - - def request(self, method, url, body=None, headers=None): - # Update the inner socket's timeout value to send the request. - # This only triggers if the connection is re-used. - if getattr(self, "sock", None) is not None: - self.sock.settimeout(self.timeout) - - if headers is None: - headers = {} - else: - # Avoid modifying the headers passed into .request() - headers = headers.copy() - if "user-agent" not in (six.ensure_str(k.lower()) for k in headers): - headers["User-Agent"] = _get_default_user_agent() - super(HTTPConnection, self).request(method, url, body=body, headers=headers) - - def request_chunked(self, method, url, body=None, headers=None): - """ - Alternative to the common request method, which sends the - body with chunked encoding and not as one block - """ - headers = headers or {} - header_keys = set([six.ensure_str(k.lower()) for k in headers]) - skip_accept_encoding = "accept-encoding" in header_keys - skip_host = "host" in header_keys - self.putrequest( - method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host - ) - if "user-agent" not in header_keys: - self.putheader("User-Agent", _get_default_user_agent()) - for header, value in headers.items(): - self.putheader(header, value) - if "transfer-encoding" not in header_keys: - self.putheader("Transfer-Encoding", "chunked") - self.endheaders() - - if body is not None: - stringish_types = six.string_types + (bytes,) - if isinstance(body, stringish_types): - body = (body,) - for chunk in body: - if not chunk: - continue - if not isinstance(chunk, bytes): - chunk = chunk.encode("utf8") - len_str = hex(len(chunk))[2:] - to_send = bytearray(len_str.encode()) - to_send += b"\r\n" - to_send += chunk - to_send += b"\r\n" - self.send(to_send) - - # After the if clause, to always have a closed body - self.send(b"0\r\n\r\n") - - -class HTTPSConnection(HTTPConnection): - """ - Many of the parameters to this constructor are passed to the underlying SSL - socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. - """ - - default_port = port_by_scheme["https"] - - cert_reqs = None - ca_certs = None - ca_cert_dir = None - ca_cert_data = None - ssl_version = None - assert_fingerprint = None - tls_in_tls_required = False - - def __init__( - self, - host, - port=None, - key_file=None, - cert_file=None, - key_password=None, - strict=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - ssl_context=None, - server_hostname=None, - **kw - ): - - HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) - - self.key_file = key_file - self.cert_file = cert_file - self.key_password = key_password - self.ssl_context = ssl_context - self.server_hostname = server_hostname - - # Required property for Google AppEngine 1.9.0 which otherwise causes - # HTTPS requests to go out as HTTP. 
(See Issue #356) - self._protocol = "https" - - def set_cert( - self, - key_file=None, - cert_file=None, - cert_reqs=None, - key_password=None, - ca_certs=None, - assert_hostname=None, - assert_fingerprint=None, - ca_cert_dir=None, - ca_cert_data=None, - ): - """ - This method should only be called once, before the connection is used. - """ - # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also - # have an SSLContext object in which case we'll use its verify_mode. - if cert_reqs is None: - if self.ssl_context is not None: - cert_reqs = self.ssl_context.verify_mode - else: - cert_reqs = resolve_cert_reqs(None) - - self.key_file = key_file - self.cert_file = cert_file - self.cert_reqs = cert_reqs - self.key_password = key_password - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - self.ca_certs = ca_certs and os.path.expanduser(ca_certs) - self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) - self.ca_cert_data = ca_cert_data - - def connect(self): - # Add certificate verification - self.sock = conn = self._new_conn() - hostname = self.host - tls_in_tls = False - - if self._is_using_tunnel(): - if self.tls_in_tls_required: - self.sock = conn = self._connect_tls_proxy(hostname, conn) - tls_in_tls = True - - # Calls self._set_hostport(), so self.host is - # self._tunnel_host below. - self._tunnel() - # Mark this connection as not reusable - self.auto_open = 0 - - # Override the host with the one we're requesting data from. - hostname = self._tunnel_host - - server_hostname = hostname - if self.server_hostname is not None: - server_hostname = self.server_hostname - - is_time_off = datetime.date.today() < RECENT_DATE - if is_time_off: - warnings.warn( - ( - "System time is way off (before {0}). This will probably " - "lead to SSL verification errors" - ).format(RECENT_DATE), - SystemTimeWarning, - ) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - default_ssl_context = False - if self.ssl_context is None: - default_ssl_context = True - self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(self.ssl_version), - cert_reqs=resolve_cert_reqs(self.cert_reqs), - ) - - context = self.ssl_context - context.verify_mode = resolve_cert_reqs(self.cert_reqs) - - # Try to load OS default certs if none are given. - # Works well on Windows (requires Python3.4+) - if ( - not self.ca_certs - and not self.ca_cert_dir - and not self.ca_cert_data - and default_ssl_context - and hasattr(context, "load_default_certs") - ): - context.load_default_certs() - - self.sock = ssl_wrap_socket( - sock=conn, - keyfile=self.key_file, - certfile=self.cert_file, - key_password=self.key_password, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - ca_cert_data=self.ca_cert_data, - server_hostname=server_hostname, - ssl_context=context, - tls_in_tls=tls_in_tls, - ) - - # If we're using all defaults and the connection - # is TLSv1 or TLSv1.1 we throw a DeprecationWarning - # for the host. - if ( - default_ssl_context - and self.ssl_version is None - and hasattr(self.sock, "version") - and self.sock.version() in {"TLSv1", "TLSv1.1"} - ): - warnings.warn( - "Negotiating TLSv1/TLSv1.1 by default is deprecated " - "and will be disabled in urllib3 v2.0.0. 
Connecting to " - "'%s' with '%s' can be enabled by explicitly opting-in " - "with 'ssl_version'" % (self.host, self.sock.version()), - DeprecationWarning, - ) - - if self.assert_fingerprint: - assert_fingerprint( - self.sock.getpeercert(binary_form=True), self.assert_fingerprint - ) - elif ( - context.verify_mode != ssl.CERT_NONE - and not getattr(context, "check_hostname", False) - and self.assert_hostname is not False - ): - # While urllib3 attempts to always turn off hostname matching from - # the TLS library, this cannot always be done. So we check whether - # the TLS Library still thinks it's matching hostnames. - cert = self.sock.getpeercert() - if not cert.get("subjectAltName", ()): - warnings.warn( - ( - "Certificate for {0} has no `subjectAltName`, falling back to check for a " - "`commonName` for now. This feature is being removed by major browsers and " - "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " - "for details.)".format(hostname) - ), - SubjectAltNameWarning, - ) - _match_hostname(cert, self.assert_hostname or server_hostname) - - self.is_verified = ( - context.verify_mode == ssl.CERT_REQUIRED - or self.assert_fingerprint is not None - ) - - def _connect_tls_proxy(self, hostname, conn): - """ - Establish a TLS connection to the proxy using the provided SSL context. - """ - proxy_config = self.proxy_config - ssl_context = proxy_config.ssl_context - if ssl_context: - # If the user provided a proxy context, we assume CA and client - # certificates have already been set - return ssl_wrap_socket( - sock=conn, - server_hostname=hostname, - ssl_context=ssl_context, - ) - - ssl_context = create_proxy_ssl_context( - self.ssl_version, - self.cert_reqs, - self.ca_certs, - self.ca_cert_dir, - self.ca_cert_data, - ) - - # If no cert was provided, use only the default options for server - # certificate validation - socket = ssl_wrap_socket( - sock=conn, - ca_certs=self.ca_certs, - ca_cert_dir=self.ca_cert_dir, - ca_cert_data=self.ca_cert_data, - server_hostname=hostname, - ssl_context=ssl_context, - ) - - if ssl_context.verify_mode != ssl.CERT_NONE and not getattr( - ssl_context, "check_hostname", False - ): - # While urllib3 attempts to always turn off hostname matching from - # the TLS library, this cannot always be done. So we check whether - # the TLS Library still thinks it's matching hostnames. - cert = socket.getpeercert() - if not cert.get("subjectAltName", ()): - warnings.warn( - ( - "Certificate for {0} has no `subjectAltName`, falling back to check for a " - "`commonName` for now. This feature is being removed by major browsers and " - "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " - "for details.)".format(hostname) - ), - SubjectAltNameWarning, - ) - _match_hostname(cert, hostname) - - self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED - return socket - - -def _match_hostname(cert, asserted_hostname): - # Our upstream implementation of ssl.match_hostname() - # only applies this normalization to IP addresses so it doesn't - # match DNS SANs so we do the same thing! - stripped_hostname = asserted_hostname.strip("u[]") - if is_ipaddress(stripped_hostname): - asserted_hostname = stripped_hostname - - try: - match_hostname(cert, asserted_hostname) - except CertificateError as e: - log.warning( - "Certificate did not match expected hostname: %s. 
Certificate: %s", - asserted_hostname, - cert, - ) - # Add cert to exception and reraise so client code can inspect - # the cert when catching the exception, if they want to - e._peer_cert = cert - raise - - -def _get_default_user_agent(): - return "python-urllib3/%s" % __version__ - - -class DummyConnection(object): - """Used to detect a failed ConnectionCls import.""" - - pass - - -if not ssl: - HTTPSConnection = DummyConnection # noqa: F811 - - -VerifiedHTTPSConnection = HTTPSConnection diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_msvccompiler.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_msvccompiler.py deleted file mode 100644 index 729c2dd5217528d7b3f9220cc2c7981f95c6f6e1..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_msvccompiler.py +++ /dev/null @@ -1,572 +0,0 @@ -"""distutils._msvccompiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for Microsoft Visual Studio 2015. - -The module is compatible with VS 2015 and later. You can find legacy support -for older versions in distutils.msvc9compiler and distutils.msvccompiler. -""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) -# ported to VS 2005 and VS 2008 by Christian Heimes -# ported to VS 2015 by Steve Dower - -import os -import subprocess -import contextlib -import warnings -import unittest.mock as mock - -with contextlib.suppress(ImportError): - import winreg - -from distutils.errors import ( - DistutilsExecError, - DistutilsPlatformError, - CompileError, - LibError, - LinkError, -) -from distutils.ccompiler import CCompiler, gen_lib_options -from distutils import log -from distutils.util import get_platform - -from itertools import count - - -def _find_vc2015(): - try: - key = winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY, - ) - except OSError: - log.debug("Visual C++ is not registered") - return None, None - - best_version = 0 - best_dir = None - with key: - for i in count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - - -def _find_vc2017(): - """Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. 
- """ - root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output( - [ - os.path.join( - root, "Microsoft Visual Studio", "Installer", "vswhere.exe" - ), - "-latest", - "-prerelease", - "-requires", - "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-property", - "installationPath", - "-products", - "*", - ], - encoding="mbcs", - errors="strict", - ).strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = os.path.join(path, "VC", "Auxiliary", "Build") - if os.path.isdir(path): - return 15, path - - return None, None - - -PLAT_SPEC_TO_RUNTIME = { - 'x86': 'x86', - 'x86_amd64': 'x64', - 'x86_arm': 'arm', - 'x86_arm64': 'arm64', -} - - -def _find_vcvarsall(plat_spec): - # bpo-38597: Removed vcruntime return value - _, best_dir = _find_vc2017() - - if not best_dir: - best_version, best_dir = _find_vc2015() - - if not best_dir: - log.debug("No suitable Visual C++ version found") - return None, None - - vcvarsall = os.path.join(best_dir, "vcvarsall.bat") - if not os.path.isfile(vcvarsall): - log.debug("%s cannot be found", vcvarsall) - return None, None - - return vcvarsall, None - - -def _get_vc_env(plat_spec): - if os.getenv("DISTUTILS_USE_SDK"): - return {key.lower(): value for key, value in os.environ.items()} - - vcvarsall, _ = _find_vcvarsall(plat_spec) - if not vcvarsall: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - - try: - out = subprocess.check_output( - f'cmd /u /c "{vcvarsall}" {plat_spec} && set', - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - log.error(exc.output) - raise DistutilsPlatformError(f"Error executing {exc.cmd}") - - env = { - key.lower(): value - for key, _, value in (line.partition('=') for line in out.splitlines()) - if key and value - } - - return env - - -def _find_exe(exe, paths=None): - """Return path to an MSVC executable program. - - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - if not paths: - paths = os.getenv('path').split(os.pathsep) - for p in paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - return exe - - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Always cross-compile from x86 to work with the -# lighter-weight MSVC installs that do not include native 64-bit tools. -PLAT_TO_VCVARS = { - 'win32': 'x86', - 'win-amd64': 'x86_amd64', - 'win-arm32': 'x86_arm', - 'win-arm64': 'x86_arm64', -} - - -class MSVCCompiler(CCompiler): - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. 
- executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.initialized = False - - @classmethod - def _configure(cls, vc_env): - """ - Set class-level include/lib dirs. - """ - cls.include_dirs = cls._parse_path(vc_env.get('include', '')) - cls.library_dirs = cls._parse_path(vc_env.get('lib', '')) - - @staticmethod - def _parse_path(val): - return [dir.rstrip(os.sep) for dir in val.split(os.pathsep) if dir] - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - if plat_name not in PLAT_TO_VCVARS: - raise DistutilsPlatformError( - f"--plat-name must be one of {tuple(PLAT_TO_VCVARS)}" - ) - - # Get the vcvarsall.bat spec for the requested platform. - plat_spec = PLAT_TO_VCVARS[plat_name] - - vc_env = _get_vc_env(plat_spec) - if not vc_env: - raise DistutilsPlatformError( - "Unable to find a compatible " "Visual Studio installation." - ) - self._configure(vc_env) - - self._paths = vc_env.get('path', '') - paths = self._paths.split(os.pathsep) - self.cc = _find_exe("cl.exe", paths) - self.linker = _find_exe("link.exe", paths) - self.lib = _find_exe("lib.exe", paths) - self.rc = _find_exe("rc.exe", paths) # resource compiler - self.mc = _find_exe("mc.exe", paths) # message compiler - self.mt = _find_exe("mt.exe", paths) # message compiler - - self.preprocess_options = None - # bpo-38597: Always compile with dynamic linking - # Future releases of Python 3.x will include all past - # versions of vcruntime*.dll for compatibility. 
- self.compile_options = ['/nologo', '/O2', '/W3', '/GL', '/DNDEBUG', '/MD'] - - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/Zi', - '/W3', - '/D_DEBUG', - ] - - ldflags = ['/nologo', '/INCREMENTAL:NO', '/LTCG'] - - ldflags_debug = ['/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'] - - self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1'] - self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1'] - self.ldflags_shared = [ - *ldflags, - '/DLL', - '/MANIFEST:EMBED,ID=2', - '/MANIFESTUAC:NO', - ] - self.ldflags_shared_debug = [ - *ldflags_debug, - '/DLL', - '/MANIFEST:EMBED,ID=2', - '/MANIFESTUAC:NO', - ] - self.ldflags_static = [*ldflags] - self.ldflags_static_debug = [*ldflags_debug] - - self._ldflags = { - (CCompiler.EXECUTABLE, None): self.ldflags_exe, - (CCompiler.EXECUTABLE, False): self.ldflags_exe, - (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug, - (CCompiler.SHARED_OBJECT, None): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, False): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug, - (CCompiler.SHARED_LIBRARY, None): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, False): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug, - } - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - @property - def out_extensions(self): - return { - **super().out_extensions, - **{ - ext: self.res_extension - for ext in self._rc_extensions + self._mc_extensions - }, - } - - def compile( # noqa: C901 - self, - sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - add_cpp_opts = False - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - add_cpp_opts = True - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + [output_opt, input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. 
- h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) - base, _ = os.path.splitext(os.path.basename(src)) - rc_file = os.path.join(rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc, "/fo" + obj, rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError(f"Don't know how to compile {src} to {obj}") - - args = [self.cc] + compile_opts + pp_opts - if add_cpp_opts: - args.append('/EHsc') - args.append(input_opt) - args.append("/Fo" + obj) - args.extend(extra_postargs) - - try: - self.spawn(args) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? - try: - log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args)) - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def link( - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - if runtime_library_dirs: - self.warn( - "I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs) - ) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ldflags = self._ldflags[target_desc, debug] - - export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])] - - ld_args = ( - ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename] - ) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. 
- build_temp = os.path.dirname(objects[0]) - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename) - ) - implib_file = os.path.join(build_temp, self.library_filename(dll_name)) - ld_args.append('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - output_dir = os.path.dirname(os.path.abspath(output_filename)) - self.mkpath(output_dir) - try: - log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args)) - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def spawn(self, cmd): - env = dict(os.environ, PATH=self._paths) - with self._fallback_spawn(cmd, env) as fallback: - return super().spawn(cmd, env=env) - return fallback.value - - @contextlib.contextmanager - def _fallback_spawn(self, cmd, env): - """ - Discovered in pypa/distutils#15, some tools monkeypatch the compiler, - so the 'env' kwarg causes a TypeError. Detect this condition and - restore the legacy, unsafe behavior. - """ - bag = type('Bag', (), {})() - try: - yield bag - except TypeError as exc: - if "unexpected keyword argument 'env'" not in str(exc): - raise - else: - return - warnings.warn("Fallback spawn triggered. Please update distutils monkeypatch.") - with mock.patch.dict('os.environ', env): - bag.value = super().spawn(cmd) - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC" - ) - - def library_option(self, lib): - return self.library_filename(lib) - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.isfile(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/spaces/Billyosoro/ESRGAN/realesrgan/archs/discriminator_arch.py b/spaces/Billyosoro/ESRGAN/realesrgan/archs/discriminator_arch.py deleted file mode 100644 index 4b66ab1226d6793de846bc9828bbe427031a0e2d..0000000000000000000000000000000000000000 --- a/spaces/Billyosoro/ESRGAN/realesrgan/archs/discriminator_arch.py +++ /dev/null @@ -1,67 +0,0 @@ -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn as nn -from torch.nn import functional as F -from torch.nn.utils import spectral_norm - - -@ARCH_REGISTRY.register() -class UNetDiscriminatorSN(nn.Module): - """Defines a U-Net discriminator with spectral normalization (SN) - - It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - Arg: - num_in_ch (int): Channel number of inputs. Default: 3. - num_feat (int): Channel number of base intermediate features. Default: 64. - skip_connection (bool): Whether to use skip connections between U-Net. Default: True. 
- """ - - def __init__(self, num_in_ch, num_feat=64, skip_connection=True): - super(UNetDiscriminatorSN, self).__init__() - self.skip_connection = skip_connection - norm = spectral_norm - # the first convolution - self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) - # downsample - self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) - self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) - self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) - # upsample - self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) - self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) - self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) - # extra convolutions - self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) - self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) - - def forward(self, x): - # downsample - x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) - x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) - x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) - x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) - - # upsample - x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) - x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x4 = x4 + x2 - x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False) - x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x5 = x5 + x1 - x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False) - x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) - - if self.skip_connection: - x6 = x6 + x0 - - # extra convolutions - out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) - out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) - out = self.conv9(out) - - return out diff --git a/spaces/BramVanroy/mai-simplification-nl-2023-demo/utils.py b/spaces/BramVanroy/mai-simplification-nl-2023-demo/utils.py deleted file mode 100644 index 9bc79d6686355dddd490e7913feffe4e52b84240..0000000000000000000000000000000000000000 --- a/spaces/BramVanroy/mai-simplification-nl-2023-demo/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import List, Tuple - -import streamlit as st -import torch -from optimum.bettertransformer import BetterTransformer -from torch import nn, qint8 -from torch.quantization import quantize_dynamic -from transformers import T5ForConditionalGeneration, T5Tokenizer - - -@st.cache_resource(show_spinner=False) -def get_resources(quantize: bool = True, no_cuda: bool = False) -> Tuple[T5ForConditionalGeneration, T5Tokenizer]: - """Load a T5 model and its (slow) tokenizer""" - tokenizer = T5Tokenizer.from_pretrained("BramVanroy/ul2-base-dutch-simplification-mai-2023", use_fast=False) - model = T5ForConditionalGeneration.from_pretrained("BramVanroy/ul2-base-dutch-simplification-mai-2023") - - model = BetterTransformer.transform(model, keep_original_model=False) - model.resize_token_embeddings(len(tokenizer)) - - if torch.cuda.is_available() and not no_cuda: - model = model.to("cuda") - elif quantize: # Quantization not supported on CUDA - model = quantize_dynamic(model, {nn.Linear, nn.Dropout, nn.LayerNorm}, dtype=qint8) - - model.eval() - - return model, tokenizer 
- - -def batchify(iterable, batch_size=16): - """Turn an iterable in a batch generator - :param iterable: iterable to batchify - :param batch_size: batch size - """ - num_items = len(iterable) - for idx in range(0, num_items, batch_size): - yield iterable[idx : min(idx + batch_size, num_items)] - - -def simplify( - texts: List[str], model: T5ForConditionalGeneration, tokenizer: T5Tokenizer, batch_size: int = 16 -) -> List[str]: - """Simplify a given set of texts with a given model and tokenizer. Yields results in batches of 'batch_size' - :param texts: texts to simplify - :param model: model to use for simplification - :param tokenizer: tokenizer to use for simplification - :param batch_size: batch size to yield results in - """ - for batch_texts in batchify(texts, batch_size=batch_size): - nlg_batch_texts = ["[NLG] " + text for text in batch_texts] - encoded = tokenizer(nlg_batch_texts, return_tensors="pt", padding=True) - encoded = {k: v.to(model.device) for k, v in encoded.items()} - gen_kwargs = { - "max_new_tokens": 128, - "num_beams": 3, - } - - with torch.no_grad(): - encoded = {k: v.to(model.device) for k, v in encoded.items()} - generated = model.generate(**encoded, **gen_kwargs).cpu() - - yield batch_texts, tokenizer.batch_decode(generated, skip_special_tokens=True) diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_embed/catch.cpp b/spaces/CVPR/LIVE/pybind11/tests/test_embed/catch.cpp deleted file mode 100644 index dd137385cb32250b8640169934fb96aa5e80f069..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/test_embed/catch.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// The Catch implementation is compiled here. This is a standalone -// translation unit to avoid recompiling it for every test change. - -#include - -#ifdef _MSC_VER -// Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch -// 2.0.1; this should be fixed in the next catch release after 2.0.1). -# pragma warning(disable: 4996) -#endif - -#define CATCH_CONFIG_RUNNER -#include - -namespace py = pybind11; - -int main(int argc, char *argv[]) { - py::scoped_interpreter guard{}; - auto result = Catch::Session().run(argc, argv); - - return result < 0xff ? result : 0xff; -} diff --git a/spaces/CVPR/LIVE/thrust/thrust/merge.h b/spaces/CVPR/LIVE/thrust/thrust/merge.h deleted file mode 100644 index 184141f6f626b1d667564867e3d1ce045fc65d19..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/merge.h +++ /dev/null @@ -1,680 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file merge.h - * \brief Merging sorted ranges - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ - - -/*! \addtogroup merging Merging - * \ingroup algorithms - * \{ - */ - - -/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) - * into a single sorted range. 
That is, it copies from [first1, last1) and - * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) - * such that the resulting range is in ascending order. \p merge is stable, meaning both that the - * relative order of elements within each input range is preserved, and that for equivalent elements - * in both input ranges the element from the first range precedes the element from the second. The - * return value is result + (last1 - first1) + (last2 - first2). - * - * This version of \p merge compares elements using \c operator<. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the first input range. - * \param last1 The end of the first input range. - * \param first2 The beginning of the second input range. - * \param last2 The end of the second input range. - * \param result The beginning of the merged output. - * \return The end of the output range. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1 and \p InputIterator2 have the same \c value_type, - * \p InputIterator1's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2 and \p InputIterator1 have the same \c value_type, - * \p InputIterator2's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam OutputIterator is a model of Output Iterator. - * - * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. - * \pre The resulting range shall not overlap with either input range. - * - * The following code snippet demonstrates how to use - * \p merge to compute the merger of two sorted sets of integers using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * ... - * int A1[6] = {1, 3, 5, 7, 9, 11}; - * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; - * - * int result[13]; - * - * int *result_end = - * thrust::merge(thrust::host, - * A1, A1 + 6, - * A2, A2 + 7, - * result); - * // result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} - * \endcode - * - * \see http://www.sgi.com/tech/stl/merge.html - * \see \p set_union - * \see \p sort - * \see \p is_sorted - */ -template -__host__ __device__ - OutputIterator merge(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) - * into a single sorted range. That is, it copies from [first1, last1) and - * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) - * such that the resulting range is in ascending order. 
\p merge is stable, meaning both that the - * relative order of elements within each input range is preserved, and that for equivalent elements - * in both input ranges the element from the first range precedes the element from the second. The - * return value is result + (last1 - first1) + (last2 - first2). - * - * This version of \p merge compares elements using \c operator<. - * - * \param first1 The beginning of the first input range. - * \param last1 The end of the first input range. - * \param first2 The beginning of the second input range. - * \param last2 The end of the second input range. - * \param result The beginning of the merged output. - * \return The end of the output range. - * - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1 and \p InputIterator2 have the same \c value_type, - * \p InputIterator1's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2 and \p InputIterator1 have the same \c value_type, - * \p InputIterator2's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam OutputIterator is a model of Output Iterator. - * - * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. - * \pre The resulting range shall not overlap with either input range. - * - * The following code snippet demonstrates how to use - * \p merge to compute the merger of two sorted sets of integers. - * - * \code - * #include - * ... - * int A1[6] = {1, 3, 5, 7, 9, 11}; - * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; - * - * int result[13]; - * - * int *result_end = thrust::merge(A1, A1 + 6, A2, A2 + 7, result); - * // result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} - * \endcode - * - * \see http://www.sgi.com/tech/stl/merge.html - * \see \p set_union - * \see \p sort - * \see \p is_sorted - */ -template - OutputIterator merge(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result); - - -/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) - * into a single sorted range. That is, it copies from [first1, last1) and - * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) - * such that the resulting range is in ascending order. \p merge is stable, meaning both that the - * relative order of elements within each input range is preserved, and that for equivalent elements - * in both input ranges the element from the first range precedes the element from the second. The - * return value is result + (last1 - first1) + (last2 - first2). - * - * This version of \p merge compares elements using a function object \p comp. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the first input range. - * \param last1 The end of the first input range. - * \param first2 The beginning of the second input range. 
- * \param last2 The end of the second input range. - * \param result The beginning of the merged output. - * \param comp Comparison operator. - * \return The end of the output range. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam StrictWeakCompare is a model of Strict Weak Ordering. - * - * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. - * \pre The resulting range shall not overlap with either input range. - * - * The following code snippet demonstrates how to use - * \p merge to compute the merger of two sets of integers sorted in - * descending order using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * int A1[6] = {11, 9, 7, 5, 3, 1}; - * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; - * - * int result[13]; - * - * int *result_end = thrust::merge(thrust::host, - * A1, A1 + 6, - * A2, A2 + 7, - * result, - * thrust::greater()); - * // result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} - * \endcode - * - * \see http://www.sgi.com/tech/stl/merge.html - * \see \p sort - * \see \p is_sorted - */ -template -__host__ __device__ - OutputIterator merge(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakCompare comp); - - -/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) - * into a single sorted range. That is, it copies from [first1, last1) and - * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) - * such that the resulting range is in ascending order. \p merge is stable, meaning both that the - * relative order of elements within each input range is preserved, and that for equivalent elements - * in both input ranges the element from the first range precedes the element from the second. The - * return value is result + (last1 - first1) + (last2 - first2). - * - * This version of \p merge compares elements using a function object \p comp. - * - * \param first1 The beginning of the first input range. - * \param last1 The end of the first input range. - * \param first2 The beginning of the second input range. - * \param last2 The end of the second input range. - * \param result The beginning of the merged output. - * \param comp Comparison operator. - * \return The end of the output range. - * - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. 
- * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam StrictWeakCompare is a model of Strict Weak Ordering. - * - * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. - * \pre The resulting range shall not overlap with either input range. - * - * The following code snippet demonstrates how to use - * \p merge to compute the merger of two sets of integers sorted in - * descending order. - * - * \code - * #include - * #include - * ... - * int A1[6] = {11, 9, 7, 5, 3, 1}; - * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; - * - * int result[13]; - * - * int *result_end = thrust::merge(A1, A1 + 6, A2, A2 + 7, result, thrust::greater()); - * // result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} - * \endcode - * - * \see http://www.sgi.com/tech/stl/merge.html - * \see \p sort - * \see \p is_sorted - */ -template - OutputIterator merge(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator2 last2, - OutputIterator result, - StrictWeakCompare comp); - - -/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from - * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, - * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending key order. - * - * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) - * and [values_first2 + (keys_last2 - keys_first2)) into a single range, - * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending order implied by each input element's associated key. - * - * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is - * preserved, and that for equivalent elements in all input key ranges the element from the first range - * precedes the element from the second. - * - * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) - * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first1 The beginning of the first input range of keys. - * \param keys_last1 The end of the first input range of keys. - * \param keys_first2 The beginning of the second input range of keys. - * \param keys_last2 The end of the second input range of keys. - * \param values_first1 The beginning of the first input range of values. - * \param values_first2 The beginning of the first input range of values. - * \param keys_result The beginning of the merged output range of keys. - * \param values_result The beginning of the merged output range of values. - * \return A \p pair \c p such that p.first is the end of the output range of keys, - * and such that p.second is the end of the output range of values. - * - * \tparam DerivedPolicy The name of the derived execution policy. 
- * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1 and \p InputIterator2 have the same \c value_type, - * \p InputIterator1's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2 and \p InputIterator1 have the same \c value_type, - * \p InputIterator2's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator3 is a model of Input Iterator, - * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam InputIterator4 is a model of Input Iterator, - * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam OutputIterator1 is a model of Output Iterator. - * \tparam OutputIterator2 is a model of Output Iterator. - * - * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. - * \pre The resulting ranges shall not overlap with any input range. - * - * The following code snippet demonstrates how to use - * \p merge_by_key to compute the merger of two sets of integers sorted in - * ascending order using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * int A_keys[6] = {1, 3, 5, 7, 9, 11}; - * int A_vals[6] = {0, 0, 0, 0, 0, 0}; - * - * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13}; - * int B_vals[7] = {1, 1, 1, 1, 1, 1, 1}; - * - * int keys_result[13]; - * int vals_result[13]; - * - * thrust::pair end = - * thrust::merge_by_key(thrust::host, - * A_keys, A_keys + 6, - * B_keys, B_keys + 7, - * A_vals, B_vals, - * keys_result, vals_result); - * - * // keys_result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} - * // vals_result = {0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1} - * \endcode - * - * \see merge - * \see \p sort_by_key - * \see \p is_sorted - */ -template -__host__ __device__ - thrust::pair - merge_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 keys_first1, InputIterator1 keys_last1, - InputIterator2 keys_first2, InputIterator2 keys_last2, - InputIterator3 values_first1, InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from - * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, - * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending key order. - * - * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) - * and [values_first2 + (keys_last2 - keys_first2)) into a single range, - * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending order implied by each input element's associated key. 
- * - * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is - * preserved, and that for equivalent elements in all input key ranges the element from the first range - * precedes the element from the second. - * - * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) - * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). - * - * \param keys_first1 The beginning of the first input range of keys. - * \param keys_last1 The end of the first input range of keys. - * \param keys_first2 The beginning of the second input range of keys. - * \param keys_last2 The end of the second input range of keys. - * \param values_first1 The beginning of the first input range of values. - * \param values_first2 The beginning of the first input range of values. - * \param keys_result The beginning of the merged output range of keys. - * \param values_result The beginning of the merged output range of values. - * \return A \p pair \c p such that p.first is the end of the output range of keys, - * and such that p.second is the end of the output range of values. - * - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1 and \p InputIterator2 have the same \c value_type, - * \p InputIterator1's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2 and \p InputIterator1 have the same \c value_type, - * \p InputIterator2's \c value_type is a model of LessThan Comparable, - * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. - * \tparam InputIterator3 is a model of Input Iterator, - * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam InputIterator4 is a model of Input Iterator, - * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam OutputIterator1 is a model of Output Iterator. - * \tparam OutputIterator2 is a model of Output Iterator. - * - * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. - * \pre The resulting ranges shall not overlap with any input range. - * - * The following code snippet demonstrates how to use - * \p merge_by_key to compute the merger of two sets of integers sorted in - * ascending order. - * - * \code - * #include - * #include - * ... 
- * int A_keys[6] = {1, 3, 5, 7, 9, 11}; - * int A_vals[6] = {0, 0, 0, 0, 0, 0}; - * - * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13}; - * int B_vals[7] = {1, 1, 1, 1, 1, 1, 1}; - * - * int keys_result[13]; - * int vals_result[13]; - * - * thrust::pair end = thrust::merge_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, B_vals, keys_result, vals_result); - * - * // keys_result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} - * // vals_result = {0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1} - * \endcode - * - * \see merge - * \see \p sort_by_key - * \see \p is_sorted - */ -template - thrust::pair - merge_by_key(InputIterator1 keys_first1, InputIterator1 keys_last1, - InputIterator2 keys_first2, InputIterator2 keys_last2, - InputIterator3 values_first1, InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from - * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, - * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending key order. - * - * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) - * and [values_first2 + (keys_last2 - keys_first2)) into a single range, - * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending order implied by each input element's associated key. - * - * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is - * preserved, and that for equivalent elements in all input key ranges the element from the first range - * precedes the element from the second. - * - * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) - * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). - * - * This version of \p merge_by_key compares key elements using a function object \p comp. - * - * The algorithm's execution is parallelized using \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first1 The beginning of the first input range of keys. - * \param keys_last1 The end of the first input range of keys. - * \param keys_first2 The beginning of the second input range of keys. - * \param keys_last2 The end of the second input range of keys. - * \param values_first1 The beginning of the first input range of values. - * \param values_first2 The beginning of the first input range of values. - * \param keys_result The beginning of the merged output range of keys. - * \param values_result The beginning of the merged output range of values. - * \param comp Comparison operator. - * \return A \p pair \c p such that p.first is the end of the output range of keys, - * and such that p.second is the end of the output range of values. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. 
- * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. - * \tparam InputIterator3 is a model of Input Iterator, - * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam InputIterator4 is a model of Input Iterator, - * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam OutputIterator1 is a model of Output Iterator. - * \tparam OutputIterator2 is a model of Output Iterator. - * \tparam StrictWeakCompare is a model of Strict Weak Ordering. - * - * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. - * \pre The resulting ranges shall not overlap with any input range. - * - * The following code snippet demonstrates how to use - * \p merge_by_key to compute the merger of two sets of integers sorted in - * descending order using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * int A_keys[6] = {11, 9, 7, 5, 3, 1}; - * int A_vals[6] = { 0, 0, 0, 0, 0, 0}; - * - * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1}; - * int B_vals[7] = { 1, 1, 1, 1, 1, 1, 1}; - * - * int keys_result[13]; - * int vals_result[13]; - * - * thrust::pair end = - * thrust::merge_by_key(thrust::host, - * A_keys, A_keys + 6, - * B_keys, B_keys + 7, - * A_vals, B_vals, - * keys_result, vals_result, - * thrust::greater()); - * - * // keys_result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} - * // vals_result = { 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1} - * \endcode - * - * \see merge - * \see \p sort_by_key - * \see \p is_sorted - */ -template -__host__ __device__ - thrust::pair - merge_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 keys_first1, InputIterator1 keys_last1, - InputIterator2 keys_first2, InputIterator2 keys_last2, - InputIterator3 values_first1, InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result, - Compare comp); - - -/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from - * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, - * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending key order. - * - * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) - * and [values_first2 + (keys_last2 - keys_first2)) into a single range, - * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that - * the resulting range is in ascending order implied by each input element's associated key. - * - * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is - * preserved, and that for equivalent elements in all input key ranges the element from the first range - * precedes the element from the second. - * - * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) - * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). - * - * This version of \p merge_by_key compares key elements using a function object \p comp. - * - * \param keys_first1 The beginning of the first input range of keys. - * \param keys_last1 The end of the first input range of keys. 
- * \param keys_first2 The beginning of the second input range of keys. - * \param keys_last2 The end of the second input range of keys. - * \param values_first1 The beginning of the first input range of values. - * \param values_first2 The beginning of the first input range of values. - * \param keys_result The beginning of the merged output range of keys. - * \param values_result The beginning of the merged output range of values. - * \param comp Comparison operator. - * \return A \p pair \c p such that p.first is the end of the output range of keys, - * and such that p.second is the end of the output range of values. - * - * \tparam InputIterator1 is a model of Input Iterator, - * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. - * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. - * \tparam InputIterator2 is a model of Input Iterator, - * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. - * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. - * \tparam InputIterator3 is a model of Input Iterator, - * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam InputIterator4 is a model of Input Iterator, - * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. - * \tparam OutputIterator1 is a model of Output Iterator. - * \tparam OutputIterator2 is a model of Output Iterator. - * \tparam StrictWeakCompare is a model of Strict Weak Ordering. - * - * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. - * \pre The resulting ranges shall not overlap with any input range. - * - * The following code snippet demonstrates how to use - * \p merge_by_key to compute the merger of two sets of integers sorted in - * descending order. - * - * \code - * #include - * #include - * ... - * int A_keys[6] = {11, 9, 7, 5, 3, 1}; - * int A_vals[6] = { 0, 0, 0, 0, 0, 0}; - * - * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1}; - * int B_vals[7] = { 1, 1, 1, 1, 1, 1, 1}; - * - * int keys_result[13]; - * int vals_result[13]; - * - * thrust::pair end = thrust::merge_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, B_vals, keys_result, vals_result, thrust::greater()); - * - * // keys_result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} - * // vals_result = { 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1} - * \endcode - * - * \see merge - * \see \p sort_by_key - * \see \p is_sorted - */ -template - thrust::pair - merge_by_key(InputIterator1 keys_first1, InputIterator1 keys_last1, - InputIterator2 keys_first2, InputIterator2 keys_last2, - InputIterator3 values_first1, InputIterator4 values_first2, - OutputIterator1 keys_result, - OutputIterator2 values_result, - StrictWeakCompare comp); - - -/*! 
\} // merging - */ - -} // end thrust - -#include - diff --git a/spaces/CVPR/WALT/docker/Dockerfile b/spaces/CVPR/WALT/docker/Dockerfile deleted file mode 100644 index fcbef9057621342d69644598f2fb865aee80001f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/docker/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -ARG PYTORCH="1.9.0" -ARG CUDA="11.1" -ARG CUDNN="8" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" -RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub -RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub -RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install MMCV -#RUN pip install mmcv-full==1.3.8 -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html -# -f https://openmmlab.oss-accelerate.aliyuncs.com/mmcv/dist/index.html -RUN pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html -# Install MMDetection -RUN conda clean --all -RUN git clone https://github.com/open-mmlab/mmdetection.git /mmdetection -WORKDIR /mmdetection -ENV FORCE_CUDA="1" -RUN cd /mmdetection && git checkout 7bd39044f35aec4b90dd797b965777541a8678ff -RUN pip install -r requirements/build.txt -RUN pip install --no-cache-dir -e . -RUN apt-get update -RUN apt-get install -y vim -RUN pip uninstall -y pycocotools -RUN pip install mmpycocotools timm scikit-image imagesize - - -# make sure we don't overwrite some existing directory called "apex" -WORKDIR /tmp/unique_for_apex -# uninstall Apex if present, twice to make absolutely sure :) -RUN pip uninstall -y apex || : -RUN pip uninstall -y apex || : -# SHA is something the user can touch to force recreation of this Docker layer, -# and therefore force cloning of the latest version of Apex -RUN SHA=ToUcHMe git clone https://github.com/NVIDIA/apex.git -WORKDIR /tmp/unique_for_apex/apex -RUN pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . -RUN pip install seaborn sklearn imantics gradio -WORKDIR /code -ENTRYPOINT ["python", "app.py"] - -#RUN git clone https://github.com/NVIDIA/apex -#RUN cd apex -#RUN pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . 
-#RUN pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ - diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/anchor_free_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/anchor_free_head.py deleted file mode 100644 index 1814a0cc4f577f470f74f025440073a0aaa1ebd0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/dense_heads/anchor_free_head.py +++ /dev/null @@ -1,340 +0,0 @@ -from abc import abstractmethod - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): - """Anchor-free head (FCOS, Fovea, RepPoints, etc.). - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - stacked_convs (int): Number of stacking convs of the head. - strides (tuple): Downsample factor of each feature map. - dcn_on_last_conv (bool): If true, use dcn in the last layer of - towers. Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Default: "auto". - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. 
- """ # noqa: W605 - - _version = 1 - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=4, - strides=(4, 8, 16, 32, 64), - dcn_on_last_conv=False, - conv_bias='auto', - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - conv_cfg=None, - norm_cfg=None, - train_cfg=None, - test_cfg=None): - super(AnchorFreeHead, self).__init__() - self.num_classes = num_classes - self.cls_out_channels = num_classes - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.dcn_on_last_conv = dcn_on_last_conv - assert conv_bias == 'auto' or isinstance(conv_bias, bool) - self.conv_bias = conv_bias - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - - self._init_layers() - - def _init_layers(self): - """Initialize layers of the head.""" - self._init_cls_convs() - self._init_reg_convs() - self._init_predictor() - - def _init_cls_convs(self): - """Initialize classification conv layers of the head.""" - self.cls_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_reg_convs(self): - """Initialize bbox regression conv layers of the head.""" - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_predictor(self): - """Initialize predictor layers of the head.""" - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.conv_cls, std=0.01, bias=bias_cls) - normal_init(self.conv_reg, std=0.01) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Hack some keys of the model state dict so that can load checkpoints - of previous version.""" - version = local_metadata.get('version', None) - if version is None: - # the key is different in early versions - # for example, 'fcos_cls' become 'conv_cls' now - bbox_head_keys = [ - k for k in state_dict.keys() if k.startswith(prefix) - ] - ori_predictor_keys = [] - new_predictor_keys = [] - # e.g. 
'fcos_cls' or 'fcos_reg' - for key in bbox_head_keys: - ori_predictor_keys.append(key) - key = key.split('.') - conv_name = None - if key[1].endswith('cls'): - conv_name = 'conv_cls' - elif key[1].endswith('reg'): - conv_name = 'conv_reg' - elif key[1].endswith('centerness'): - conv_name = 'conv_centerness' - else: - assert NotImplementedError - if conv_name is not None: - key[1] = conv_name - new_predictor_keys.append('.'.join(key)) - else: - ori_predictor_keys.pop(-1) - for i in range(len(new_predictor_keys)): - state_dict[new_predictor_keys[i]] = state_dict.pop( - ori_predictor_keys[i]) - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually contain classification scores and bbox predictions. - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - """ - return multi_apply(self.forward_single, feats)[:2] - - def forward_single(self, x): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - - Returns: - tuple: Scores for each class, bbox predictions, features - after classification and regression conv layers, some - models needs these features like FCOS. - """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - cls_score = self.conv_cls(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - bbox_pred = self.conv_reg(reg_feat) - return cls_score, bbox_pred, cls_feat, reg_feat - - @abstractmethod - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - """ - - raise NotImplementedError - - @abstractmethod - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - img_metas, - cfg=None, - rescale=None): - """Transform network output for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_points * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W) - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used - rescale (bool): If True, return boxes in original image space - """ - - raise NotImplementedError - - @abstractmethod - def get_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - """ - raise NotImplementedError - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points of a single scale level.""" - h, w = featmap_size - x_range = torch.arange(w, dtype=dtype, device=device) - y_range = torch.arange(h, dtype=dtype, device=device) - y, x = torch.meshgrid(y_range, x_range) - if flatten: - y = y.flatten() - x = x.flatten() - return y, x - - def get_points(self, featmap_sizes, dtype, device, flatten=False): - """Get points according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - dtype (torch.dtype): Type of points. - device (torch.device): Device of points. - - Returns: - tuple: points of each image. - """ - mlvl_points = [] - for i in range(len(featmap_sizes)): - mlvl_points.append( - self._get_points_single(featmap_sizes[i], self.strides[i], - dtype, device, flatten)) - return mlvl_points - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. 
- - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/spaces/CVPR/lama-example/saicinpainting/evaluation/masks/mask.py b/spaces/CVPR/lama-example/saicinpainting/evaluation/masks/mask.py deleted file mode 100644 index 3e34d0675a781fba983cb542f18390255aaf2609..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/saicinpainting/evaluation/masks/mask.py +++ /dev/null @@ -1,429 +0,0 @@ -import enum -from copy import deepcopy - -import numpy as np -from skimage import img_as_ubyte -from skimage.transform import rescale, resize -try: - from detectron2 import model_zoo - from detectron2.config import get_cfg - from detectron2.engine import DefaultPredictor - DETECTRON_INSTALLED = True -except: - print("Detectron v2 is not installed") - DETECTRON_INSTALLED = False - -from .countless.countless2d import zero_corrected_countless - - -class ObjectMask(): - def __init__(self, mask): - self.height, self.width = mask.shape - (self.up, self.down), (self.left, self.right) = self._get_limits(mask) - self.mask = mask[self.up:self.down, self.left:self.right].copy() - - @staticmethod - def _get_limits(mask): - def indicator_limits(indicator): - lower = indicator.argmax() - upper = len(indicator) - indicator[::-1].argmax() - return lower, upper - - vertical_indicator = mask.any(axis=1) - vertical_limits = indicator_limits(vertical_indicator) - - horizontal_indicator = mask.any(axis=0) - horizontal_limits = indicator_limits(horizontal_indicator) - - return vertical_limits, horizontal_limits - - def _clean(self): - self.up, self.down, self.left, self.right = 0, 0, 0, 0 - self.mask = np.empty((0, 0)) - - def horizontal_flip(self, inplace=False): - if not inplace: - flipped = deepcopy(self) - return flipped.horizontal_flip(inplace=True) - - self.mask = self.mask[:, ::-1] - return self - - def vertical_flip(self, inplace=False): - if not inplace: - flipped = deepcopy(self) - return flipped.vertical_flip(inplace=True) - - self.mask = self.mask[::-1, :] - return self - - def image_center(self): - y_center = self.up + (self.down - self.up) / 2 - x_center = self.left + (self.right - self.left) / 2 - return y_center, x_center - - def rescale(self, scaling_factor, inplace=False): - if not inplace: - scaled = deepcopy(self) - return scaled.rescale(scaling_factor, inplace=True) - - scaled_mask = rescale(self.mask.astype(float), scaling_factor, order=0) > 0.5 - (up, down), (left, right) = self._get_limits(scaled_mask) - self.mask = scaled_mask[up:down, left:right] - - y_center, x_center = self.image_center() - mask_height, mask_width = self.mask.shape - self.up = int(round(y_center - mask_height / 2)) - self.down = self.up + mask_height - self.left = int(round(x_center - mask_width / 2)) - self.right = self.left + mask_width - return self - - def crop_to_canvas(self, vertical=True, horizontal=True, inplace=False): - if not inplace: - cropped = deepcopy(self) - cropped.crop_to_canvas(vertical=vertical, horizontal=horizontal, inplace=True) - return cropped - - if vertical: - if self.up >= self.height or self.down <= 0: - self._clean() - else: - cut_up, cut_down = max(-self.up, 0), max(self.down - self.height, 0) - if cut_up != 0: - self.mask = self.mask[cut_up:] - self.up = 0 - if cut_down != 0: - self.mask = self.mask[:-cut_down] - self.down = self.height - - if horizontal: - if self.left >= self.width or self.right <= 0: - self._clean() - else: - cut_left, cut_right = max(-self.left, 0), max(self.right - self.width, 0) - 
if cut_left != 0: - self.mask = self.mask[:, cut_left:] - self.left = 0 - if cut_right != 0: - self.mask = self.mask[:, :-cut_right] - self.right = self.width - - return self - - def restore_full_mask(self, allow_crop=False): - cropped = self.crop_to_canvas(inplace=allow_crop) - mask = np.zeros((cropped.height, cropped.width), dtype=bool) - mask[cropped.up:cropped.down, cropped.left:cropped.right] = cropped.mask - return mask - - def shift(self, vertical=0, horizontal=0, inplace=False): - if not inplace: - shifted = deepcopy(self) - return shifted.shift(vertical=vertical, horizontal=horizontal, inplace=True) - - self.up += vertical - self.down += vertical - self.left += horizontal - self.right += horizontal - return self - - def area(self): - return self.mask.sum() - - -class RigidnessMode(enum.Enum): - soft = 0 - rigid = 1 - - -class SegmentationMask: - def __init__(self, confidence_threshold=0.5, rigidness_mode=RigidnessMode.rigid, - max_object_area=0.3, min_mask_area=0.02, downsample_levels=6, num_variants_per_mask=4, - max_mask_intersection=0.5, max_foreground_coverage=0.5, max_foreground_intersection=0.5, - max_hidden_area=0.2, max_scale_change=0.25, horizontal_flip=True, - max_vertical_shift=0.1, position_shuffle=True): - """ - :param confidence_threshold: float; threshold for confidence of the panoptic segmentator to allow for - the instance. - :param rigidness_mode: RigidnessMode object - when soft, checks intersection only with the object from which the mask_object was produced - when rigid, checks intersection with any foreground class object - :param max_object_area: float; allowed upper bound for to be considered as mask_object. - :param min_mask_area: float; lower bound for mask to be considered valid - :param downsample_levels: int; defines width of the resized segmentation to obtain shifted masks; - :param num_variants_per_mask: int; maximal number of the masks for the same object; - :param max_mask_intersection: float; maximum allowed area fraction of intersection for 2 masks - produced by horizontal shift of the same mask_object; higher value -> more diversity - :param max_foreground_coverage: float; maximum allowed area fraction of intersection for foreground object to be - covered by mask; lower value -> less the objects are covered - :param max_foreground_intersection: float; maximum allowed area of intersection for the mask with foreground - object; lower value -> mask is more on the background than on the objects - :param max_hidden_area: upper bound on part of the object hidden by shifting object outside the screen area; - :param max_scale_change: allowed scale change for the mask_object; - :param horizontal_flip: if horizontal flips are allowed; - :param max_vertical_shift: amount of vertical movement allowed; - :param position_shuffle: shuffle - """ - - assert DETECTRON_INSTALLED, 'Cannot use SegmentationMask without detectron2' - self.cfg = get_cfg() - self.cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")) - self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml") - self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold - self.predictor = DefaultPredictor(self.cfg) - - self.rigidness_mode = RigidnessMode(rigidness_mode) - self.max_object_area = max_object_area - self.min_mask_area = min_mask_area - self.downsample_levels = downsample_levels - self.num_variants_per_mask = num_variants_per_mask - self.max_mask_intersection = 
max_mask_intersection - self.max_foreground_coverage = max_foreground_coverage - self.max_foreground_intersection = max_foreground_intersection - self.max_hidden_area = max_hidden_area - self.position_shuffle = position_shuffle - - self.max_scale_change = max_scale_change - self.horizontal_flip = horizontal_flip - self.max_vertical_shift = max_vertical_shift - - def get_segmentation(self, img): - im = img_as_ubyte(img) - panoptic_seg, segment_info = self.predictor(im)["panoptic_seg"] - return panoptic_seg, segment_info - - @staticmethod - def _is_power_of_two(n): - return (n != 0) and (n & (n-1) == 0) - - def identify_candidates(self, panoptic_seg, segments_info): - potential_mask_ids = [] - for segment in segments_info: - if not segment["isthing"]: - continue - mask = (panoptic_seg == segment["id"]).int().detach().cpu().numpy() - area = mask.sum().item() / np.prod(panoptic_seg.shape) - if area >= self.max_object_area: - continue - potential_mask_ids.append(segment["id"]) - return potential_mask_ids - - def downsample_mask(self, mask): - height, width = mask.shape - if not (self._is_power_of_two(height) and self._is_power_of_two(width)): - raise ValueError("Image sides are not power of 2.") - - num_iterations = width.bit_length() - 1 - self.downsample_levels - if num_iterations < 0: - raise ValueError(f"Width is lower than 2^{self.downsample_levels}.") - - if height.bit_length() - 1 < num_iterations: - raise ValueError("Height is too low to perform downsampling") - - downsampled = mask - for _ in range(num_iterations): - downsampled = zero_corrected_countless(downsampled) - - return downsampled - - def _augmentation_params(self): - scaling_factor = np.random.uniform(1 - self.max_scale_change, 1 + self.max_scale_change) - if self.horizontal_flip: - horizontal_flip = bool(np.random.choice(2)) - else: - horizontal_flip = False - vertical_shift = np.random.uniform(-self.max_vertical_shift, self.max_vertical_shift) - - return { - "scaling_factor": scaling_factor, - "horizontal_flip": horizontal_flip, - "vertical_shift": vertical_shift - } - - def _get_intersection(self, mask_array, mask_object): - intersection = mask_array[ - mask_object.up:mask_object.down, mask_object.left:mask_object.right - ] & mask_object.mask - return intersection - - def _check_masks_intersection(self, aug_mask, total_mask_area, prev_masks): - for existing_mask in prev_masks: - intersection_area = self._get_intersection(existing_mask, aug_mask).sum() - intersection_existing = intersection_area / existing_mask.sum() - intersection_current = 1 - (aug_mask.area() - intersection_area) / total_mask_area - if (intersection_existing > self.max_mask_intersection) or \ - (intersection_current > self.max_mask_intersection): - return False - return True - - def _check_foreground_intersection(self, aug_mask, foreground): - for existing_mask in foreground: - intersection_area = self._get_intersection(existing_mask, aug_mask).sum() - intersection_existing = intersection_area / existing_mask.sum() - if intersection_existing > self.max_foreground_coverage: - return False - intersection_mask = intersection_area / aug_mask.area() - if intersection_mask > self.max_foreground_intersection: - return False - return True - - def _move_mask(self, mask, foreground): - # Obtaining properties of the original mask_object: - orig_mask = ObjectMask(mask) - - chosen_masks = [] - chosen_parameters = [] - # to fix the case when resizing gives mask_object consisting only of False - scaling_factor_lower_bound = 0. 
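The clamp above exists because a nearest-neighbour downscale can erase a small object mask entirely, which is the all-`False` case the comment mentions; when that happens later in the loop, `scaling_factor_lower_bound` is raised to 1 so further variants stop shrinking. A minimal sketch of that failure mode (an illustrative aside with an arbitrary 64x64 canvas and 0.25 factor, not part of the deleted file):

```python
import numpy as np
from skimage.transform import rescale

mask = np.zeros((64, 64), dtype=bool)
mask[30:32, 30:32] = True                                   # a tiny 2x2 object

# Same call pattern as ObjectMask.rescale: nearest-neighbour resize, then threshold.
scaled = rescale(mask.astype(float), 0.25, order=0) > 0.5
print(scaled.any())   # typically False: the object vanished, which is what the lower bound guards against
```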
- - for var_idx in range(self.num_variants_per_mask): - # Obtaining augmentation parameters and applying them to the downscaled mask_object - augmentation_params = self._augmentation_params() - augmentation_params["scaling_factor"] = min([ - augmentation_params["scaling_factor"], - 2 * min(orig_mask.up, orig_mask.height - orig_mask.down) / orig_mask.height + 1., - 2 * min(orig_mask.left, orig_mask.width - orig_mask.right) / orig_mask.width + 1. - ]) - augmentation_params["scaling_factor"] = max([ - augmentation_params["scaling_factor"], scaling_factor_lower_bound - ]) - - aug_mask = deepcopy(orig_mask) - aug_mask.rescale(augmentation_params["scaling_factor"], inplace=True) - if augmentation_params["horizontal_flip"]: - aug_mask.horizontal_flip(inplace=True) - total_aug_area = aug_mask.area() - if total_aug_area == 0: - scaling_factor_lower_bound = 1. - continue - - # Fix if the element vertical shift is too strong and shown area is too small: - vertical_area = aug_mask.mask.sum(axis=1) / total_aug_area # share of area taken by rows - # number of rows which are allowed to be hidden from upper and lower parts of image respectively - max_hidden_up = np.searchsorted(vertical_area.cumsum(), self.max_hidden_area) - max_hidden_down = np.searchsorted(vertical_area[::-1].cumsum(), self.max_hidden_area) - # correcting vertical shift, so not too much area will be hidden - augmentation_params["vertical_shift"] = np.clip( - augmentation_params["vertical_shift"], - -(aug_mask.up + max_hidden_up) / aug_mask.height, - (aug_mask.height - aug_mask.down + max_hidden_down) / aug_mask.height - ) - # Applying vertical shift: - vertical_shift = int(round(aug_mask.height * augmentation_params["vertical_shift"])) - aug_mask.shift(vertical=vertical_shift, inplace=True) - aug_mask.crop_to_canvas(vertical=True, horizontal=False, inplace=True) - - # Choosing horizontal shift: - max_hidden_area = self.max_hidden_area - (1 - aug_mask.area() / total_aug_area) - horizontal_area = aug_mask.mask.sum(axis=0) / total_aug_area - max_hidden_left = np.searchsorted(horizontal_area.cumsum(), max_hidden_area) - max_hidden_right = np.searchsorted(horizontal_area[::-1].cumsum(), max_hidden_area) - allowed_shifts = np.arange(-max_hidden_left, aug_mask.width - - (aug_mask.right - aug_mask.left) + max_hidden_right + 1) - allowed_shifts = - (aug_mask.left - allowed_shifts) - - if self.position_shuffle: - np.random.shuffle(allowed_shifts) - - mask_is_found = False - for horizontal_shift in allowed_shifts: - aug_mask_left = deepcopy(aug_mask) - aug_mask_left.shift(horizontal=horizontal_shift, inplace=True) - aug_mask_left.crop_to_canvas(inplace=True) - - prev_masks = [mask] + chosen_masks - is_mask_suitable = self._check_masks_intersection(aug_mask_left, total_aug_area, prev_masks) & \ - self._check_foreground_intersection(aug_mask_left, foreground) - if is_mask_suitable: - aug_draw = aug_mask_left.restore_full_mask() - chosen_masks.append(aug_draw) - augmentation_params["horizontal_shift"] = horizontal_shift / aug_mask_left.width - chosen_parameters.append(augmentation_params) - mask_is_found = True - break - - if not mask_is_found: - break - - return chosen_parameters - - def _prepare_mask(self, mask): - height, width = mask.shape - target_width = width if self._is_power_of_two(width) else (1 << width.bit_length()) - target_height = height if self._is_power_of_two(height) else (1 << height.bit_length()) - - return resize(mask.astype('float32'), (target_height, target_width), order=0, mode='edge').round().astype('int32') - - def 
get_masks(self, im, return_panoptic=False): - panoptic_seg, segments_info = self.get_segmentation(im) - potential_mask_ids = self.identify_candidates(panoptic_seg, segments_info) - - panoptic_seg_scaled = self._prepare_mask(panoptic_seg.detach().cpu().numpy()) - downsampled = self.downsample_mask(panoptic_seg_scaled) - scene_objects = [] - for segment in segments_info: - if not segment["isthing"]: - continue - mask = downsampled == segment["id"] - if not np.any(mask): - continue - scene_objects.append(mask) - - mask_set = [] - for mask_id in potential_mask_ids: - mask = downsampled == mask_id - if not np.any(mask): - continue - - if self.rigidness_mode is RigidnessMode.soft: - foreground = [mask] - elif self.rigidness_mode is RigidnessMode.rigid: - foreground = scene_objects - else: - raise ValueError(f'Unexpected rigidness_mode: {rigidness_mode}') - - masks_params = self._move_mask(mask, foreground) - - full_mask = ObjectMask((panoptic_seg == mask_id).detach().cpu().numpy()) - - for params in masks_params: - aug_mask = deepcopy(full_mask) - aug_mask.rescale(params["scaling_factor"], inplace=True) - if params["horizontal_flip"]: - aug_mask.horizontal_flip(inplace=True) - - vertical_shift = int(round(aug_mask.height * params["vertical_shift"])) - horizontal_shift = int(round(aug_mask.width * params["horizontal_shift"])) - aug_mask.shift(vertical=vertical_shift, horizontal=horizontal_shift, inplace=True) - aug_mask = aug_mask.restore_full_mask().astype('uint8') - if aug_mask.mean() <= self.min_mask_area: - continue - mask_set.append(aug_mask) - - if return_panoptic: - return mask_set, panoptic_seg.detach().cpu().numpy() - else: - return mask_set - - -def propose_random_square_crop(mask, min_overlap=0.5): - height, width = mask.shape - mask_ys, mask_xs = np.where(mask > 0.5) # mask==0 is known fragment and mask==1 is missing - - if height < width: - crop_size = height - obj_left, obj_right = mask_xs.min(), mask_xs.max() - obj_width = obj_right - obj_left - left_border = max(0, min(width - crop_size - 1, obj_left + obj_width * min_overlap - crop_size)) - right_border = max(left_border + 1, min(width - crop_size, obj_left + obj_width * min_overlap)) - start_x = np.random.randint(left_border, right_border) - return start_x, 0, start_x + crop_size, height - else: - crop_size = width - obj_top, obj_bottom = mask_ys.min(), mask_ys.max() - obj_height = obj_bottom - obj_top - top_border = max(0, min(height - crop_size - 1, obj_top + obj_height * min_overlap - crop_size)) - bottom_border = max(top_border + 1, min(height - crop_size, obj_top + obj_height * min_overlap)) - start_y = np.random.randint(top_border, bottom_border) - return 0, start_y, width, start_y + crop_size diff --git a/spaces/CVPR/transfiner/configs/Misc/torchvision_imagenet_R_50.py b/spaces/CVPR/transfiner/configs/Misc/torchvision_imagenet_R_50.py deleted file mode 100644 index 0d75305bcf7445b98db84b3d489a1505d2fce5af..0000000000000000000000000000000000000000 --- a/spaces/CVPR/transfiner/configs/Misc/torchvision_imagenet_R_50.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -An example config file to train a ImageNet classifier with detectron2. -Model and dataloader both come from torchvision. -This shows how to use detectron2 as a general engine for any new models and tasks. 
- -To run, use the following command: - -python tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \ - --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/ - -""" - - -import torch -from torch import nn -from torch.nn import functional as F -from omegaconf import OmegaConf -import torchvision -from torchvision.transforms import transforms as T -from torchvision.models.resnet import ResNet, Bottleneck -from fvcore.common.param_scheduler import MultiStepParamScheduler - -from detectron2.solver import WarmupParamScheduler -from detectron2.solver.build import get_default_optimizer_params -from detectron2.config import LazyCall as L -from detectron2.model_zoo import get_config -from detectron2.data.samplers import TrainingSampler, InferenceSampler -from detectron2.evaluation import DatasetEvaluator -from detectron2.utils import comm - - -""" -Note: Here we put reusable code (models, evaluation, data) together with configs just as a -proof-of-concept, to easily demonstrate what's needed to train a ImageNet classifier in detectron2. -Writing code in configs offers extreme flexibility but is often not a good engineering practice. -In practice, you might want to put code in your project and import them instead. -""" - - -def build_data_loader(dataset, batch_size, num_workers, training=True): - return torch.utils.data.DataLoader( - dataset, - sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)), - batch_size=batch_size, - num_workers=num_workers, - pin_memory=True, - ) - - -class ClassificationNet(nn.Module): - def __init__(self, model: nn.Module): - super().__init__() - self.model = model - - @property - def device(self): - return list(self.model.parameters())[0].device - - def forward(self, inputs): - image, label = inputs - pred = self.model(image.to(self.device)) - if self.training: - label = label.to(self.device) - return F.cross_entropy(pred, label) - else: - return pred - - -class ClassificationAcc(DatasetEvaluator): - def reset(self): - self.corr = self.total = 0 - - def process(self, inputs, outputs): - image, label = inputs - self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item() - self.total += len(label) - - def evaluate(self): - all_corr_total = comm.all_gather([self.corr, self.total]) - corr = sum(x[0] for x in all_corr_total) - total = sum(x[1] for x in all_corr_total) - return {"accuracy": corr / total} - - -# --- End of code that could be in a project and be imported - - -dataloader = OmegaConf.create() -dataloader.train = L(build_data_loader)( - dataset=L(torchvision.datasets.ImageNet)( - root="/path/to/imagenet", - split="train", - transform=L(T.Compose)( - transforms=[ - L(T.RandomResizedCrop)(size=224), - L(T.RandomHorizontalFlip)(), - T.ToTensor(), - L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), - ] - ), - ), - batch_size=256 // 8, - num_workers=4, - training=True, -) - -dataloader.test = L(build_data_loader)( - dataset=L(torchvision.datasets.ImageNet)( - root="${...train.dataset.root}", - split="val", - transform=L(T.Compose)( - transforms=[ - L(T.Resize)(size=256), - L(T.CenterCrop)(size=224), - T.ToTensor(), - L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), - ] - ), - ), - batch_size=256 // 8, - num_workers=4, - training=False, -) - -dataloader.evaluator = L(ClassificationAcc)() - -model = L(ClassificationNet)( - model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True) -) - - -optimizer = L(torch.optim.SGD)( - 
params=L(get_default_optimizer_params)(), - lr=0.1, - momentum=0.9, - weight_decay=1e-4, -) - -lr_multiplier = L(WarmupParamScheduler)( - scheduler=L(MultiStepParamScheduler)( - values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100] - ), - warmup_length=1 / 100, - warmup_factor=0.1, -) - - -train = get_config("common/train.py").train -train.init_checkpoint = None -train.max_iter = 100 * 1281167 // 256 diff --git a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/hubert/hubert_model.py b/spaces/ChrisPreston/diff-svc_minato_aqua/modules/hubert/hubert_model.py deleted file mode 100644 index 47f76f7dd30a7dd90cc765c57a4ba87f3aeaecab..0000000000000000000000000000000000000000 --- a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/hubert/hubert_model.py +++ /dev/null @@ -1,243 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import librosa -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - # @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = torch.nn.functional.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - def forward(self, wav: torch.Tensor): - return self.units(wav) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) 
-> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice 
Conversion"`. - Args: - path (str): path of a pretrained model - """ - dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - hubert = HubertSoft() - checkpoint = torch.load(path, map_location="cpu") - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval().to(dev) - return hubert - - -def get_units(hbt_soft, raw_wav_path, dev=torch.device('cuda')): - wav, sr = librosa.load(raw_wav_path, sr=None) - assert (sr >= 16000) - if len(wav.shape) > 1: - wav = librosa.to_mono(wav) - if sr != 16000: - wav16 = librosa.resample(wav, sr, 16000) - else: - wav16 = wav - dev = torch.device("cuda" if (dev == torch.device('cuda') and torch.cuda.is_available()) else "cpu") - torch.cuda.is_available() and torch.cuda.empty_cache() - with torch.inference_mode(): - units = hbt_soft.units(torch.FloatTensor(wav16.astype(float)).unsqueeze(0).unsqueeze(0).to(dev)) - return units diff --git a/spaces/Cyril666/my_abi/modules/model.py b/spaces/Cyril666/my_abi/modules/model.py deleted file mode 100644 index dc19b937690f82d388fc2f9bd8127567618df5e7..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/my_abi/modules/model.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -import torch.nn as nn - -from utils import CharsetMapper - - -_default_tfmer_cfg = dict(d_model=512, nhead=8, d_inner=2048, # 1024 - dropout=0.1, activation='relu') - -class Model(nn.Module): - - def __init__(self, config): - super().__init__() - self.max_length = config.dataset_max_length + 1 - self.charset = CharsetMapper(config.dataset_charset_path, max_length=self.max_length) - - def load(self, source, device=None, strict=True): - state = torch.load(source, map_location=device) - self.load_state_dict(state['model'], strict=strict) - - def _get_length(self, logit, dim=-1): - """ Greed decoder to obtain length from logit""" - out = (logit.argmax(dim=-1) == self.charset.null_label) - abn = out.any(dim) - out = ((out.cumsum(dim) == 1) & out).max(dim)[1] - out = out + 1 # additional end token - out = torch.where(abn, out, out.new_tensor(logit.shape[1])) - return out - - @staticmethod - def _get_padding_mask(length, max_length): - length = length.unsqueeze(-1) - grid = torch.arange(0, max_length, device=length.device).unsqueeze(0) - return grid >= length - - @staticmethod - def _get_square_subsequent_mask(sz, device, diagonal=0, fw=True): - r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf'). - Unmasked positions are filled with float(0.0). - """ - mask = (torch.triu(torch.ones(sz, sz, device=device), diagonal=diagonal) == 1) - if fw: mask = mask.transpose(0, 1) - mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) - return mask - - @staticmethod - def _get_location_mask(sz, device=None): - mask = torch.eye(sz, device=device) - mask = mask.float().masked_fill(mask == 1, float('-inf')) - return mask diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_v_a_r.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_v_a_r.py deleted file mode 100644 index 6ea44dbab3b0a4b0da1e5327d077873867f0b520..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_v_a_r.py +++ /dev/null @@ -1,86 +0,0 @@ -from . 
import DefaultTable -from fontTools.misc import sstruct -from fontTools.misc.textTools import bytesjoin -from fontTools.ttLib.tables.TupleVariation import ( - compileTupleVariationStore, - decompileTupleVariationStore, - TupleVariation, -) - - -# https://www.microsoft.com/typography/otspec/cvar.htm -# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html - -CVAR_HEADER_FORMAT = """ - > # big endian - majorVersion: H - minorVersion: H - tupleVariationCount: H - offsetToData: H -""" - -CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) - - -class table__c_v_a_r(DefaultTable.DefaultTable): - dependencies = ["cvt ", "fvar"] - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.majorVersion, self.minorVersion = 1, 0 - self.variations = [] - - def compile(self, ttFont, useSharedPoints=False): - tupleVariationCount, tuples, data = compileTupleVariationStore( - variations=[v for v in self.variations if v.hasImpact()], - pointCount=len(ttFont["cvt "].values), - axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], - sharedTupleIndices={}, - useSharedPoints=useSharedPoints, - ) - header = { - "majorVersion": self.majorVersion, - "minorVersion": self.minorVersion, - "tupleVariationCount": tupleVariationCount, - "offsetToData": CVAR_HEADER_SIZE + len(tuples), - } - return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data]) - - def decompile(self, data, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {} - sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) - self.majorVersion = header["majorVersion"] - self.minorVersion = header["minorVersion"] - assert self.majorVersion == 1, self.majorVersion - self.variations = decompileTupleVariationStore( - tableTag=self.tableTag, - axisTags=axisTags, - tupleVariationCount=header["tupleVariationCount"], - pointCount=len(ttFont["cvt "].values), - sharedTuples=None, - data=data, - pos=CVAR_HEADER_SIZE, - dataPos=header["offsetToData"], - ) - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.majorVersion = int(attrs.get("major", "1")) - self.minorVersion = int(attrs.get("minor", "0")) - elif name == "tuple": - valueCount = len(ttFont["cvt "].values) - var = TupleVariation({}, [None] * valueCount) - self.variations.append(var) - for tupleElement in content: - if isinstance(tupleElement, tuple): - tupleName, tupleAttrs, tupleContent = tupleElement - var.fromXML(tupleName, tupleAttrs, tupleContent) - - def toXML(self, writer, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion) - writer.newline() - for var in self.variations: - var.toXML(writer, axisTags) diff --git a/spaces/Dao3/chatwithdocs/app.py b/spaces/Dao3/chatwithdocs/app.py deleted file mode 100644 index 7e64216b1e5c9a1d0839118b34b9682134fef623..0000000000000000000000000000000000000000 --- a/spaces/Dao3/chatwithdocs/app.py +++ /dev/null @@ -1,114 +0,0 @@ -import streamlit as st -from streamlit_chat import message -import os -from utils import ( - parse_docx, - parse_pdf, - parse_txt, - parse_csv, - parse_pptx, - search_docs, - embed_docs, - text_to_docs, - get_answer, - parse_any, - get_sources, - wrap_text_in_html, -) -from openai.error import OpenAIError - -def clear_submit(): - st.session_state["submit"] = False - -def set_openai_api_key(api_key: str): - 
st.session_state["OPENAI_API_KEY"] = api_key - -st.markdown('
和文档聊聊💬 by I-Robot.Life
', unsafe_allow_html=True) - -# Sidebar -index = None -doc = None -with st.sidebar: - user_secret = st.text_input( - "OpenAI API Key", - type="password", - placeholder="输入你的api-key (sk-开头)", - help="api-key应该从官网获取 https://platform.openai.com/account/api-keys.", - value=st.session_state.get("OPENAI_API_KEY", ""), - ) - if user_secret: - set_openai_api_key(user_secret) - - uploaded_file = st.file_uploader( - "上传你的文档,可以是pdf, docx, txt,扫描、影印的pdf暂不支持", - type=["pdf", "docx", "txt", "csv", "pptx", "js", "py", "json", "html", "css", "md"], - help="扫描、影印的pdf暂不支持!", - on_change=clear_submit, - ) - - if uploaded_file is not None: - if uploaded_file.name.endswith(".pdf"): - doc = parse_pdf(uploaded_file) - elif uploaded_file.name.endswith(".docx"): - doc = parse_docx(uploaded_file) - elif uploaded_file.name.endswith(".csv"): - doc = parse_csv(uploaded_file) - elif uploaded_file.name.endswith(".txt"): - doc = parse_txt(uploaded_file) - elif uploaded_file.name.endswith(".pptx"): - doc = parse_pptx(uploaded_file) - else: - doc = parse_any(uploaded_file) - #st.error("文档格式不支持") - #doc = None - text = text_to_docs(doc) - st.write(text) - try: - with st.spinner("正在拼命阅读... 你可以去接杯水再回来看看⏳"): - index = embed_docs(text) - st.session_state["api_key_configured"] = True - except OpenAIError as e: - st.error(e._message) - -tab1, tab2 = st.tabs(["Intro", "Chat with the File"]) -with tab1: - st.markdown("### 使用指南") - st.write("1,输入可用的api-key.") - st.write('2,上传文档...等待解析完成') - st.write('3,提问,得到回答') - - - st.write('感谢Daniel Avila,感谢Github', unsafe_allow_html=True) - -with tab2: - st.write('从官网链接获取apikey link: https://openai.com/api/') - if 'generated' not in st.session_state: - st.session_state['generated'] = [] - - if 'past' not in st.session_state: - st.session_state['past'] = [] - - def get_text(): - if user_secret: - st.header("关于文档,你想问..?") - input_text = st.text_area("You:", on_change=clear_submit) - return input_text - user_input = get_text() - - button = st.button("Submit") - if button or st.session_state.get("submit"): - if not user_input: - st.error("请输入问题") - else: - st.session_state["submit"] = True - sources = search_docs(index, user_input) - try: - answer = get_answer(sources, user_input) - st.session_state.past.append(user_input) - st.session_state.generated.append(answer["output_text"].split("SOURCES: ")[0]) - except OpenAIError as e: - st.error(e._message) - if st.session_state['generated']: - for i in range(len(st.session_state['generated'])-1, -1, -1): - message(st.session_state["generated"][i], key=str(i)) - message(st.session_state['past'][i], is_user=True, key=str(i) + '_user') \ No newline at end of file diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/time_watch.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/time_watch.py deleted file mode 100644 index e710d8376fe1d670ed40e9986298d9713ad4bdb0..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/time_watch.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -@Date: 2021/07/18 -@description: -""" -import time - - -class TimeWatch: - def __init__(self, name="", logger=None): - self.name = name - self.start = time.time() - self.logger = logger - - def __del__(self): - end = time.time() - output = f"{self.name} | time use {(end - self.start):.2f}s." 
- if self.logger: - self.logger.info(output) - else: - print(output) - - -if __name__ == '__main__': - w = TimeWatch("__main__") - time.sleep(2) \ No newline at end of file diff --git a/spaces/Datasculptor/DescriptionGPT/detic/evaluation/custom_coco_eval.py b/spaces/Datasculptor/DescriptionGPT/detic/evaluation/custom_coco_eval.py deleted file mode 100644 index 2ea1d5e5703a9922028178fbe87b2518a9f66683..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/DescriptionGPT/detic/evaluation/custom_coco_eval.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import copy -import io -import itertools -import json -import logging -import numpy as np -import os -import pickle -from collections import OrderedDict -import pycocotools.mask as mask_util -import torch -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -from tabulate import tabulate - -import detectron2.utils.comm as comm -from detectron2.config import CfgNode -from detectron2.data import MetadataCatalog -from detectron2.data.datasets.coco import convert_to_coco_json -from detectron2.evaluation.coco_evaluation import COCOEvaluator -from detectron2.structures import Boxes, BoxMode, pairwise_iou -from detectron2.utils.file_io import PathManager -from detectron2.utils.logger import create_small_table -from ..data.datasets.coco_zeroshot import categories_seen, categories_unseen - -class CustomCOCOEvaluator(COCOEvaluator): - def _derive_coco_results(self, coco_eval, iou_type, class_names=None): - """ - Additionally plot mAP for 'seen classes' and 'unseen classes' - """ - - metrics = { - "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], - }[iou_type] - - if coco_eval is None: - self._logger.warn("No predictions from the model!") - return {metric: float("nan") for metric in metrics} - - # the standard metrics - results = { - metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") - for idx, metric in enumerate(metrics) - } - self._logger.info( - "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) - ) - if not np.isfinite(sum(results.values())): - self._logger.info("Some metrics cannot be computed and is shown as NaN.") - - if class_names is None or len(class_names) <= 1: - return results - # Compute per-category AP - # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa - precisions = coco_eval.eval["precision"] - # precision has dims (iou, recall, cls, area range, max dets) - assert len(class_names) == precisions.shape[2] - - seen_names = set([x['name'] for x in categories_seen]) - unseen_names = set([x['name'] for x in categories_unseen]) - results_per_category = [] - results_per_category50 = [] - results_per_category50_seen = [] - results_per_category50_unseen = [] - for idx, name in enumerate(class_names): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - ap = np.mean(precision) if precision.size else float("nan") - results_per_category.append(("{}".format(name), float(ap * 100))) - precision50 = precisions[0, :, idx, 0, -1] - precision50 = precision50[precision50 > -1] - ap50 = np.mean(precision50) if precision50.size else float("nan") - 
results_per_category50.append(("{}".format(name), float(ap50 * 100))) - if name in seen_names: - results_per_category50_seen.append(float(ap50 * 100)) - if name in unseen_names: - results_per_category50_unseen.append(float(ap50 * 100)) - - # tabulate it - N_COLS = min(6, len(results_per_category) * 2) - results_flatten = list(itertools.chain(*results_per_category)) - results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) - table = tabulate( - results_2d, - tablefmt="pipe", - floatfmt=".3f", - headers=["category", "AP"] * (N_COLS // 2), - numalign="left", - ) - self._logger.info("Per-category {} AP: \n".format(iou_type) + table) - - - N_COLS = min(6, len(results_per_category50) * 2) - results_flatten = list(itertools.chain(*results_per_category50)) - results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) - table = tabulate( - results_2d, - tablefmt="pipe", - floatfmt=".3f", - headers=["category", "AP50"] * (N_COLS // 2), - numalign="left", - ) - self._logger.info("Per-category {} AP50: \n".format(iou_type) + table) - self._logger.info( - "Seen {} AP50: {}".format( - iou_type, - sum(results_per_category50_seen) / len(results_per_category50_seen), - )) - self._logger.info( - "Unseen {} AP50: {}".format( - iou_type, - sum(results_per_category50_unseen) / len(results_per_category50_unseen), - )) - - results.update({"AP-" + name: ap for name, ap in results_per_category}) - results["AP50-seen"] = sum(results_per_category50_seen) / len(results_per_category50_seen) - results["AP50-unseen"] = sum(results_per_category50_unseen) / len(results_per_category50_unseen) - return results \ No newline at end of file diff --git a/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/README.md b/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/README.md deleted file mode 100644 index b61f96a3f0f5df541bd4e0dfba3a468ceb1c54e9..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/LoRA-DreamBooth-Training-UI/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: LoRA DreamBooth Training UI -emoji: ⚡ -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -python_version: 3.10.9 -app_file: app.py -pinned: false -license: mit -duplicated_from: lora-library/LoRA-DreamBooth-Training-UI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/data_configs.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/data_configs.py deleted file mode 100644 index deccb0b1c266ad4b6abaef53d67ec1ed0ddbd462..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/configs/data_configs.py +++ /dev/null @@ -1,41 +0,0 @@ -from configs import transforms_config -from configs.paths_config import dataset_paths - - -DATASETS = { - 'ffhq_encode': { - 'transforms': transforms_config.EncodeTransforms, - 'train_source_root': dataset_paths['ffhq'], - 'train_target_root': dataset_paths['ffhq'], - 'test_source_root': dataset_paths['celeba_test'], - 'test_target_root': dataset_paths['celeba_test'], - }, - 'cars_encode': { - 'transforms': transforms_config.CarsEncodeTransforms, - 'train_source_root': dataset_paths['cars_train'], - 'train_target_root': dataset_paths['cars_train'], - 'test_source_root': dataset_paths['cars_test'], - 'test_target_root': dataset_paths['cars_test'], - }, - 'horse_encode': { - 'transforms': transforms_config.EncodeTransforms, - 'train_source_root': dataset_paths['horse_train'], - 'train_target_root': 
dataset_paths['horse_train'], - 'test_source_root': dataset_paths['horse_test'], - 'test_target_root': dataset_paths['horse_test'], - }, - 'church_encode': { - 'transforms': transforms_config.EncodeTransforms, - 'train_source_root': dataset_paths['church_train'], - 'train_target_root': dataset_paths['church_train'], - 'test_source_root': dataset_paths['church_test'], - 'test_target_root': dataset_paths['church_test'], - }, - 'cats_encode': { - 'transforms': transforms_config.EncodeTransforms, - 'train_source_root': dataset_paths['cats_train'], - 'train_target_root': dataset_paths['cats_train'], - 'test_source_root': dataset_paths['cats_test'], - 'test_target_root': dataset_paths['cats_test'], - } -} diff --git a/spaces/Dileepgorantala/dileepVoiceAI/app.py b/spaces/Dileepgorantala/dileepVoiceAI/app.py deleted file mode 100644 index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000 --- a/spaces/Dileepgorantala/dileepVoiceAI/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """You are a helpful assistant to answer user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response 
= { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/DragGan/DragGan/training/loss.py b/spaces/DragGan/DragGan/training/loss.py deleted file mode 100644 index 56748095c1fb409fedbf87b2375075440440f0b4..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/training/loss.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
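A side note on the voice-chat app above: `get_audio_reply_for_question` never parses the Play.ht event stream as JSON; it regex-scans the raw response text for URLs and keeps the last one as the finished clip. A small sketch of that step, where the payload and domain are invented purely for illustration and only the pattern is taken from `extract_urls`:

```python
import re

# Hypothetical event-stream text; the real Play.ht response format may differ.
sse_text = (
    "event: generating\n"
    "data: {}\n"
    "event: completed\n"
    'data: {"url": "https://example-audio-host.test/clips/demo.mp3"}\n'
)
url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*'   # same pattern as extract_urls
print(re.findall(url_pattern, sse_text)[-1])   # the app treats the last URL found as the audio file
```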
- -"""Loss functions.""" - -import numpy as np -import torch -from torch_utils import training_stats -from torch_utils.ops import conv2d_gradfix -from torch_utils.ops import upfirdn2d - -#---------------------------------------------------------------------------- - -class Loss: - def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg): # to be overridden by subclass - raise NotImplementedError() - -#---------------------------------------------------------------------------- - -class StyleGAN2Loss(Loss): - def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0): - super().__init__() - self.device = device - self.G = G - self.D = D - self.augment_pipe = augment_pipe - self.r1_gamma = r1_gamma - self.style_mixing_prob = style_mixing_prob - self.pl_weight = pl_weight - self.pl_batch_shrink = pl_batch_shrink - self.pl_decay = pl_decay - self.pl_no_weight_grad = pl_no_weight_grad - self.pl_mean = torch.zeros([], device=device) - self.blur_init_sigma = blur_init_sigma - self.blur_fade_kimg = blur_fade_kimg - - def run_G(self, z, c, update_emas=False): - ws = self.G.mapping(z, c, update_emas=update_emas) - if self.style_mixing_prob > 0: - with torch.autograd.profiler.record_function('style_mixing'): - cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) - cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1])) - ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, update_emas=False)[:, cutoff:] - img = self.G.synthesis(ws, update_emas=update_emas) - return img, ws - - def run_D(self, img, c, blur_sigma=0, update_emas=False): - blur_size = np.floor(blur_sigma * 3) - if blur_size > 0: - with torch.autograd.profiler.record_function('blur'): - f = torch.arange(-blur_size, blur_size + 1, device=img.device).div(blur_sigma).square().neg().exp2() - img = upfirdn2d.filter2d(img, f / f.sum()) - if self.augment_pipe is not None: - img = self.augment_pipe(img) - logits = self.D(img, c, update_emas=update_emas) - return logits - - def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg): - assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth'] - if self.pl_weight == 0: - phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase) - if self.r1_gamma == 0: - phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase) - blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0 - - # Gmain: Maximize logits for generated images. - if phase in ['Gmain', 'Gboth']: - with torch.autograd.profiler.record_function('Gmain_forward'): - gen_img, _gen_ws = self.run_G(gen_z, gen_c) - gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma) - training_stats.report('Loss/scores/fake', gen_logits) - training_stats.report('Loss/signs/fake', gen_logits.sign()) - loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits)) - training_stats.report('Loss/G/loss', loss_Gmain) - with torch.autograd.profiler.record_function('Gmain_backward'): - loss_Gmain.mean().mul(gain).backward() - - # Gpl: Apply path length regularization. 
- if phase in ['Greg', 'Gboth']: - with torch.autograd.profiler.record_function('Gpl_forward'): - batch_size = gen_z.shape[0] // self.pl_batch_shrink - gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size]) - pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3]) - with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients(self.pl_no_weight_grad): - pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0] - pl_lengths = pl_grads.square().sum(2).mean(1).sqrt() - pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay) - self.pl_mean.copy_(pl_mean.detach()) - pl_penalty = (pl_lengths - pl_mean).square() - training_stats.report('Loss/pl_penalty', pl_penalty) - loss_Gpl = pl_penalty * self.pl_weight - training_stats.report('Loss/G/reg', loss_Gpl) - with torch.autograd.profiler.record_function('Gpl_backward'): - loss_Gpl.mean().mul(gain).backward() - - # Dmain: Minimize logits for generated images. - loss_Dgen = 0 - if phase in ['Dmain', 'Dboth']: - with torch.autograd.profiler.record_function('Dgen_forward'): - gen_img, _gen_ws = self.run_G(gen_z, gen_c, update_emas=True) - gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma, update_emas=True) - training_stats.report('Loss/scores/fake', gen_logits) - training_stats.report('Loss/signs/fake', gen_logits.sign()) - loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits)) - with torch.autograd.profiler.record_function('Dgen_backward'): - loss_Dgen.mean().mul(gain).backward() - - # Dmain: Maximize logits for real images. - # Dr1: Apply R1 regularization. - if phase in ['Dmain', 'Dreg', 'Dboth']: - name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1' - with torch.autograd.profiler.record_function(name + '_forward'): - real_img_tmp = real_img.detach().requires_grad_(phase in ['Dreg', 'Dboth']) - real_logits = self.run_D(real_img_tmp, real_c, blur_sigma=blur_sigma) - training_stats.report('Loss/scores/real', real_logits) - training_stats.report('Loss/signs/real', real_logits.sign()) - - loss_Dreal = 0 - if phase in ['Dmain', 'Dboth']: - loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits)) - training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal) - - loss_Dr1 = 0 - if phase in ['Dreg', 'Dboth']: - with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): - r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0] - r1_penalty = r1_grads.square().sum([1,2,3]) - loss_Dr1 = r1_penalty * (self.r1_gamma / 2) - training_stats.report('Loss/r1_penalty', r1_penalty) - training_stats.report('Loss/D/reg', loss_Dr1) - - with torch.autograd.profiler.record_function(name + '_backward'): - (loss_Dreal + loss_Dr1).mean().mul(gain).backward() - -#---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/bytetrack/exps/default/yolox_s.py b/spaces/ECCV2022/bytetrack/exps/default/yolox_s.py deleted file mode 100644 index abb6a8bbbe4fd1c6aff71596621aaeec2a6a15d8..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/exps/default/yolox_s.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. 
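Stepping back to the StyleGAN2 loss above: the `Dreg` branch implements R1 regularization, a gradient penalty applied to real images only and scaled by gamma / 2. A standalone sketch of that term, assuming a 4-D image batch and any differentiable discriminator `D` (an illustration, not the DragGan code itself):

```python
import torch

def r1_penalty(D, real_img: torch.Tensor, gamma: float = 10.0) -> torch.Tensor:
    """Per-sample R1 term: (gamma / 2) * ||grad_x D(x)||^2 on real images."""
    real_img = real_img.detach().requires_grad_(True)
    logits = D(real_img)
    grads, = torch.autograd.grad(outputs=logits.sum(), inputs=real_img, create_graph=True)
    return grads.square().sum(dim=[1, 2, 3]) * (gamma / 2)
```

In the file above this penalty is averaged, multiplied by the lazy-regularization `gain`, and backpropagated together with `loss_Dreal`.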
- -import os - -from yolox.exp import Exp as MyExp - - -class Exp(MyExp): - def __init__(self): - super(Exp, self).__init__() - self.depth = 0.33 - self.width = 0.50 - self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] diff --git a/spaces/ECCV2022/bytetrack/tools/track.py b/spaces/ECCV2022/bytetrack/tools/track.py deleted file mode 100644 index ee7769a543b417f84968301153e8d6d0d2d659a0..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tools/track.py +++ /dev/null @@ -1,293 +0,0 @@ -from loguru import logger - -import torch -import torch.backends.cudnn as cudnn -from torch.nn.parallel import DistributedDataParallel as DDP - -from yolox.core import launch -from yolox.exp import get_exp -from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger -from yolox.evaluators import MOTEvaluator - -import argparse -import os -import random -import warnings -import glob -import motmetrics as mm -from collections import OrderedDict -from pathlib import Path - - -def make_parser(): - parser = argparse.ArgumentParser("YOLOX Eval") - parser.add_argument("-expn", "--experiment-name", type=str, default=None) - parser.add_argument("-n", "--name", type=str, default=None, help="model name") - - # distributed - parser.add_argument( - "--dist-backend", default="nccl", type=str, help="distributed backend" - ) - parser.add_argument( - "--dist-url", - default=None, - type=str, - help="url used to set up distributed training", - ) - parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size") - parser.add_argument( - "-d", "--devices", default=None, type=int, help="device for training" - ) - parser.add_argument( - "--local_rank", default=0, type=int, help="local rank for dist training" - ) - parser.add_argument( - "--num_machines", default=1, type=int, help="num of node for training" - ) - parser.add_argument( - "--machine_rank", default=0, type=int, help="node rank for multi-node training" - ) - parser.add_argument( - "-f", - "--exp_file", - default=None, - type=str, - help="pls input your expriment description file", - ) - parser.add_argument( - "--fp16", - dest="fp16", - default=False, - action="store_true", - help="Adopting mix precision evaluating.", - ) - parser.add_argument( - "--fuse", - dest="fuse", - default=False, - action="store_true", - help="Fuse conv and bn for testing.", - ) - parser.add_argument( - "--trt", - dest="trt", - default=False, - action="store_true", - help="Using TensorRT model for testing.", - ) - parser.add_argument( - "--test", - dest="test", - default=False, - action="store_true", - help="Evaluating on test-dev set.", - ) - parser.add_argument( - "--speed", - dest="speed", - default=False, - action="store_true", - help="speed test only.", - ) - parser.add_argument( - "opts", - help="Modify config options using the command-line", - default=None, - nargs=argparse.REMAINDER, - ) - # det args - parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") - parser.add_argument("--conf", default=0.01, type=float, help="test conf") - parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold") - parser.add_argument("--tsize", default=None, type=int, help="test img size") - parser.add_argument("--seed", default=None, type=int, help="eval seed") - # tracking args - parser.add_argument("--track_thresh", type=float, default=0.6, help="tracking confidence threshold") - parser.add_argument("--track_buffer", type=int, default=30, help="the frames for 
keep lost tracks") - parser.add_argument("--match_thresh", type=float, default=0.9, help="matching threshold for tracking") - parser.add_argument("--min-box-area", type=float, default=100, help='filter out tiny boxes') - parser.add_argument("--mot20", dest="mot20", default=False, action="store_true", help="test mot20.") - return parser - - -def compare_dataframes(gts, ts): - accs = [] - names = [] - for k, tsacc in ts.items(): - if k in gts: - logger.info('Comparing {}...'.format(k)) - accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5)) - names.append(k) - else: - logger.warning('No ground truth for {}, skipping.'.format(k)) - - return accs, names - - -@logger.catch -def main(exp, args, num_gpu): - if args.seed is not None: - random.seed(args.seed) - torch.manual_seed(args.seed) - cudnn.deterministic = True - warnings.warn( - "You have chosen to seed testing. This will turn on the CUDNN deterministic setting, " - ) - - is_distributed = num_gpu > 1 - - # set environment variables for distributed training - cudnn.benchmark = True - - rank = args.local_rank - # rank = get_local_rank() - - file_name = os.path.join(exp.output_dir, args.experiment_name) - - if rank == 0: - os.makedirs(file_name, exist_ok=True) - - results_folder = os.path.join(file_name, "track_results") - os.makedirs(results_folder, exist_ok=True) - - setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a") - logger.info("Args: {}".format(args)) - - if args.conf is not None: - exp.test_conf = args.conf - if args.nms is not None: - exp.nmsthre = args.nms - if args.tsize is not None: - exp.test_size = (args.tsize, args.tsize) - - model = exp.get_model() - logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size))) - #logger.info("Model Structure:\n{}".format(str(model))) - - val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test) - evaluator = MOTEvaluator( - args=args, - dataloader=val_loader, - img_size=exp.test_size, - confthre=exp.test_conf, - nmsthre=exp.nmsthre, - num_classes=exp.num_classes, - ) - - torch.cuda.set_device(rank) - model.cuda(rank) - model.eval() - - if not args.speed and not args.trt: - if args.ckpt is None: - ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar") - else: - ckpt_file = args.ckpt - logger.info("loading checkpoint") - loc = "cuda:{}".format(rank) - ckpt = torch.load(ckpt_file, map_location=loc) - # load the model state dict - model.load_state_dict(ckpt["model"]) - logger.info("loaded checkpoint done.") - - if is_distributed: - model = DDP(model, device_ids=[rank]) - - if args.fuse: - logger.info("\tFusing model...") - model = fuse_model(model) - - if args.trt: - assert ( - not args.fuse and not is_distributed and args.batch_size == 1 - ), "TensorRT model is not support model fusing and distributed inferencing!" - trt_file = os.path.join(file_name, "model_trt.pth") - assert os.path.exists( - trt_file - ), "TensorRT model is not found!\n Run tools/trt.py first!" 
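On the `--fuse` path a few lines above, `fuse_model` folds each BatchNorm into its preceding convolution so that evaluation runs a single fused op per layer. A sketch of the underlying algebra, assuming eval-mode running statistics (an illustrative helper, not the yolox implementation):

```python
import torch

def fuse_conv_bn(conv: torch.nn.Conv2d, bn: torch.nn.BatchNorm2d) -> torch.nn.Conv2d:
    """Fold y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta into a single conv."""
    fused = torch.nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                            stride=conv.stride, padding=conv.padding,
                            dilation=conv.dilation, groups=conv.groups, bias=True)
    with torch.no_grad():
        scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
        fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))
        bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
        fused.bias.copy_((bias - bn.running_mean) * scale + bn.bias)
    return fused
```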
- model.head.decode_in_inference = False - decoder = model.head.decode_outputs - else: - trt_file = None - decoder = None - - # start evaluate - *_, summary = evaluator.evaluate( - model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder - ) - logger.info("\n" + summary) - - # evaluate MOTA - mm.lap.default_solver = 'lap' - - if exp.val_ann == 'val_half.json': - gt_type = '_val_half' - else: - gt_type = '' - print('gt_type', gt_type) - if args.mot20: - gtfiles = glob.glob(os.path.join('datasets/MOT20/train', '*/gt/gt{}.txt'.format(gt_type))) - else: - gtfiles = glob.glob(os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type))) - print('gt_files', gtfiles) - tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')] - - logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles))) - logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers)) - logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver)) - logger.info('Loading files.') - - gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles]) - ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles]) - - mh = mm.metrics.create() - accs, names = compare_dataframes(gt, ts) - - logger.info('Running metrics') - metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', - 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', - 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True) - # print(mm.io.render_summary( - # summary, formatters=mh.formatters, - # namemap=mm.io.motchallenge_metric_names)) - div_dict = { - 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], - 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']} - for divisor in div_dict: - for divided in div_dict[divisor]: - summary[divided] = (summary[divided] / summary[divisor]) - fmt = mh.formatters - change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', - 'partially_tracked', 'mostly_lost'] - for k in change_fmt_list: - fmt[k] = fmt['mota'] - print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names)) - - metrics = mm.metrics.motchallenge_metrics + ['num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)) - logger.info('Completed') - - -if __name__ == "__main__": - args = make_parser().parse_args() - exp = get_exp(args.exp_file, args.name) - exp.merge(args.opts) - - if not args.experiment_name: - args.experiment_name = exp.exp_name - - num_gpu = torch.cuda.device_count() if args.devices is None else args.devices - assert num_gpu <= torch.cuda.device_count() - - launch( - main, - num_gpu, - args.num_machines, - args.machine_rank, - backend=args.dist_backend, - dist_url=args.dist_url, - args=(exp, args, num_gpu), - ) diff --git a/spaces/ECCV2022/bytetrack/tutorials/centertrack/byte_tracker.py b/spaces/ECCV2022/bytetrack/tutorials/centertrack/byte_tracker.py deleted file 
mode 100644 index 8cb757e0f1e62f3ec4f2e9ab57cef2b509298dbc..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/centertrack/byte_tracker.py +++ /dev/null @@ -1,363 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from sklearn.utils.linear_assignment_ import linear_assignment -import copy -from .mot_online.kalman_filter import KalmanFilter -from .mot_online.basetrack import BaseTrack, TrackState -from .mot_online import matching - - -class STrack(BaseTrack): - shared_kalman = KalmanFilter() - def __init__(self, tlwh, score): - - # wait activate - self._tlwh = np.asarray(tlwh, dtype=np.float) - self.kalman_filter = None - self.mean, self.covariance = None, None - self.is_activated = False - - self.score = score - self.tracklet_len = 0 - - def predict(self): - mean_state = self.mean.copy() - if self.state != TrackState.Tracked: - mean_state[7] = 0 - self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) - - @staticmethod - def multi_predict(stracks): - if len(stracks) > 0: - multi_mean = np.asarray([st.mean.copy() for st in stracks]) - multi_covariance = np.asarray([st.covariance for st in stracks]) - for i, st in enumerate(stracks): - if st.state != TrackState.Tracked: - multi_mean[i][7] = 0 - multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) - for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): - stracks[i].mean = mean - stracks[i].covariance = cov - - def activate(self, kalman_filter, frame_id): - """Start a new tracklet""" - self.kalman_filter = kalman_filter - self.track_id = self.next_id() - self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) - - self.tracklet_len = 0 - self.state = TrackState.Tracked - if frame_id == 1: - self.is_activated = True - # self.is_activated = True - self.frame_id = frame_id - self.start_frame = frame_id - - def re_activate(self, new_track, frame_id, new_id=False): - self.mean, self.covariance = self.kalman_filter.update( - self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) - ) - self.tracklet_len = 0 - self.state = TrackState.Tracked - self.is_activated = True - self.frame_id = frame_id - if new_id: - self.track_id = self.next_id() - self.score = new_track.score - - def update(self, new_track, frame_id): - """ - Update a matched track - :type new_track: STrack - :type frame_id: int - :type update_feature: bool - :return: - """ - self.frame_id = frame_id - self.tracklet_len += 1 - - new_tlwh = new_track.tlwh - self.mean, self.covariance = self.kalman_filter.update( - self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) - self.state = TrackState.Tracked - self.is_activated = True - - self.score = new_track.score - - @property - # @jit(nopython=True) - def tlwh(self): - """Get current position in bounding box format `(top left x, top left y, - width, height)`. - """ - if self.mean is None: - return self._tlwh.copy() - ret = self.mean[:4].copy() - ret[2] *= ret[3] - ret[:2] -= ret[2:] / 2 - return ret - - @property - # @jit(nopython=True) - def tlbr(self): - """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., - `(top left, bottom right)`. 
- """ - ret = self.tlwh.copy() - ret[2:] += ret[:2] - return ret - - @staticmethod - # @jit(nopython=True) - def tlwh_to_xyah(tlwh): - """Convert bounding box to format `(center x, center y, aspect ratio, - height)`, where the aspect ratio is `width / height`. - """ - ret = np.asarray(tlwh).copy() - ret[:2] += ret[2:] / 2 - ret[2] /= ret[3] - return ret - - def to_xyah(self): - return self.tlwh_to_xyah(self.tlwh) - - @staticmethod - # @jit(nopython=True) - def tlbr_to_tlwh(tlbr): - ret = np.asarray(tlbr).copy() - ret[2:] -= ret[:2] - return ret - - @staticmethod - # @jit(nopython=True) - def tlwh_to_tlbr(tlwh): - ret = np.asarray(tlwh).copy() - ret[2:] += ret[:2] - return ret - - def __repr__(self): - return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) - - - -class BYTETracker(object): - def __init__(self, args, frame_rate=30): - self.args = args - self.det_thresh = args.new_thresh - self.buffer_size = int(frame_rate / 30.0 * args.track_buffer) - self.max_time_lost = self.buffer_size - self.reset() - - # below has no effect to final output, just to be compatible to codebase - def init_track(self, results): - for item in results: - if item['score'] > self.opt.new_thresh and item['class'] == 1: - self.id_count += 1 - item['active'] = 1 - item['age'] = 1 - item['tracking_id'] = self.id_count - if not ('ct' in item): - bbox = item['bbox'] - item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2] - self.tracks.append(item) - - def reset(self): - self.frame_id = 0 - self.kalman_filter = KalmanFilter() - self.tracked_stracks = [] # type: list[STrack] - self.lost_stracks = [] # type: list[STrack] - self.removed_stracks = [] # type: list[STrack] - self.tracks = [] - - # below has no effect to final output, just to be compatible to codebase - self.id_count = 0 - - def step(self, results, public_det=None): - self.frame_id += 1 - activated_starcks = [] - refind_stracks = [] - lost_stracks = [] - removed_stracks = [] - detections = [] - detections_second = [] - - scores = np.array([item['score'] for item in results if item['class'] == 1], np.float32) - bboxes = np.vstack([item['bbox'] for item in results if item['class'] == 1]) # N x 4, x1y1x2y2 - - remain_inds = scores >= self.args.track_thresh - dets = bboxes[remain_inds] - scores_keep = scores[remain_inds] - - - inds_low = scores > self.args.out_thresh - inds_high = scores < self.args.track_thresh - inds_second = np.logical_and(inds_low, inds_high) - dets_second = bboxes[inds_second] - scores_second = scores[inds_second] - - if len(dets) > 0: - '''Detections''' - detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for - (tlbr, s) in zip(dets, scores_keep)] - else: - detections = [] - - ''' Add newly detected tracklets to tracked_stracks''' - unconfirmed = [] - tracked_stracks = [] # type: list[STrack] - for track in self.tracked_stracks: - if not track.is_activated: - unconfirmed.append(track) - else: - tracked_stracks.append(track) - - ''' Step 2: First association, with Kalman and IOU''' - strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) - # Predict the current location with KF - STrack.multi_predict(strack_pool) - dists = matching.iou_distance(strack_pool, detections) - #dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections) - matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh) - - for itracked, idet in matches: - track = strack_pool[itracked] - det = detections[idet] - if track.state == TrackState.Tracked: - 
track.update(detections[idet], self.frame_id) - activated_starcks.append(track) - else: - track.re_activate(det, self.frame_id, new_id=False) - refind_stracks.append(track) - - ''' Step 3: Second association, association the untrack to the low score detections, with IOU''' - if len(dets_second) > 0: - '''Detections''' - detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for - (tlbr, s) in zip(dets_second, scores_second)] - else: - detections_second = [] - r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] - dists = matching.iou_distance(r_tracked_stracks, detections_second) - matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) - for itracked, idet in matches: - track = r_tracked_stracks[itracked] - det = detections_second[idet] - if track.state == TrackState.Tracked: - track.update(det, self.frame_id) - activated_starcks.append(track) - else: - track.re_activate(det, self.frame_id, new_id=False) - refind_stracks.append(track) - - for it in u_track: - track = r_tracked_stracks[it] - if not track.state == TrackState.Lost: - track.mark_lost() - lost_stracks.append(track) - - '''Deal with unconfirmed tracks, usually tracks with only one beginning frame''' - detections = [detections[i] for i in u_detection] - dists = matching.iou_distance(unconfirmed, detections) - matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) - for itracked, idet in matches: - unconfirmed[itracked].update(detections[idet], self.frame_id) - activated_starcks.append(unconfirmed[itracked]) - for it in u_unconfirmed: - track = unconfirmed[it] - track.mark_removed() - removed_stracks.append(track) - - """ Step 4: Init new stracks""" - for inew in u_detection: - track = detections[inew] - if track.score < self.det_thresh: - continue - track.activate(self.kalman_filter, self.frame_id) - activated_starcks.append(track) - """ Step 5: Update state""" - for track in self.lost_stracks: - if self.frame_id - track.end_frame > self.max_time_lost: - track.mark_removed() - removed_stracks.append(track) - - self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] - self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) - self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) - self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks) - self.lost_stracks.extend(lost_stracks) - self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) - self.removed_stracks.extend(removed_stracks) - self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) - output_stracks = [track for track in self.tracked_stracks if track.is_activated] - - ret = [] - for track in output_stracks: - track_dict = {} - track_dict['score'] = track.score - track_dict['bbox'] = track.tlbr - bbox = track_dict['bbox'] - track_dict['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2] - track_dict['active'] = 1 if track.is_activated else 0 - track_dict['tracking_id'] = track.track_id - track_dict['class'] = 1 - ret.append(track_dict) - - self.tracks = ret - return ret - - -def joint_stracks(tlista, tlistb): - exists = {} - res = [] - for t in tlista: - exists[t.track_id] = 1 - res.append(t) - for t in tlistb: - tid = t.track_id - if not exists.get(tid, 0): - exists[tid] = 1 - res.append(t) - return res - - -def sub_stracks(tlista, tlistb): - stracks = {} - for t in tlista: - stracks[t.track_id] = t - for 
t in tlistb: - tid = t.track_id - if stracks.get(tid, 0): - del stracks[tid] - return list(stracks.values()) - - -def remove_duplicate_stracks(stracksa, stracksb): - pdist = matching.iou_distance(stracksa, stracksb) - pairs = np.where(pdist < 0.15) - dupa, dupb = list(), list() - for p, q in zip(*pairs): - timep = stracksa[p].frame_id - stracksa[p].start_frame - timeq = stracksb[q].frame_id - stracksb[q].start_frame - if timep > timeq: - dupb.append(q) - else: - dupa.append(p) - resa = [t for i, t in enumerate(stracksa) if not i in dupa] - resb = [t for i, t in enumerate(stracksb) if not i in dupb] - return resa, resb - - -def remove_fp_stracks(stracksa, n_frame=10): - remain = [] - for t in stracksa: - score_5 = t.score_list[-n_frame:] - score_5 = np.array(score_5, dtype=np.float32) - index = score_5 < 0.45 - num = np.sum(index) - if num < n_frame: - remain.append(t) - return remain - diff --git a/spaces/EronSamez/RVC_HFmeu/tools/torchgate/torchgate.py b/spaces/EronSamez/RVC_HFmeu/tools/torchgate/torchgate.py deleted file mode 100644 index 086f2ab38e4ad79e432a51c38ed7e59defae0acd..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/tools/torchgate/torchgate.py +++ /dev/null @@ -1,264 +0,0 @@ -import torch -from torch.nn.functional import conv1d, conv2d -from typing import Union, Optional -from .utils import linspace, temperature_sigmoid, amp_to_db - - -class TorchGate(torch.nn.Module): - """ - A PyTorch module that applies a spectral gate to an input signal. - - Arguments: - sr {int} -- Sample rate of the input signal. - nonstationary {bool} -- Whether to use non-stationary or stationary masking (default: {False}). - n_std_thresh_stationary {float} -- Number of standard deviations above mean to threshold noise for - stationary masking (default: {1.5}). - n_thresh_nonstationary {float} -- Number of multiplies above smoothed magnitude spectrogram. for - non-stationary masking (default: {1.3}). - temp_coeff_nonstationary {float} -- Temperature coefficient for non-stationary masking (default: {0.1}). - n_movemean_nonstationary {int} -- Number of samples for moving average smoothing in non-stationary masking - (default: {20}). - prop_decrease {float} -- Proportion to decrease signal by where the mask is zero (default: {1.0}). - n_fft {int} -- Size of FFT for STFT (default: {1024}). - win_length {[int]} -- Window length for STFT. If None, defaults to `n_fft` (default: {None}). - hop_length {[int]} -- Hop length for STFT. If None, defaults to `win_length` // 4 (default: {None}). - freq_mask_smooth_hz {float} -- Frequency smoothing width for mask (in Hz). If None, no smoothing is applied - (default: {500}). - time_mask_smooth_ms {float} -- Time smoothing width for mask (in ms). If None, no smoothing is applied - (default: {50}). 
- """ - - @torch.no_grad() - def __init__( - self, - sr: int, - nonstationary: bool = False, - n_std_thresh_stationary: float = 1.5, - n_thresh_nonstationary: float = 1.3, - temp_coeff_nonstationary: float = 0.1, - n_movemean_nonstationary: int = 20, - prop_decrease: float = 1.0, - n_fft: int = 1024, - win_length: bool = None, - hop_length: int = None, - freq_mask_smooth_hz: float = 500, - time_mask_smooth_ms: float = 50, - ): - super().__init__() - - # General Params - self.sr = sr - self.nonstationary = nonstationary - assert 0.0 <= prop_decrease <= 1.0 - self.prop_decrease = prop_decrease - - # STFT Params - self.n_fft = n_fft - self.win_length = self.n_fft if win_length is None else win_length - self.hop_length = self.win_length // 4 if hop_length is None else hop_length - - # Stationary Params - self.n_std_thresh_stationary = n_std_thresh_stationary - - # Non-Stationary Params - self.temp_coeff_nonstationary = temp_coeff_nonstationary - self.n_movemean_nonstationary = n_movemean_nonstationary - self.n_thresh_nonstationary = n_thresh_nonstationary - - # Smooth Mask Params - self.freq_mask_smooth_hz = freq_mask_smooth_hz - self.time_mask_smooth_ms = time_mask_smooth_ms - self.register_buffer("smoothing_filter", self._generate_mask_smoothing_filter()) - - @torch.no_grad() - def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]: - """ - A PyTorch module that applies a spectral gate to an input signal using the STFT. - - Returns: - smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter, - with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency - bins to smooth and n_grad_time is the number of time frames to smooth. - If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None. - """ - if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None: - return None - - n_grad_freq = ( - 1 - if self.freq_mask_smooth_hz is None - else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2))) - ) - if n_grad_freq < 1: - raise ValueError( - f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz" - ) - - n_grad_time = ( - 1 - if self.time_mask_smooth_ms is None - else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000)) - ) - if n_grad_time < 1: - raise ValueError( - f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms" - ) - - if n_grad_time == 1 and n_grad_freq == 1: - return None - - v_f = torch.cat( - [ - linspace(0, 1, n_grad_freq + 1, endpoint=False), - linspace(1, 0, n_grad_freq + 2), - ] - )[1:-1] - v_t = torch.cat( - [ - linspace(0, 1, n_grad_time + 1, endpoint=False), - linspace(1, 0, n_grad_time + 2), - ] - )[1:-1] - smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0) - - return smoothing_filter / smoothing_filter.sum() - - @torch.no_grad() - def _stationary_mask( - self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None - ) -> torch.Tensor: - """ - Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram. - - Arguments: - X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram. - xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db. - - Returns: - sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold - are set to 1, and the rest are set to 0. 
- """ - if xn is not None: - XN = torch.stft( - xn, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(xn.device), - ) - - XN_db = amp_to_db(XN).to(dtype=X_db.dtype) - else: - XN_db = X_db - - # calculate mean and standard deviation along the frequency axis - std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1) - - # compute noise threshold - noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary - - # create binary mask by thresholding the spectrogram - sig_mask = X_db > noise_thresh.unsqueeze(2) - return sig_mask - - @torch.no_grad() - def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor: - """ - Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram. - - Arguments: - X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram. - - Returns: - sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold - are set to 1, and the rest are set to 0. - """ - X_smoothed = ( - conv1d( - X_abs.reshape(-1, 1, X_abs.shape[-1]), - torch.ones( - self.n_movemean_nonstationary, - dtype=X_abs.dtype, - device=X_abs.device, - ).view(1, 1, -1), - padding="same", - ).view(X_abs.shape) - / self.n_movemean_nonstationary - ) - - # Compute slowness ratio and apply temperature sigmoid - slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6) - sig_mask = temperature_sigmoid( - slowness_ratio, self.n_thresh_nonstationary, self.temp_coeff_nonstationary - ) - - return sig_mask - - def forward( - self, x: torch.Tensor, xn: Optional[torch.Tensor] = None - ) -> torch.Tensor: - """ - Apply the proposed algorithm to the input signal. - - Arguments: - x (torch.Tensor): The input audio signal, with shape (batch_size, signal_length). - xn (Optional[torch.Tensor]): The noise signal used for stationary noise reduction. If `None`, the input - signal is used as the noise signal. Default: `None`. - - Returns: - torch.Tensor: The denoised audio signal, with the same shape as the input signal. 
- """ - assert x.ndim == 2 - if x.shape[-1] < self.win_length * 2: - raise Exception(f"x must be bigger than {self.win_length * 2}") - - assert xn is None or xn.ndim == 1 or xn.ndim == 2 - if xn is not None and xn.shape[-1] < self.win_length * 2: - raise Exception(f"xn must be bigger than {self.win_length * 2}") - - # Compute short-time Fourier transform (STFT) - X = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(x.device), - ) - - # Compute signal mask based on stationary or nonstationary assumptions - if self.nonstationary: - sig_mask = self._nonstationary_mask(X.abs()) - else: - sig_mask = self._stationary_mask(amp_to_db(X), xn) - - # Propagate decrease in signal power - sig_mask = self.prop_decrease * (sig_mask * 1.0 - 1.0) + 1.0 - - # Smooth signal mask with 2D convolution - if self.smoothing_filter is not None: - sig_mask = conv2d( - sig_mask.unsqueeze(1), - self.smoothing_filter.to(sig_mask.dtype), - padding="same", - ) - - # Apply signal mask to STFT magnitude and phase components - Y = X * sig_mask.squeeze(1) - - # Inverse STFT to obtain time-domain signal - y = torch.istft( - Y, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - center=True, - window=torch.hann_window(self.win_length).to(Y.device), - ) - - return y.to(dtype=x.dtype) diff --git a/spaces/EsoCode/text-generation-webui/extensions/multimodal/pipelines/llava/README.md b/spaces/EsoCode/text-generation-webui/extensions/multimodal/pipelines/llava/README.md deleted file mode 100644 index aff64faaae07d2f4da6c24e8ea03693326313139..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/extensions/multimodal/pipelines/llava/README.md +++ /dev/null @@ -1,9 +0,0 @@ -## LLaVA pipeline - -This module provides 2 pipelines: -- `llava-7b` - for use with LLaVA v0 7B model (finetuned LLaMa 7B) -- `llava-13b` - for use with LLaVA v0 13B model (finetuned LLaMa 13B) - -[LLaVA](https://github.com/haotian-liu/LLaVA) uses CLIP `openai/clip-vit-large-patch14` as the vision model, and then a single linear layer. For 13B the projector weights are in `liuhaotian/LLaVA-13b-delta-v0`, and for 7B they are in `liuhaotian/LLaVA-7b-delta-v0`. 
- -The supported parameter combinations for both the vision model, and the projector are: CUDA/32bit, CUDA/16bit, CPU/32bit diff --git a/spaces/Faridmaruf/RVCV2MODEL/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Faridmaruf/RVCV2MODEL/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000 --- a/spaces/Faridmaruf/RVCV2MODEL/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/Ferion/image-matting-app/ppmatting/datasets/__init__.py b/spaces/Ferion/image-matting-app/ppmatting/datasets/__init__.py deleted file mode 100644 index 55febcaefed2e14676cbb0864f8d4cc4c1ef7459..0000000000000000000000000000000000000000 --- a/spaces/Ferion/image-matting-app/ppmatting/datasets/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .matting_dataset import MattingDataset -from .composition_1k import Composition1K -from .distinctions_646 import Distinctions646 diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/nsf_hifigan/env.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/nsf_hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-ba/vdecoder/nsf_hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal.sh b/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal.sh deleted file mode 100644 index 43c4a348c0712b07dfd6e679a09f5a73349d0752..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/traintest_scripts/train_test_multi_task_goal.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -DATA_DIR=$1 -TRAINTASK=${2-'[rainbow-stack,bowl-ball-placement]'} -TESTTASK=${3-'[rainbow-stack,bowl-ball-placement]'} -TASKNAME=${4-'mix-two'} -STEPS=${5-'10000'} - -DISP=False - -echo "Training multi-task dataset... 
Folder: $DATA_DIR Task $TRAINTASK" - -# You can parallelize these depending on how much resources you have - -############################# -## Language-Conditioned Tasks -# [align-rope,assembling-kits-seq-seen-colors,assembling-kits-seq-unseen-colors,packing-shapes,stack-block-pyramid-seq-unseen-colors, -# separating-piles-seen-colors,separating-piles-unseen-colors,towers-of-hanoi-seq-seen-colors,towers-of-hanoi-seq-unseen-colors] - -# example: sh scripts/traintest_scripts/train_test_multi_task_indistribution.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" 6taskindomain -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" "[towers-of-hanoi]" 6taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner]" "[towers-of-hanoi]" 3taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope]" "[towers-of-hanoi]" 1taskgen -# sh scripts/traintest_scripts/train_test_multi_task_goal.sh data "[align-rope,sweeping-piles,align-box-corner,block-insertion,manipulating-rope,place-red-in-green]" "[towers-of-hanoi]" 10taskgen - -trap "kill 0" SIGINT - -python cliport/train.py train.task=$TRAINTASK \ - train.agent=cliport \ - train.model_task=$TASKNAME \ - train.attn_stream_fusion_type=add \ - train.trans_stream_fusion_type=conv \ - train.lang_fusion_type=mult \ - train.n_demos=50 \ - train.n_steps=${STEPS} \ - dataset.cache=True \ - train.exp_folder=exps/exp-$TASKNAME \ - dataset.type=multi \ - train.load_from_last_ckpt=False - - -# Convert Python list to Bash array -bash_array=$(python3 -c "import sys; print(' '.join((sys.argv[1])[1:-1].split(',')))" "$TESTTASK") - -# Convert the space-separated string to a bash array -echo "Testing multi-task dataset... Folder: $DATA_DIR Task $TESTTASK" - - -for task in $bash_array - do - echo "Testing $task" - # TEST - # bash scripts/generate_gpt_datasets.sh data $task - - python cliport/eval.py model_task=$TASKNAME \ - eval_task=$task \ - agent=cliport \ - mode=test \ - n_demos=100 \ - train_demos=50 \ - checkpoint_type=test_best \ - type=single \ - exp_folder=exps/exp-$TASKNAME \ - update_results=True & - done -wait - -python notebooks/print_results.py -r=exps/exp-$TASKNAME - -echo "Finished Training." 
\ No newline at end of file diff --git a/spaces/GeneralNewSense/Text-to-Music/app.py b/spaces/GeneralNewSense/Text-to-Music/app.py deleted file mode 100644 index b571467ce2f6bae52d8998f7668276fcb835975e..0000000000000000000000000000000000000000 --- a/spaces/GeneralNewSense/Text-to-Music/app.py +++ /dev/null @@ -1,95 +0,0 @@ -import time - -import gradio as gr -from sentence_transformers import SentenceTransformer - -import httpx -import json - -from utils import get_tags_for_prompts, get_mubert_tags_embeddings, get_pat - -minilm = SentenceTransformer('all-MiniLM-L6-v2') -mubert_tags_embeddings = get_mubert_tags_embeddings(minilm) - - -def get_track_by_tags(tags, pat, duration, maxit=20, loop=False): - if loop: - mode = "loop" - else: - mode = "track" - r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM', - json={ - "method": "RecordTrackTTM", - "params": { - "pat": pat, - "duration": duration, - "tags": tags, - "mode": mode - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, rdata['error']['text'] - trackurl = rdata['data']['tasks'][0]['download_link'] - - print('Generating track ', end='') - for i in range(maxit): - r = httpx.get(trackurl) - if r.status_code == 200: - return trackurl - time.sleep(1) - - -def generate_track_by_prompt(email, prompt, duration, loop=False): - try: - pat = get_pat(email) - _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, [prompt, ])[0] - return get_track_by_tags(tags, pat, int(duration), loop=loop), "Success", ",".join(tags) - except Exception as e: - return None, str(e), "" - - -block = gr.Blocks() - -with block: - gr.HTML( - """ -
-          Mubert
-          All music is generated by Mubert API – www.mubert.com
- """ - ) - with gr.Group(): - with gr.Box(): - email = gr.Textbox(label="email") - prompt = gr.Textbox(label="prompt") - duration = gr.Slider(label="duration (seconds)", value=30) - is_loop = gr.Checkbox(label="Generate loop") - out = gr.Audio() - result_msg = gr.Text(label="Result message") - tags = gr.Text(label="Tags") - btn = gr.Button("Submit").style(full_width=True) - - btn.click(fn=generate_track_by_prompt, inputs=[email, prompt, duration, is_loop], outputs=[out, result_msg, tags]) - gr.HTML(''' - - ''') - -block.launch() \ No newline at end of file diff --git a/spaces/GeorgeOrville/bingo/tailwind.config.js b/spaces/GeorgeOrville/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/Godrose0728/sound-link/export_model.py b/spaces/Godrose0728/sound-link/export_model.py deleted file mode 100644 index 52d3b3d083df7bf027b46d9c63e399b2da3f0e0a..0000000000000000000000000000000000000000 --- a/spaces/Godrose0728/sound-link/export_model.py +++ /dev/null @@ -1,13 +0,0 @@ -import torch - -if __name__ == '__main__': - model_path = "saved_model/18/model.pth" - output_path = "saved_model/18/model1.pth" - checkpoint_dict = torch.load(model_path, map_location='cpu') - checkpoint_dict_new = {} - for k, v in checkpoint_dict.items(): - if k == "optimizer": - print("remove optimizer") - continue - checkpoint_dict_new[k] = v - torch.save(checkpoint_dict_new, output_path) diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/op/conv2d_gradfix.py b/spaces/Gradio-Blocks/StyleGAN-NADA/op/conv2d_gradfix.py deleted file mode 100644 index bb2f94bbcb8132299fd4d538972d32bd7ff6e7d6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/op/conv2d_gradfix.py +++ /dev/null @@ -1,227 +0,0 @@ -import 
contextlib -import warnings - -import torch -from torch import autograd -from torch.nn import functional as F - -enabled = True -weight_gradients_disabled = False - - -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if could_use_op(input): - return conv2d_gradfix( - transpose=False, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=0, - dilation=dilation, - groups=groups, - ).apply(input, weight, bias) - - return F.conv2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - ) - - -def conv_transpose2d( - input, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, -): - if could_use_op(input): - return conv2d_gradfix( - transpose=True, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=output_padding, - groups=groups, - dilation=dilation, - ).apply(input, weight, bias) - - return F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - output_padding=output_padding, - dilation=dilation, - groups=groups, - ) - - -def could_use_op(input): - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - - if input.device.type != "cuda": - return False - - if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): - return True - - warnings.warn( - f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." - ) - - return False - - -def ensure_tuple(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - - return xs - - -conv2d_gradfix_cache = dict() - - -def conv2d_gradfix( - transpose, weight_shape, stride, padding, output_padding, dilation, groups -): - ndim = 2 - weight_shape = tuple(weight_shape) - stride = ensure_tuple(stride, ndim) - padding = ensure_tuple(padding, ndim) - output_padding = ensure_tuple(output_padding, ndim) - dilation = ensure_tuple(dilation, ndim) - - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in conv2d_gradfix_cache: - return conv2d_gradfix_cache[key] - - common_kwargs = dict( - stride=stride, padding=padding, dilation=dilation, groups=groups - ) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - class Conv2d(autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - if not transpose: - out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - else: - out = F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - output_padding=output_padding, - **common_kwargs, - ) - - ctx.save_for_backward(input, weight) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input, grad_weight, grad_bias = None, None, None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - 
).apply(grad_output, weight, None) - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum((0, 2, 3)) - - return grad_input, grad_weight, grad_bias - - class Conv2dGradWeight(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - "aten::cudnn_convolution_backward_weight" - if not transpose - else "aten::cudnn_convolution_transpose_backward_weight" - ) - flags = [ - torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, - torch.backends.cudnn.allow_tf32, - ] - grad_weight = op( - weight_shape, - grad_output, - input, - padding, - stride, - dilation, - groups, - *flags, - ) - ctx.save_for_backward(grad_output, input) - - return grad_weight - - @staticmethod - def backward(ctx, grad_grad_weight): - grad_output, input = ctx.saved_tensors - grad_grad_output, grad_grad_input = None, None - - if ctx.needs_input_grad[0]: - grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, grad_grad_weight, None) - - return grad_grad_output, grad_grad_input - - conv2d_gradfix_cache[key] = Conv2d - - return Conv2d diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/datasets/coco_detection.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/datasets/coco_detection.py deleted file mode 100644 index 09a75c404687223c71dcdf0abc7af827f2e498a6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/datasets/coco_detection.py +++ /dev/null @@ -1,48 +0,0 @@ -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git 
a/spaces/Gradio-Blocks/uniformer_image_detection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py deleted file mode 100644 index 29f21fd040614425e8b36415b660823ad6bd38e1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://resnest50', - backbone=dict( - type='ResNeSt', - stem_channels=64, - depth=50, - radix=2, - reduction_factor=4, - avg_down_stride=True, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch'), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# # use ResNeSt img_norm -img_norm_cfg = dict( - mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/README.md deleted file mode 100644 index 66f3dc286f066c50ef54e98de036ef0f5056e246..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Pyramid Scene Parsing Network - -## Introduction - - - -```latex -@inproceedings{zhao2017pspnet, - title={Pyramid Scene Parsing Network}, - author={Zhao, Hengshuang and Shi, Jianping and Qi, Xiaojuan and Wang, Xiaogang and Jia, Jiaya}, - booktitle={CVPR}, - year={2017} -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | --------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ---------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| PSPNet | R-50-D8 | 512x1024 | 40000 | 6.1 | 4.07 | 77.85 | 79.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338.log.json) | -| PSPNet | R-101-D8 | 512x1024 | 40000 | 9.6 | 2.68 | 78.34 | 79.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751-467e7cf4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_40k_cityscapes/pspnet_r101-d8_512x1024_40k_cityscapes_20200604_232751.log.json) | -| PSPNet | R-50-D8 | 769x769 | 40000 | 6.9 | 1.76 | 78.26 | 79.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725-86638686.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_40k_cityscapes/pspnet_r50-d8_769x769_40k_cityscapes_20200606_112725.log.json) | -| PSPNet | R-101-D8 | 769x769 | 40000 | 10.9 | 1.15 | 79.08 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753-61c6f5be.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_40k_cityscapes/pspnet_r101-d8_769x769_40k_cityscapes_20200606_112753.log.json) | -| PSPNet | R-18-D8 | 512x1024 | 80000 | 1.7 | 15.71 | 74.87 | 76.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes_20201225_021458-09ffa746.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes/pspnet_r18-d8_512x1024_80k_cityscapes-20201225_021458.log.json) | -| PSPNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131.log.json) | -| PSPNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.76 | 81.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211.log.json) | -| PSPNet | R-18-D8 | 769x769 | 80000 | 1.9 | 6.20 | 75.90 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes_20201225_021458-3deefc62.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18-d8_769x769_80k_cityscapes/pspnet_r18-d8_769x769_80k_cityscapes-20201225_021458.log.json) | -| PSPNet | R-50-D8 | 769x769 | 80000 | - | - | 79.59 | 80.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121-5ccf03dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_769x769_80k_cityscapes/pspnet_r50-d8_769x769_80k_cityscapes_20200606_210121.log.json) | -| PSPNet | R-101-D8 | 769x769 | 80000 | - | - | 79.77 | 81.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055-dba412fa.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_769x769_80k_cityscapes/pspnet_r101-d8_769x769_80k_cityscapes_20200606_225055.log.json) | -| PSPNet | R-18b-D8 | 512x1024 | 80000 | 1.5 | 16.28 | 74.23 | 75.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes_20201226_063116-26928a60.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes/pspnet_r18b-d8_512x1024_80k_cityscapes-20201226_063116.log.json) | -| PSPNet | R-50b-D8 | 512x1024 | 80000 | 6.0 | 4.30 | 78.22 | 79.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes_20201225_094315-6344287a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes/pspnet_r50b-d8_512x1024_80k_cityscapes-20201225_094315.log.json) | -| PSPNet | R-101b-D8 | 512x1024 | 80000 | 9.5 | 2.76 | 79.69 | 80.79 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes/pspnet_r101b-d8_512x1024_80k_cityscapes-20201226_170012.log.json) | -| PSPNet | R-18b-D8 | 769x769 | 80000 | 1.7 | 6.41 | 74.92 | 76.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes_20201226_080942-bf98d186.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r18b-d8_769x769_80k_cityscapes/pspnet_r18b-d8_769x769_80k_cityscapes-20201226_080942.log.json) | -| PSPNet | R-50b-D8 | 769x769 | 80000 | 6.8 | 1.88 | 78.50 | 79.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes_20201225_094316-4c643cf6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes/pspnet_r50b-d8_769x769_80k_cityscapes-20201225_094316.log.json) | -| PSPNet | R-101b-D8 | 769x769 | 80000 | 10.8 | 1.17 | 78.87 | 80.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes_20201226_171823-f0e7c293.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101b-d8_769x769_80k_cityscapes/pspnet_r101b-d8_769x769_80k_cityscapes-20201226_171823.log.json) | - -### ADE20K - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| PSPNet | R-50-D8 | 512x512 | 80000 | 8.5 | 23.53 | 41.13 | 41.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128-15a8b914.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_80k_ade20k/pspnet_r50-d8_512x512_80k_ade20k_20200615_014128.log.json) | -| PSPNet | R-101-D8 | 512x512 | 80000 | 12 | 15.30 | 43.57 | 44.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423-b6e782f0.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_80k_ade20k/pspnet_r101-d8_512x512_80k_ade20k_20200614_031423.log.json) | -| PSPNet | R-50-D8 | 512x512 | 160000 | - | - | 42.48 | 43.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_160k_ade20k/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358.log.json) | -| PSPNet | R-101-D8 | 512x512 | 160000 | - | - | 44.39 | 45.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_160k_ade20k/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650.log.json) | - -### Pascal VOC 2012 + Aug - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| PSPNet | R-50-D8 | 512x512 | 20000 | 6.1 | 23.59 | 76.78 | 77.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958-ed5dfbd9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_20k_voc12aug/pspnet_r50-d8_512x512_20k_voc12aug_20200617_101958.log.json) | -| PSPNet | R-101-D8 | 512x512 | 20000 | 9.6 | 15.02 | 78.47 | 79.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003-4aef3c9a.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_20k_voc12aug/pspnet_r101-d8_512x512_20k_voc12aug_20200617_102003.log.json) | -| PSPNet | R-50-D8 | 512x512 | 40000 | - | - | 77.29 | 78.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222-ae9c1b8c.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x512_40k_voc12aug/pspnet_r50-d8_512x512_40k_voc12aug_20200613_161222.log.json) | -| PSPNet | R-101-D8 | 512x512 | 40000 | - | - | 78.52 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222-bc933b18.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_512x512_40k_voc12aug/pspnet_r101-d8_512x512_40k_voc12aug_20200613_161222.log.json) | - -### Pascal Context - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| PSPNet | R-101-D8 | 480x480 | 40000 | 8.8 | 9.68 | 46.60 | 47.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context_20200911_211210-bf0f5d7c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context/pspnet_r101-d8_480x480_40k_pascal_context-20200911_211210.log.json) | -| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 46.03 | 47.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context_20200911_190530-c86d6233.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context/pspnet_r101-d8_480x480_80k_pascal_context-20200911_190530.log.json) | - -### Pascal Context 59 - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| PSPNet | R-101-D8 | 480x480 | 40000 | - | - | 52.02 | 53.54 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59_20210416_114524-86d44cd4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_40k_pascal_context_59/pspnet_r101-d8_480x480_40k_pascal_context_59-20210416_114524.log.json) | -| PSPNet | R-101-D8 | 480x480 | 80000 | - | - | 52.47 | 53.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59_20210416_114418-fa6caaa2.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59/pspnet_r101-d8_480x480_80k_pascal_context_59-20210416_114418.log.json) | diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/app.py b/spaces/GrandaddyShmax/MusicGen_Plus/app.py deleted file mode 100644 index 1e769ea2270e4cd57abfaafea2243d136520cdac..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/app.py +++ /dev/null @@ -1,717 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py -# also released under the MIT license. - -import random -import argparse -from concurrent.futures import ProcessPoolExecutor -import os -import subprocess as sp -from tempfile import NamedTemporaryFile -import time -import warnings -import glob -import re -from pathlib import Path -from PIL import Image - -import torch -import gradio as gr -import numpy as np - -from audiocraft.data.audio_utils import convert_audio -from audiocraft.data.audio import audio_write -from audiocraft.models import MusicGen -from audiocraft.utils import ui -import subprocess, random, string - -MODEL = None # Last used model -MODELS = None -IS_SHARED_SPACE = "musicgen/MusicGen" in os.environ.get('SPACE_ID', '') -INTERRUPTED = False -UNLOAD_MODEL = False -MOVE_TO_CPU = False -IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '') -MAX_BATCH_SIZE = 12 -BATCHED_DURATION = 15 -INTERRUPTING = False -# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform -_old_call = sp.call - -def generate_random_string(length): - characters = string.ascii_letters + string.digits - return ''.join(random.choice(characters) for _ in range(length)) - -def resize_video(input_path, output_path, target_width, target_height): - ffmpeg_cmd = [ - 'ffmpeg', - '-y', - '-i', input_path, - '-vf', f'scale={target_width}:{target_height}', - '-c:a', 'copy', - output_path - ] - subprocess.run(ffmpeg_cmd) - -def _call_nostderr(*args, **kwargs): - # Avoid ffmpeg vomitting on the logs. - kwargs['stderr'] = sp.DEVNULL - kwargs['stdout'] = sp.DEVNULL - _old_call(*args, **kwargs) - - -sp.call = _call_nostderr -# Preallocating the pool of processes. -pool = ProcessPoolExecutor(4) -pool.__enter__() - - -def interrupt(): - global INTERRUPTING - INTERRUPTING = True - - -def make_waveform(*args, **kwargs): - # Further remove some warnings. 
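- # This wrapper pops the custom 'height'/'width' kwargs, clamps both to a 256px
- # minimum, renders the clip with gr.make_waveform, then re-encodes it with ffmpeg
- # via resize_video (900x300 when no 'bg_image' is supplied, otherwise the requested
- # size) and returns the path of the resulting .mp4.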
- be = time.time() - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - height = kwargs.pop('height') - width = kwargs.pop('width') - if height < 256: - height = 256 - if width < 256: - width = 256 - waveform_video = gr.make_waveform(*args, **kwargs) - out = f"{generate_random_string(12)}.mp4" - image = kwargs.get('bg_image', None) - if image is None: - resize_video(waveform_video, out, 900, 300) - else: - resize_video(waveform_video, out, width, height) - print("Make a video took", time.time() - be) - return out - - -def load_model(version='melody'): - global MODEL, MODELS - custom_model = None - base_model = 'medium' - print("Loading model", version) - if MODELS is None: - if version == 'custom': - MODEL = MusicGen.get_pretrained(base_model) - MODEL.lm.load_state_dict(torch.load(custom_model)) - else: - MODEL = MusicGen.get_pretrained(version) - return - else: - t1 = time.monotonic() - if MODEL is not None: - MODEL.to('cpu') # move to cache - print("Previous model moved to CPU in %.2fs" % (time.monotonic() - t1)) - t1 = time.monotonic() - if version != 'custom' and MODELS.get(version) is None: - print("Loading model %s from disk" % version) - result = MusicGen.get_pretrained(version) - MODELS[version] = result - print("Model loaded in %.2fs" % (time.monotonic() - t1)) - MODEL = result - return - result = MODELS[version].to('cuda') - print("Cached model loaded in %.2fs" % (time.monotonic() - t1)) - MODEL = result - -def normalize_audio(audio_data): - audio_data = audio_data.astype(np.float32) - max_value = np.max(np.abs(audio_data)) - audio_data /= max_value - return audio_data - -def _do_predictions(texts, melodies, sample, duration, image, height, width, background, bar1, bar2, progress=False, **gen_kwargs): - maximum_size = 29.5 - cut_size = 0 - sampleP = None - if sample is not None: - globalSR, sampleM = sample[0], sample[1] - sampleM = normalize_audio(sampleM) - sampleM = torch.from_numpy(sampleM).t() - if sampleM.dim() == 1: - sampleM = sampleM.unsqueeze(0) - sample_length = sampleM.shape[sampleM.dim() - 1] / globalSR - if sample_length > maximum_size: - cut_size = sample_length - maximum_size - sampleP = sampleM[..., :int(globalSR * cut_size)] - sampleM = sampleM[..., int(globalSR * cut_size):] - if sample_length >= duration: - duration = sample_length + 0.5 - global MODEL - MODEL.set_generation_params(duration=(duration - cut_size), **gen_kwargs) - print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies], [None if sample is None else (sample[0], sample[1].shape)]) - be = time.time() - processed_melodies = [] - target_sr = 32000 - target_ac = 1 - for melody in melodies: - if melody is None: - processed_melodies.append(None) - else: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t() - if melody.dim() == 1: - melody = melody[None] - melody = melody[..., :int(sr * duration)] - melody = convert_audio(melody, sr, target_sr, target_ac) - processed_melodies.append(melody) - - if sample is not None: - if sampleP is None: - outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress, - ) - else: - if sampleP.dim() > 1: - sampleP = convert_audio(sampleP, globalSR, target_sr, target_ac) - sampleP = sampleP.to(MODEL.device).float().unsqueeze(0) - outputs = MODEL.generate_continuation( - prompt=sampleM, - prompt_sample_rate=globalSR, - descriptions=texts, - progress=progress, - ) - outputs = torch.cat([sampleP, outputs], 2) - - elif 
any(m is not None for m in processed_melodies): - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=progress, - ) - else: - outputs = MODEL.generate(texts, progress=progress) - - outputs = outputs.detach().cpu().float() - out_files = [] - for output in outputs: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, MODEL.sample_rate, strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - out_files.append(pool.submit(make_waveform, file.name, bg_image=image, bg_color=background, bars_color=(bar1, bar2), fg_alpha=1.0, bar_count=75, height=height, width=width)) - res = [out_file.result() for out_file in out_files] - print("batch finished", len(texts), time.time() - be) - if MOVE_TO_CPU: - MODEL.to('cpu') - if UNLOAD_MODEL: - MODEL = None - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - return res - - -def predict_batched(texts, melodies): - max_text_length = 512 - texts = [text[:max_text_length] for text in texts] - load_model('melody') - res = _do_predictions(texts, melodies, BATCHED_DURATION) - return [res] - - -def predict_full(model, prompt_amount, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, audio, mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2, progress=gr.Progress()): - global INTERRUPTING - INTERRUPTING = False - if temperature < 0: - raise gr.Error("Temperature must be >= 0.") - if topk < 0: - raise gr.Error("Topk must be non-negative.") - if topp < 0: - raise gr.Error("Topp must be non-negative.") - - topk = int(topk) - if MODEL is None or MODEL.name != model: - load_model(model) - else: - if MOVE_TO_CPU: - MODEL.to('cuda') - - if seed < 0: - seed = random.randint(0, 0xffff_ffff_ffff) - torch.manual_seed(seed) - predict_full.last_upd = time.monotonic() - def _progress(generated, to_generate): - if time.monotonic() - predict_full.last_upd > 1: - progress((generated, to_generate)) - predict_full.last_upd = time.monotonic() - if INTERRUPTING: - raise gr.Error("Interrupted.") - MODEL.set_custom_progress_callback(_progress) - - melody = None - sample = None - if mode == "sample": - sample = audio - elif mode == "melody": - melody = audio - - text_cat = [p0, p1, p2, p3, p4, p5, p6, p7, p8, p9] - drag_cat = [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9] - texts = [] - ind = 0 - ind2 = 0 - while ind < prompt_amount: - for ind2 in range(int(drag_cat[ind])): - texts.append(text_cat[ind]) - ind2 = 0 - ind = ind + 1 - - outs = _do_predictions( - [texts], [melody], sample, duration, image, height, width, background, bar1, bar2, progress=True, - top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef, extend_stride=MODEL.max_duration-overlap) - return outs[0], seed - -max_textboxes = 10 - -def get_available_models(): - return sorted([re.sub('.pt$', '', item.name) for item in list(Path('models/').glob('*')) if item.name.endswith('.pt')]) - -def ui_full(launch_kwargs): - with gr.Blocks(title='MusicGen+') as interface: - gr.Markdown( - """ - # MusicGen+ V1.2.3 (HuggingFace Version) - - ## **NEW VERSION IS OUT:** https://huggingface.co/spaces/GrandaddyShmax/AudioCraft_Plus - - #### **Disclaimer:** This will not run on CPU only. Its best to clone this App and run on GPU instance! 
- - **Alternatively**, you can run this for free on a google colab: - https://colab.research.google.com/github/camenduru/MusicGen-colab/blob/main/MusicGen_ClownOfMadness_plus_colab.ipynb - - **Or**, run this locally on your PC: - https://github.com/GrandaddyShmax/audiocraft_plus/tree/plus - - Thanks to: facebookresearch, Camenduru, rkfg and GrandaddyShmax - """ - ) - with gr.Row(): - with gr.Column(): - with gr.Tab("Generation"): - with gr.Row(): - s = gr.Slider(1, max_textboxes, value=1, step=1, label="Prompt Segments:") - with gr.Column(): - textboxes = [] - prompts = [] - repeats = [] - with gr.Row(): - text0 = gr.Text(label="Input Text", interactive=True, scale=3) - prompts.append(text0) - drag0 = gr.Number(label="Repeat", value=1, interactive=True, scale=1) - repeats.append(drag0) - for i in range(max_textboxes): - with gr.Row(visible=False) as t: - text = gr.Text(label="Input Text", interactive=True, scale=3) - repeat = gr.Number(label="Repeat", minimum=1, value=1, interactive=True, scale=1) - textboxes.append(t) - prompts.append(text) - repeats.append(repeat) - with gr.Row(): - mode = gr.Radio(["melody", "sample"], label="Input Audio Mode", value="sample", interactive=True) - audio = gr.Audio(source="upload", type="numpy", label="Input Audio (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Generate", variant="primary") - # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license. - _ = gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Row(): - duration = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True) - with gr.Row(): - overlap = gr.Slider(minimum=1, maximum=29, value=12, step=1, label="Overlap", interactive=True) - with gr.Row(): - seed = gr.Number(label="Seed", value=-1, precision=0, interactive=True) - gr.Button('\U0001f3b2\ufe0f').style(full_width=False).click(fn=lambda: -1, outputs=[seed], queue=False) - reuse_seed = gr.Button('\u267b\ufe0f').style(full_width=False) - with gr.Tab("Customization"): - with gr.Row(): - with gr.Column(): - background = gr.ColorPicker(value="#22A699", label="background color", interactive=True, scale=0) - bar1 = gr.ColorPicker(value="#F2BE22", label="bar color start", interactive=True, scale=0) - bar2 = gr.ColorPicker(value="#F29727", label="bar color end", interactive=True, scale=0) - with gr.Column(): - image = gr.Image(label="Background Image", type="filepath", interactive=True, scale=4) - with gr.Row(): - height = gr.Number(label="Height", value=512, interactive=True) - width = gr.Number(label="Width", value=768, interactive=True) - with gr.Tab("Settings"): - with gr.Row(): - model = gr.Radio(["melody", "small", "medium", "large"], label="Model", value="melody", interactive=True, scale=1) - with gr.Row(): - topk = gr.Number(label="Top-k", value=250, interactive=True) - topp = gr.Number(label="Top-p", value=0, interactive=True) - temperature = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef = gr.Number(label="Classifier Free Guidance", value=5.0, interactive=True) - with gr.Column() as c: - with gr.Tab("Output"): - output = gr.Video(label="Generated Music", scale=0) - seed_used = gr.Number(label='Seed used', value=-1, interactive=False) - with gr.Tab("Wiki"): - gr.Markdown( - """ - ### Generation Tab: - - #### Multi-Prompt: - - This feature allows you to control the music, adding variation to different time segments. - You have up to 10 prompt segments. 
the first prompt will always be 30s long - the other prompts will be [30s - overlap]. - for example if the overlap is 10s, each prompt segment will be 20s. - - - **[Prompt Segments (number)]:** - Amount of unique prompt to generate throughout the music generation. - - - **[Prompt/Input Text (prompt)]:** - Here describe the music you wish the model to generate. - - - **[Repeat (number)]:** - Write how many times this prompt will repeat (instead of wasting another prompt segment on the same prompt). - - - **[Input Audio Mode (selection)]:** - `Melody` mode only works with the melody model: it conditions the music generation to reference the melody - `Sample` mode works with any model: it gives a music sample to the model to generate its continuation. - - - **[Input Audio (audio file)]:** - Input here the audio you wish to use with "melody" or "sample" mode. - - - **[Generate (button)]:** - Generates the music with the given settings and prompts. - - - **[Interrupt (button)]:** - Stops the music generation as soon as it can, providing an incomplete output. - - - **[Duration (number)]:** - How long you want the generated music to be (in seconds). - - - **[Overlap (number)]:** - How much each new segment will reference the previous segment (in seconds). - For example, if you choose 20s: Each new segment after the first one will reference the previous segment 20s - and will generate only 10s of new music. The model can only process 30s of music. - - - **[Seed (number)]:** - Your generated music id. If you wish to generate the exact same music, - place the exact seed with the exact prompts - (This way you can also extend specific song that was generated short). - - - **[Random Seed (button)]:** - Gives "-1" as a seed, which counts as a random seed. - - - **[Copy Previous Seed (button)]:** - Copies the seed from the output seed (if you don't feel like doing it manualy). - - --- - - ### Customization Tab: - - - **[Background Color (color)]:** - Works only if you don't upload image. Color of the background of the waveform. - - - **[Bar Color Start (color)]:** - First color of the waveform bars. - - - **[Bar Color End (color)]:** - Second color of the waveform bars. - - - **[Background Image (image)]:** - Background image that you wish to be attached to the generated video along with the waveform. - - - **[Height and Width (numbers)]:** - Output video resolution, only works with image. - (minimum height and width is 256). - - --- - - ### Settings Tab: - - - **[Model (selection)]:** - Here you can choose which model you wish to use: - `melody` model is based on the medium model with a unique feature that lets you use melody conditioning - `small` model is trained on 300M parameters - `medium` model is trained on 1.5B parameters - `large` model is trained on 3.3B parameters. - - - **[Top-k (number)]:** - is a parameter used in text generation models, including music generation models. It determines the number of most likely next tokens to consider at each step of the generation process. The model ranks all possible tokens based on their predicted probabilities, and then selects the top-k tokens from the ranked list. The model then samples from this reduced set of tokens to determine the next token in the generated sequence. A smaller value of k results in a more focused and deterministic output, while a larger value of k allows for more diversity in the generated music. 
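- 
- As a rough, illustrative sketch only (this is not MusicGen's actual sampling code), top-k filtering over a vector of logits can be pictured like this:
- 
- ```python
- import torch
- 
- def top_k_filter(logits: torch.Tensor, k: int) -> torch.Tensor:
-     # Keep the k largest logits; everything else becomes -inf and can never be sampled.
-     kth_value = torch.topk(logits, k).values[..., -1, None]
-     return logits.masked_fill(logits < kth_value, float("-inf"))
- 
- probs = torch.softmax(top_k_filter(torch.randn(1, 2048), k=250), dim=-1)
- next_token = torch.multinomial(probs, num_samples=1)  # sample one token id
- ```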
- - - **[Top-p (number)]:** - also known as nucleus sampling or probabilistic sampling, is another method used for token selection during text generation. Instead of specifying a fixed number like top-k, top-p considers the cumulative probability distribution of the ranked tokens. It selects the smallest possible set of tokens whose cumulative probability exceeds a certain threshold (usually denoted as p). The model then samples from this set to choose the next token. This approach ensures that the generated output maintains a balance between diversity and coherence, as it allows for a varying number of tokens to be considered based on their probabilities. - - - **[Temperature (number)]:** - is a parameter that controls the randomness of the generated output. It is applied during the sampling process, where a higher temperature value results in more random and diverse outputs, while a lower temperature value leads to more deterministic and focused outputs. In the context of music generation, a higher temperature can introduce more variability and creativity into the generated music, but it may also lead to less coherent or structured compositions. On the other hand, a lower temperature can produce more repetitive and predictable music. - - - **[Classifier Free Guidance (number)]:** - refers to a technique used in some music generation models where a separate classifier network is trained to provide guidance or control over the generated music. This classifier is trained on labeled data to recognize specific musical characteristics or styles. During the generation process, the output of the generator model is evaluated by the classifier, and the generator is encouraged to produce music that aligns with the desired characteristics or style. This approach allows for more fine-grained control over the generated music, enabling users to specify certain attributes they want the model to capture. - """ - ) - with gr.Tab("Changelog"): - gr.Markdown( - """ - ## Changelog: - - ### V1.2.3 - - - Added option to change video size to fit the image you upload - - - - ### V1.2.2 - - - Added Wiki, Changelog and About tabs - - - - ### V1.2.1 - - - Added tabs and organized the entire interface - - - Added option to attach image to the output video - - - Added option to load fine-tuned models (Not on HuggingFace Version) - - - - ### V1.2.0 - - - Added Multi-Prompt - - - - ### V1.1.3 - - - Added customization options for generated waveform - - - - ### V1.1.2 - - - Removed sample length limit: now you can input audio of any length as music sample - - - - ### V1.1.1 - - - Improved music sample audio quality when using music continuation - - - - ### V1.1.0 - - - Rebuilt the repo on top of the latest structure of the main MusicGen repo - - - Improved Music continuation feature - - - - ### V1.0.0 - Stable Version - - - Added Music continuation - """ - ) - with gr.Tab("About"): - gr.Markdown( - """ - This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284) - - ## MusicGen+ is an extended version of the original MusicGen by facebookresearch. 
- - ### Repo: https://github.com/GrandaddyShmax/audiocraft_plus/tree/plus - - --- - - ### This project was possible thanks to: - - #### GrandaddyShmax - https://github.com/GrandaddyShmax - - #### Camenduru - https://github.com/camenduru - - #### rkfg - https://github.com/rkfg - """ - ) - reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False) - submit.click(predict_full, inputs=[model, s, prompts[0], prompts[1], prompts[2], prompts[3], prompts[4], prompts[5], prompts[6], prompts[7], prompts[8], prompts[9], repeats[0], repeats[1], repeats[2], repeats[3], repeats[4], repeats[5], repeats[6], repeats[7], repeats[8], repeats[9], audio, mode, duration, topk, topp, temperature, cfg_coef, seed, overlap, image, height, width, background, bar1, bar2], outputs=[output, seed_used]) - - def variable_outputs(k): - k = int(k) - 1 - return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k) - def get_size(image): - if image is not None: - img = Image.open(image) - img_height = img.height - img_width = img.width - if (img_height%2) != 0: - img_height = img_height + 1 - if (img_width%2) != 0: - img_width = img_width + 1 - return img_height, img_width - else: - return 512, 768 - - image.change(get_size, image, outputs=[height, width]) - s.change(variable_outputs, s, textboxes) - gr.Examples( - fn=predict_full, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - "melody" - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - "melody" - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - "medium" - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions", - "./assets/bach.mp3", - "melody" - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - "medium", - ], - ], - inputs=[text0, audio, model], - outputs=[output] - ) - - interface.queue().launch(**launch_kwargs) - - -def ui_batched(launch_kwargs): - with gr.Blocks() as demo: - gr.Markdown( - """ - # MusicGen - - This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284). -
- - Duplicate Space - for longer sequences, more control and no queue.

- """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Describe your music", lines=2, interactive=True) - melody = gr.Audio(source="upload", type="numpy", label="Condition on a melody (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Generate") - with gr.Column(): - output = gr.Video(label="Generated Music") - submit.click(predict_batched, inputs=[text, melody], outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE) - gr.Examples( - fn=predict_batched, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", - "./assets/bach.mp3", - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - ], - ], - inputs=[text, melody], - outputs=[output] - ) - - demo.queue(max_size=8 * 4).launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - parser.add_argument( - '--unload_model', action='store_true', help='Unload the model after every generation to save GPU memory' - ) - - parser.add_argument( - '--unload_to_cpu', action='store_true', help='Move the model to main RAM after every generation to save GPU memory but reload faster than after full unload (see above)' - ) - - parser.add_argument( - '--cache', action='store_true', help='Cache models in RAM to quickly switch between them' - ) - - args = parser.parse_args() - UNLOAD_MODEL = args.unload_model - MOVE_TO_CPU = args.unload_to_cpu - if args.cache: - MODELS = {} - - launch_kwargs = {} - launch_kwargs['server_name'] = args.listen - - if args.username and args.password: - launch_kwargs['auth'] = (args.username, args.password) - if args.server_port: - launch_kwargs['server_port'] = args.server_port - if args.inbrowser: - launch_kwargs['inbrowser'] = args.inbrowser - if args.share: - launch_kwargs['share'] = args.share - - # Show the interface - if IS_BATCHED: - ui_batched(launch_kwargs) - else: - ui_full(launch_kwargs) diff --git a/spaces/Groq/mlagility/README.md b/spaces/Groq/mlagility/README.md deleted file mode 100644 index 90d06f91c9a88dba5ce17720d8372210031c8077..0000000000000000000000000000000000000000 --- a/spaces/Groq/mlagility/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mlagility -emoji: 😻 -colorFrom: blue -colorTo: pink -sdk: streamlit -sdk_version: 1.24.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HESOAYM/ElviraMulti/assets/custom.js b/spaces/HESOAYM/ElviraMulti/assets/custom.js deleted file mode 100644 index 
b8071034f3618c541e3f4169c7fc6d6593d56f44..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/assets/custom.js +++ /dev/null @@ -1,224 +0,0 @@ - -// custom javascript here - -const MAX_HISTORY_LENGTH = 32; - -var key_down_history = []; -var currentIndex = -1; -var user_input_ta; - -var gradioContainer = null; -var user_input_ta = null; -var user_input_tb = null; -var userInfoDiv = null; -var appTitleDiv = null; -var chatbot = null; -var apSwitch = null; - -var ga = document.getElementsByTagName("gradio-app"); -var targetNode = ga[0]; -var isInIframe = (window.self !== window.top); - -// gradio 页面加载好了么??? 我能动你的元素了么?? -function gradioLoaded(mutations) { - for (var i = 0; i < mutations.length; i++) { - if (mutations[i].addedNodes.length) { - gradioContainer = document.querySelector(".gradio-container"); - user_input_tb = document.getElementById('user_input_tb'); - userInfoDiv = document.getElementById("user_info"); - appTitleDiv = document.getElementById("app_title"); - chatbot = document.querySelector('#chuanhu_chatbot'); - apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - - if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没? - adjustDarkMode(); - } - if (user_input_tb) { // user_input_tb 加载出来了没? - selectHistory(); - } - if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没? - setTimeout(showOrHideUserInfo(), 2000); - } - if (chatbot) { // chatbot 加载出来了没? - setChatbotHeight() - } - } - } -} - -function selectHistory() { - user_input_ta = user_input_tb.querySelector("textarea"); - if (user_input_ta) { - observer.disconnect(); // 停止监听 - // 在 textarea 上监听 keydown 事件 - user_input_ta.addEventListener("keydown", function (event) { - var value = user_input_ta.value.trim(); - // 判断按下的是否为方向键 - if (event.code === 'ArrowUp' || event.code === 'ArrowDown') { - // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作 - if (value && key_down_history.indexOf(value) === -1) - return; - // 对于需要响应的动作,阻止默认行为。 - event.preventDefault(); - var length = key_down_history.length; - if (length === 0) { - currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置 - return; - } - if (currentIndex === -1) { - currentIndex = length; - } - if (event.code === 'ArrowUp' && currentIndex > 0) { - currentIndex--; - user_input_ta.value = key_down_history[currentIndex]; - } else if (event.code === 'ArrowDown' && currentIndex < length - 1) { - currentIndex++; - user_input_ta.value = key_down_history[currentIndex]; - } - user_input_ta.selectionStart = user_input_ta.value.length; - user_input_ta.selectionEnd = user_input_ta.value.length; - const input_event = new InputEvent("input", { bubbles: true, cancelable: true }); - user_input_ta.dispatchEvent(input_event); - } else if (event.code === "Enter") { - if (value) { - currentIndex = -1; - if (key_down_history.indexOf(value) === -1) { - key_down_history.push(value); - if (key_down_history.length > MAX_HISTORY_LENGTH) { - key_down_history.shift(); - } - } - } - } - }); - } -} - -function toggleUserInfoVisibility(shouldHide) { - if (userInfoDiv) { - if (shouldHide) { - userInfoDiv.classList.add("hideK"); - } else { - userInfoDiv.classList.remove("hideK"); - } - } -} -function showOrHideUserInfo() { - var sendBtn = document.getElementById("submit_btn"); - - // Bind mouse/touch events to show/hide user info - appTitleDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - userInfoDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - sendBtn.addEventListener("mouseenter", 
function () { - toggleUserInfoVisibility(false); - }); - - appTitleDiv.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - userInfoDiv.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - sendBtn.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - - appTitleDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - userInfoDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - sendBtn.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - - appTitleDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - userInfoDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - sendBtn.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); // Delay 1 second to hide user info - }; - - // Hide user info after 2 second - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 2000); -} - -function toggleDarkMode(isEnabled) { - if (isEnabled) { - gradioContainer.classList.add("dark"); - document.body.style.setProperty("background-color", "var(--neutral-950)", "important"); - } else { - gradioContainer.classList.remove("dark"); - document.body.style.backgroundColor = ""; - } -} -function adjustDarkMode() { - const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)"); - - // 根据当前颜色模式设置初始状态 - apSwitch.checked = darkModeQuery.matches; - toggleDarkMode(darkModeQuery.matches); - // 监听颜色模式变化 - darkModeQuery.addEventListener("change", (e) => { - apSwitch.checked = e.matches; - toggleDarkMode(e.matches); - }); - // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - apSwitch.addEventListener("change", (e) => { - toggleDarkMode(e.target.checked); - }); -} - -function setChatbotHeight() { - const screenWidth = window.innerWidth; - const statusDisplay = document.querySelector('#status_display'); - const statusDisplayHeight = statusDisplay ? 
statusDisplay.offsetHeight : 0; - const wrap = chatbot.querySelector('.wrap'); - const vh = window.innerHeight * 0.01; - document.documentElement.style.setProperty('--vh', `${vh}px`); - if (isInIframe) { - chatbot.style.height = `700px`; - wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))` - } else { - if (screenWidth <= 320) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else if (screenWidth <= 499) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } - } -} - -// 监视页面内部 DOM 变动 -var observer = new MutationObserver(function (mutations) { - gradioLoaded(mutations); -}); -observer.observe(targetNode, { childList: true, subtree: true }); - -// 监视页面变化 -window.addEventListener("DOMContentLoaded", function () { - isInIframe = (window.self !== window.top); -}); -window.addEventListener('resize', setChatbotHeight); -window.addEventListener('scroll', setChatbotHeight); -window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode); \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/linearized_convolution.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/linearized_convolution.py deleted file mode 100644 index f7e156cb0c75cb375447859c8b6749311372c35e..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/linearized_convolution.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state - -from .conv_tbc import ConvTBC - -from typing import Dict, Optional -from torch import Tensor - -@with_incremental_state -class LinearizedConvolution(ConvTBC): - """An optimized version of nn.Conv1d. - - At training time, this module uses ConvTBC, which is an optimized version - of Conv1d. At inference time, it optimizes incremental generation (i.e., - one time step at a time) by replacing the convolutions with linear layers. - Note that the input order changes from training to inference. 
- """ - - def __init__(self, in_channels, out_channels, kernel_size, **kwargs): - super().__init__(in_channels, out_channels, kernel_size, **kwargs) - self._linearized_weight = None - self.register_backward_hook(self._clear_linearized_weight) - - def state_dict(self, destination=None, prefix="", keep_vars=False): - state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) - # don't store redundant _linearized_weight in checkpoints - if prefix + "_linearized_weight" in state: - del state[prefix + "_linearized_weight"] - return state - - def upgrade_state_dict_named(self, state_dict, name): - prefix = name + "." if name != "" else "" - if prefix + "_linearized_weight" in state_dict: - del state_dict[prefix + "_linearized_weight"] - - @torch.jit.export - def forward(self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None): - """ - Args: - incremental_state: Used to buffer signal; if not None, then input is - expected to contain a single frame. If the input order changes - between time steps, call reorder_incremental_state. - Input: - Time x Batch x Channel during training - Batch x Time x Channel during inference - """ - if incremental_state is None: - output = self.conv_tbc(input) - if self.kernel_size[0] > 1 and self.padding[0] > 0: - # remove future timesteps added by padding - output = output[: -self.padding[0], :, :] - return output - - # reshape weight - weight = self._get_linearized_weight() - kw = self.kernel_size[0] - - bsz = input.size(0) # input: bsz x len x dim - if kw > 1: - input = input.data - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is None: - input_buffer = input.new(bsz, kw, input.size(2)).zero_() - self._set_input_buffer(incremental_state, input_buffer) - else: - # shift buffer - input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() - # append next input - input_buffer[:, -1, :] = input[:, -1, :] - input = input_buffer - with torch.no_grad(): - output = F.linear(input.view(bsz, -1), weight, self.bias) - return output.view(bsz, 1, -1) - - @torch.jit.unused - def reorder_incremental_state(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order): - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - input_buffer = input_buffer.index_select(0, new_order) - self._set_input_buffer(incremental_state, input_buffer) - - @torch.jit.unused - def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]): - return utils.get_incremental_state(self, incremental_state, "input_buffer") - - @torch.jit.unused - def _set_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer): - return utils.set_incremental_state( - self, incremental_state, "input_buffer", new_buffer - ) - - @torch.jit.unused - def _get_linearized_weight(self): - if self._linearized_weight is None: - kw = self.kernel_size[0] - weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() - assert weight.size() == (self.out_channels, kw, self.in_channels) - return weight.view(self.out_channels, -1) - return self._linearized_weight - - @torch.jit.unused - def _clear_linearized_weight(self, *args): - self._linearized_weight = None diff --git a/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/__init__.py deleted file mode 100644 index 
3f7d85bba884ea8f83fc6ab2a1e6ade80d98d4d9..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'tylin' diff --git a/spaces/HarshulNanda/HARM_ML_App_ludwig/colors.py b/spaces/HarshulNanda/HARM_ML_App_ludwig/colors.py deleted file mode 100644 index e9cdea11a8c22dc95c4a2ec4ff46360a2009b4e4..0000000000000000000000000000000000000000 --- a/spaces/HarshulNanda/HARM_ML_App_ludwig/colors.py +++ /dev/null @@ -1,124 +0,0 @@ -class colorOf: - HEADER = '\033[95m' - OKBLUE = '\033[94m' - OKCYAN = '\033[96m' - OKGREEN = '\033[92m' - WARNING = '\033[93m' - FAIL = '\033[91m' - ENDC = '\033[0m' - BOLD = '\033[1m' - UNDERLINE = '\033[4m' - -dataset = { - "Coding" : [ - "Web Development", - "Data Science", - "Mobile Development", - "Programming Languages", - "Database and Design", - "Software Testing", - "Software Engineering", - "Development Tools", - "No-code Development", - "Basic Programming for kids", - "Coding Questions for TCS NQT, TCS Ninja, TCS Digital", - "Think Like a Coder", - ], - "Business" : [ - "Entrepreneurship", - "Communications", - "Management", - "Sales", - "Business Strategy", - "Project Management", - "Human Resources", - "Industry", - "Other Business", - ], - "Finanace and Accounting" : [ - "Accounting and Bookkeeping", - "Cryptocurrency and Blockchain", - "Economics", - "Finance", - "Finance Cert and Exam Prep", - "Financial Modelling and Analysis", - "Investing and Trading", - "Other Finance and Accounting", - ], - "IT and Software" : [ - "IT Certification", - "Network and Security", - "Hardware", - "Other IT and Software", - ], - "Office Productivity" : [ - "Google", - "Other Office Productivity", - ], - "Personal Development" : [ - "Memory and Study Skills", - "Personal Transformation", - "Personal Productivity", - "Career Development", - "Happiness", - "Personal Brand Building", - "Creativity", - "Influence", - "Self Esteem and Confidence", - "Other Personal Development", - "Set up your first blog on blogger", - ], - "Design" : [ - "Web Design", - "Graphics Desgin and Illustrations", - "Desgin Tools", - "User Experience Design", - "Game Design", - "Design Thinking", - "3D and Animation", - "Architectural Design", - "Other Design", - ], - "Marketing" : [ - "Digital Marketing", - "Social Media Marketing", - "Marketing Fundamentals", - "Growth Hacking", - ], - "Lifestyle" : [ - "Arts and Crafts", - "Travel", - ], - "Photography and Video" : [ - "Photography", - "Video Design", - "Other Photography and Video", - ], - "Health and Fitness" : [ - "Fitness", - "General Health", - "Sports", - "Mental Health", - "Meditation", - "Other Health and Fitness", - ], - "Music" : [ - "Vocal", - ], - "Teaching and Academics" : [ - "Engineering", - "Math", - "Science", - "Online Education", - "Social Science", - "Language", - "Teacher Training", - "Test Prep", - "Other Teaching and Academics", - "Pedagogy of Education", - ], - "Competitive Exams" : [ - "SSC CHSL", - "Other Competitive Exams", - ], -} \ No newline at end of file diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/utils.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/utils.py deleted file mode 100644 index a591aa319ccb264110111cda55c4a232b41aae74..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/utils.py +++ /dev/null @@ -1,282 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import 
subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - iteration = 1 - if "iteration" in checkpoint_dict.keys(): - iteration = checkpoint_dict["iteration"] - if "learning_rate" in checkpoint_dict.keys(): - learning_rate = checkpoint_dict["learning_rate"] - if optimizer is not None and "optimizer" in checkpoint_dict.keys(): - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - saved_state_dict = checkpoint_dict["model"] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info( - "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration) - ) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save( - { - "model": state_dict, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats="HWC") - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots() - im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none") - fig.colorbar(im, ax=ax) - xlabel = "Decoder timestep" - if info is not 
None: - xlabel += "\n\n" + info - plt.xlabel(xlabel) - plt.ylabel("Encoder timestep") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding="utf-8") as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument("-c", "--config", type=str, help="JSON file for configuration") - parser.add_argument("-m", "--model", type=str, help="Model name") - # parser.add_argument('-g', '--gan', type=str, - # help='Model name') - parser.add_argument("-l", "--logs", type=str, help="logs name") - # parser.add_argument('-s', '--mels', type=str, - # help='logs name') - - args = parser.parse_args() - # model_dir = os.path.join("./logs", args.model) - model_dir = args.model - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - - # if not config_path : config_path = config_save_path - - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.log_dir = args.logs - # hparams.mels_dir = args.mels - # hparams.gan_dir = args.gan - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn( - "{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - ) - ) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn( - "git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8] - ) - ) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Haswanth/haswanthpalepu/README.md b/spaces/Haswanth/haswanthpalepu/README.md deleted file mode 100644 index 3bd8bd7b5e828d670cc1c8fdc228c2709b6ca61c..0000000000000000000000000000000000000000 --- a/spaces/Haswanth/haswanthpalepu/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Haswanthpalepu -emoji: 👀 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HighCWu/GPEN/retinaface/data/__init__.py b/spaces/HighCWu/GPEN/retinaface/data/__init__.py deleted file mode 100644 index ea50ebaf88d64e75f4960bc99b14f138a343e575..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/retinaface/data/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .wider_face import WiderFaceDetection, detection_collate -from .data_augment import * -from .config import * diff --git a/spaces/HugoHE/monitoringObjectDetection/README.md b/spaces/HugoHE/monitoringObjectDetection/README.md deleted file mode 100644 index 87d91274ff3ff5486d3b74e109db4513675a0242..0000000000000000000000000000000000000000 --- a/spaces/HugoHE/monitoringObjectDetection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MonitoringObjectDetection -emoji: 🐢 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.28.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/OFA/fairseq/examples/linformer/README.md b/spaces/ICML2022/OFA/fairseq/examples/linformer/README.md deleted file mode 100644 index f8b36bc691cb8f5bf82942e07b6d9c014387bdd8..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/linformer/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Linformer: Self-Attention with Linear Complexity (Wang et al., 2020) - -This example contains code to train Linformer models as described in our paper -[Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768). 
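-
-As a minimal, illustrative sketch (the data path below is a placeholder and the remaining hyperparameters should be taken from the RoBERTa pretraining README referenced in the next section), the training command boils down to:
-
-```bash
-fairseq-train /path/to/preprocessed/data \
-    --user-dir examples/linformer/linformer_src \
-    --arch linformer_roberta_base \
-    --task masked_lm --criterion masked_lm
-    # plus the optimizer/batching flags from the RoBERTa pretraining README
-```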
- -## Training a new Linformer RoBERTa model - -You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md), -updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`. - -## Citation - -If you use our work, please cite: - -```bibtex -@article{wang2020linformer, - title={Linformer: Self-Attention with Linear Complexity}, - author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao}, - journal={arXiv preprint arXiv:2006.04768}, - year={2020} -} -``` diff --git a/spaces/ICML2022/OFA/fairseq/examples/pay_less_attention_paper/README.md b/spaces/ICML2022/OFA/fairseq/examples/pay_less_attention_paper/README.md deleted file mode 100644 index 5adab11f4dc3461f9e7126ac391b04e703616e6b..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/pay_less_attention_paper/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019) - -This page contains pointers to pre-trained models as well as instructions on how to train new models for [our paper](https://arxiv.org/abs/1901.10430). - -## Citation: -```bibtex -@inproceedings{wu2018pay, - title = {Pay Less Attention with Lightweight and Dynamic Convolutions}, - author = {Felix Wu and Angela Fan and Alexei Baevski and Yann Dauphin and Michael Auli}, - booktitle = {International Conference on Learning Representations}, - year = {2019}, - url = {https://arxiv.org/abs/1901.10430}, -} -``` - -## Translation - -### Pre-trained models -For some datasets we release models without GLUs which are faster at inference. - -Model | Description | Dataset | Download ----|---|---|--- -`lightconv.no_glu.iwslt14.de-en` | LightConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model:
[download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz) <br> IWSLT14 test: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2)
-`dynamicconv.no_glu.iwslt14.de-en` | DynamicConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz) <br> IWSLT14 test: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2)
-`lightconv.no_glu.wmt16.en-de` | LightConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
-`dynamicconv.no_glu.wmt16.en-de` | DynamicConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
-`lightconv.glu.wmt16.en-de` | LightConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
-`dynamicconv.glu.wmt16.en-de` | DynamicConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
-`lightconv.glu.wmt14.en-fr` | LightConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
-`dynamicconv.glu.wmt14.en-fr` | DynamicConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
-`lightconv.glu.wmt17.zh-en` | LightConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz) <br> newstest2017: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2)
-`dynamicconv.glu.wmt17.zh-en` | DynamicConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz) <br> newstest2017:
[download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2) - -### Memory-Efficient CUDA Kernels - -Since the PyTorch implementations of Light/Dynamic conv are quite memory intensive, we have developed CUDA kernels that implement the light and dynamic convolution operator in a memory-efficient and performant manner. For large sequence lengths, these kernels save about 50% memory compared to the PyTorch equivalent. - -To install the kernels, use the commands below. Once installed, they will automatically be used in place of the PyTorch implementations whenever a light or dynamic convolution is used. - -```sh -# to install lightconv -cd fairseq/modules/lightconv_layer -python cuda_function_gen.py -python setup.py install - -# to install dynamicconv -cd fairseq/modules/dynamicconv_layer -python cuda_function_gen.py -python setup.py install -``` - -### Example usage (torch.hub) - -We require a few additional Python dependencies for preprocessing: -```bash -pip install sacremoses subword_nmt -``` - -Interactive translation via PyTorch Hub: -```python -import torch - -# List available models -torch.hub.list('pytorch/fairseq') # [..., 'lightconv.glu.wmt17.zh-en', ... ] - -# Load a transformer trained on WMT'16 En-De -zh2en = torch.hub.load('pytorch/fairseq', 'lightconv.glu.wmt17.zh-en', tokenizer='moses', bpe='subword_nmt') - -# The underlying model is available under the *models* attribute -assert isinstance(zh2en.models[0], fairseq.models.lightconv.LightConvModel) - -# Translate a sentence -zh2en.translate('你好 世界') -# 'Hello World' -``` - -Loading custom models: -```python -from fairseq.models.lightconv import LightConvModel -en2fr = LightConvModel.from_pretrained( - '/path/to/checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='data-bin/wmt14_en_fr', - bpe='subword_nmt', - bpe_codes='data-bin/wmt14_en_fr/en.code' -) -en2fr.translate('Hello world!') -# 'Bonjour le monde' -``` - -### Preprocessing the training datasets - -Please follow the instructions in [`examples/translation/README.md`](../translation/README.md) to preprocess the data. - -### Training and evaluation options: -To use the model without GLU, please set `--encoder-glu 0 --decoder-glu 0`. -For LightConv, please use `--encoder-conv-type lightweight --decoder-conv-type lightweight`, otherwise the default is DynamicConv. -For best BLEU results, lenpen may need to be manually tuned. - -To use the CUDA kernels, first install the PyTorch modules using the commands -above. Once the CUDA modules are installed, they will automatically be used -instead of the PyTorch modules. 
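For orientation before the dataset-specific recipes, the sketch below shows what the lightweight convolution operator computes: a depthwise 1D convolution whose kernel is softmax-normalized over its width and shared across all channels within a head. It is a simplified illustration under those assumptions — the function name and shapes are made up, and weight dropout (DropConnect) and the GLU input gate are omitted — not the fairseq module or its CUDA kernel.

```python
import torch
import torch.nn.functional as F


def lightweight_conv_sketch(x: torch.Tensor, weight: torch.Tensor, num_heads: int) -> torch.Tensor:
    """x: (batch, channels, time); weight: (num_heads, kernel_size), one kernel per head."""
    batch, channels, time = x.shape
    kernel_size = weight.shape[-1]
    assert weight.shape[0] == num_heads and channels % num_heads == 0
    # Softmax-normalize each head's kernel over the kernel width (the "lightweight" part).
    weight = F.softmax(weight, dim=-1)
    # Tie the same kernel to every channel in a head, then run a depthwise convolution.
    depthwise = weight.repeat_interleave(channels // num_heads, dim=0).unsqueeze(1)  # (channels, 1, K)
    return F.conv1d(x, depthwise, padding=kernel_size // 2, groups=channels)


# Usage sketch: 8 heads over 512 channels, kernel width 7.
x = torch.randn(4, 512, 100)
w = torch.randn(8, 7)
print(lightweight_conv_sketch(x, w, num_heads=8).shape)  # torch.Size([4, 512, 100])
```

The dynamic variant (DynamicConv) differs in that the kernel weights are not fixed parameters but are predicted from the current timestep's input by a learned linear layer, as described in the cited paper.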
- -### IWSLT14 De-En -Training and evaluating DynamicConv (without GLU) on a GPU: -```sh -# Training -SAVE="save/dynamic_conv_iwslt" -mkdir -p $SAVE -CUDA_VISIBLE_DEVICES=0 $(which fairseq-train) data-bin/iwslt14.tokenized.de-en \ - --clip-norm 0 --optimizer adam --lr 0.0005 \ - --source-lang de --target-lang en --max-tokens 4000 --no-progress-bar \ - --log-interval 100 --stop-min-lr '1e-09' --weight-decay 0.0001 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --lr-scheduler inverse_sqrt \ - --ddp-backend=legacy_ddp \ - --max-update 50000 --warmup-updates 4000 --warmup-init-lr '1e-07' \ - --adam-betas '(0.9, 0.98)' --keep-last-epochs 10 \ - -a lightconv_iwslt_de_en --save-dir $SAVE \ - --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 0 --decoder-glu 0 -python scripts/average_checkpoints.py --inputs $SAVE \ - --num-epoch-checkpoints 10 --output "${SAVE}/checkpoint_last10_avg.pt" - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/iwslt14.tokenized.de-en --path "${SAVE}/checkpoint_last10_avg.pt" --batch-size 128 --beam 4 --remove-bpe --lenpen 1 --gen-subset test --quiet -``` - -### WMT16 En-De -Training and evaluating DynamicConv (with GLU) on WMT16 En-De using cosine scheduler on one machine with 8 V100 GPUs: -```sh -# Training -SAVE="save/dynamic_conv_wmt16en2de" -mkdir -p $SAVE -python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \ - data-bin/wmt16_en_de_bpe32k --fp16 --log-interval 100 --no-progress-bar \ - --max-update 30000 --share-all-embeddings --optimizer adam \ - --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \ - --ddp-backend=legacy_ddp --max-tokens 3584 \ - --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \ - --lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \ - --t-mult 1 --lr-period-updates 20000 \ - --arch lightconv_wmt_en_de_big --save-dir $SAVE \ - --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 1 --decoder-glu 1 - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/wmt16.en-de.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.5 --gen-subset test > wmt16_gen.txt -bash scripts/compound_split_bleu.sh wmt16_gen.txt -``` - -### WMT14 En-Fr -Training DynamicConv (with GLU) on WMT14 En-Fr using cosine scheduler on one machine with 8 V100 GPUs: -```sh -# Training -SAVE="save/dynamic_conv_wmt14en2fr" -mkdir -p $SAVE -python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \ - data-bin/wmt14_en_fr --fp16 --log-interval 100 --no-progress-bar \ - --max-update 30000 --share-all-embeddings --optimizer adam \ - --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \ - --ddp-backend=legacy_ddp --max-tokens 3584 \ - --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \ - --lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \ - --t-mult 1 --lr-period-updates 70000 \ - --arch lightconv_wmt_en_fr_big --save-dir $SAVE \ - --dropout 0.1 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 1 --decoder-glu 1 - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate 
data-bin/wmt14.en-fr.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.9 --gen-subset test -``` diff --git a/spaces/Ignahugging/Image_filtering/app.py b/spaces/Ignahugging/Image_filtering/app.py deleted file mode 100644 index 503be109417c9a1df5f227e809c308594983e6fb..0000000000000000000000000000000000000000 --- a/spaces/Ignahugging/Image_filtering/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import cv2 -import numpy as np -import gradio as gr - -def imagen(image): - image= cv2.cvtColor(image,cv2.COLOR_BGR2RGB) - grayImage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - grayImage = cv2.GaussianBlur(grayImage, (3, 3), 0) - edgeImage = cv2.Laplacian(grayImage, -1, ksize=5) - edgeImage = 255 - edgeImage - ret, edgeImage = cv2.threshold(edgeImage, 150, 255, cv2.THRESH_BINARY) - edgePreservingImage = cv2.edgePreservingFilter(image, flags=2, sigma_s=50, sigma_r=0.4) - output = np.zeros(grayImage.shape) - output = cv2.bitwise_and(edgePreservingImage, edgePreservingImage, mask=edgeImage) - candidate = cv2.cvtColor(output,cv2.COLOR_RGB2BGR) - return candidate - -interface = gr.Interface(imagen, - inputs = gr.inputs.Image(shape=(1024,1024)), - outputs = "image", - title="Gamify your photo") -interface.launch(inline=False) \ No newline at end of file diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/mel_processing.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/mel_processing.py deleted file mode 100644 index 238336afd9ae893eb37e8eef2bc108b1786d9e12..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - 
- -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y.float(), n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Jamkonams/AutoGPT/autogpt/commands/times.py b/spaces/Jamkonams/AutoGPT/autogpt/commands/times.py deleted file mode 100644 index 3c9b8a4fc67a251c9e81a8c4a725cd1e25fcbebe..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/commands/times.py +++ /dev/null @@ -1,10 +0,0 @@ -from datetime import datetime - - -def get_datetime() -> str: - """Return the current date and time - - Returns: - str: The current date and time - """ - return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/utils/lmdb_util.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/utils/lmdb_util.py deleted file mode 100644 index e0a10f60ffca2e36ac5f5564aafd70e79d06a723..0000000000000000000000000000000000000000 --- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/utils/lmdb_util.py +++ /dev/null @@ -1,196 +0,0 @@ -import cv2 -import lmdb -import sys -from multiprocessing import Pool -from os import path as osp -from tqdm import tqdm - - -def make_lmdb_from_imgs(data_path, - lmdb_path, - img_path_list, - keys, - batch=5000, - compress_level=1, - multiprocessing_read=False, - n_thread=40, - map_size=None): - """Make lmdb from images. - - Contents of lmdb. The file structure is: - example.lmdb - ├── data.mdb - ├── lock.mdb - ├── meta_info.txt - - The data.mdb and lock.mdb are standard lmdb files and you can refer to - https://lmdb.readthedocs.io/en/release/ for more details. - - The meta_info.txt is a specified txt file to record the meta information - of our datasets. It will be automatically created when preparing - datasets by our provided dataset tools. 
- Each line in the txt file records 1)image name (with extension), - 2)image shape, and 3)compression level, separated by a white space. - - For example, the meta information could be: - `000_00000000.png (720,1280,3) 1`, which means: - 1) image name (with extension): 000_00000000.png; - 2) image shape: (720,1280,3); - 3) compression level: 1 - - We use the image name without extension as the lmdb key. - - If `multiprocessing_read` is True, it will read all the images to memory - using multiprocessing. Thus, your server needs to have enough memory. - - Args: - data_path (str): Data path for reading images. - lmdb_path (str): Lmdb save path. - img_path_list (str): Image path list. - keys (str): Used for lmdb keys. - batch (int): After processing batch images, lmdb commits. - Default: 5000. - compress_level (int): Compress level when encoding images. Default: 1. - multiprocessing_read (bool): Whether use multiprocessing to read all - the images to memory. Default: False. - n_thread (int): For multiprocessing. - map_size (int | None): Map size for lmdb env. If None, use the - estimated size from images. Default: None - """ - - assert len(img_path_list) == len(keys), ('img_path_list and keys should have the same length, ' - f'but got {len(img_path_list)} and {len(keys)}') - print(f'Create lmdb for {data_path}, save to {lmdb_path}...') - print(f'Totoal images: {len(img_path_list)}') - if not lmdb_path.endswith('.lmdb'): - raise ValueError("lmdb_path must end with '.lmdb'.") - if osp.exists(lmdb_path): - print(f'Folder {lmdb_path} already exists. Exit.') - sys.exit(1) - - if multiprocessing_read: - # read all the images to memory (multiprocessing) - dataset = {} # use dict to keep the order for multiprocessing - shapes = {} - print(f'Read images with multiprocessing, #thread: {n_thread} ...') - pbar = tqdm(total=len(img_path_list), unit='image') - - def callback(arg): - """get the image data and update pbar.""" - key, dataset[key], shapes[key] = arg - pbar.update(1) - pbar.set_description(f'Read {key}') - - pool = Pool(n_thread) - for path, key in zip(img_path_list, keys): - pool.apply_async(read_img_worker, args=(osp.join(data_path, path), key, compress_level), callback=callback) - pool.close() - pool.join() - pbar.close() - print(f'Finish reading {len(img_path_list)} images.') - - # create lmdb environment - if map_size is None: - # obtain data size for one image - img = cv2.imread(osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED) - _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) - data_size_per_img = img_byte.nbytes - print('Data size per image is: ', data_size_per_img) - data_size = data_size_per_img * len(img_path_list) - map_size = data_size * 10 - - env = lmdb.open(lmdb_path, map_size=map_size) - - # write data to lmdb - pbar = tqdm(total=len(img_path_list), unit='chunk') - txn = env.begin(write=True) - txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') - for idx, (path, key) in enumerate(zip(img_path_list, keys)): - pbar.update(1) - pbar.set_description(f'Write {key}') - key_byte = key.encode('ascii') - if multiprocessing_read: - img_byte = dataset[key] - h, w, c = shapes[key] - else: - _, img_byte, img_shape = read_img_worker(osp.join(data_path, path), key, compress_level) - h, w, c = img_shape - - txn.put(key_byte, img_byte) - # write meta information - txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n') - if idx % batch == 0: - txn.commit() - txn = env.begin(write=True) - pbar.close() - txn.commit() - env.close() - 
txt_file.close() - print('\nFinish writing lmdb.') - - -def read_img_worker(path, key, compress_level): - """Read image worker. - - Args: - path (str): Image path. - key (str): Image key. - compress_level (int): Compress level when encoding images. - - Returns: - str: Image key. - byte: Image byte. - tuple[int]: Image shape. - """ - - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if img.ndim == 2: - h, w = img.shape - c = 1 - else: - h, w, c = img.shape - _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) - return (key, img_byte, (h, w, c)) - - -class LmdbMaker(): - """LMDB Maker. - - Args: - lmdb_path (str): Lmdb save path. - map_size (int): Map size for lmdb env. Default: 1024 ** 4, 1TB. - batch (int): After processing batch images, lmdb commits. - Default: 5000. - compress_level (int): Compress level when encoding images. Default: 1. - """ - - def __init__(self, lmdb_path, map_size=1024**4, batch=5000, compress_level=1): - if not lmdb_path.endswith('.lmdb'): - raise ValueError("lmdb_path must end with '.lmdb'.") - if osp.exists(lmdb_path): - print(f'Folder {lmdb_path} already exists. Exit.') - sys.exit(1) - - self.lmdb_path = lmdb_path - self.batch = batch - self.compress_level = compress_level - self.env = lmdb.open(lmdb_path, map_size=map_size) - self.txn = self.env.begin(write=True) - self.txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') - self.counter = 0 - - def put(self, img_byte, key, img_shape): - self.counter += 1 - key_byte = key.encode('ascii') - self.txn.put(key_byte, img_byte) - # write meta information - h, w, c = img_shape - self.txt_file.write(f'{key}.png ({h},{w},{c}) {self.compress_level}\n') - if self.counter % self.batch == 0: - self.txn.commit() - self.txn = self.env.begin(write=True) - - def close(self): - self.txn.commit() - self.env.close() - self.txt_file.close() diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-2.0/app.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-2.0/app.py deleted file mode 100644 index 7fdcf98f2146603c0ba4f921752e48c85d9ccbae..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-2.0/app.py +++ /dev/null @@ -1,241 +0,0 @@ -from turtle import title -import gradio as gr - -import git -import os -os.system('git clone https://github.com/Edresson/Coqui-TTS -b multilingual-torchaudio-SE TTS') -os.system('pip install -q -e TTS/') -os.system('pip install -q torchaudio==0.9.0') - -import sys -TTS_PATH = "TTS/" - -# add libraries into environment -sys.path.append(TTS_PATH) # set this if TTS is not installed globally - -import os -import string -import time -import argparse -import json - -import numpy as np -import IPython -from IPython.display import Audio - - -import torch - -from TTS.tts.utils.synthesis import synthesis -from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols -try: - from TTS.utils.audio import AudioProcessor -except: - from TTS.utils.audio import AudioProcessor - - -from TTS.tts.models import setup_model -from TTS.config import load_config -from TTS.tts.models.vits import * - -os.system('pip install voicefixer --upgrade') -from voicefixer import VoiceFixer -voicefixer = VoiceFixer() - -import openai - -import torchaudio -from speechbrain.pretrained import SpectralMaskEnhancement - -enhance_model = SpectralMaskEnhancement.from_hparams( -source="speechbrain/metricgan-plus-voicebank", -savedir="pretrained_models/metricgan-plus-voicebank", -run_opts={"device":"cuda"}, -) - -mes1 = [ - {"role": "system", "content": "You are a TOEFL 
examiner. Help me improve my oral Englsih and give me feedback."} -] - -mes2 = [ - {"role": "system", "content": "You are a mental health therapist. Your name is Tina."} -] - -mes3 = [ - {"role": "system", "content": "You are my personal assistant. Your name is Alice."} -] - -OUT_PATH = 'out/' - -# create output path -os.makedirs(OUT_PATH, exist_ok=True) - -# model vars -MODEL_PATH = '/home/user/app/best_model_latest.pth.tar' -CONFIG_PATH = '/home/user/app/config.json' -TTS_LANGUAGES = "/home/user/app/language_ids.json" -TTS_SPEAKERS = "/home/user/app/speakers.json" -USE_CUDA = torch.cuda.is_available() - -# load the config -C = load_config(CONFIG_PATH) - - -# load the audio processor -ap = AudioProcessor(**C.audio) - -speaker_embedding = None - -C.model_args['d_vector_file'] = TTS_SPEAKERS -C.model_args['use_speaker_encoder_as_loss'] = False - -model = setup_model(C) -model.language_manager.set_language_ids_from_file(TTS_LANGUAGES) -# print(model.language_manager.num_languages, model.embedded_language_dim) -# print(model.emb_l) -cp = torch.load(MODEL_PATH, map_location=torch.device('cpu')) -# remove speaker encoder -model_weights = cp['model'].copy() -for key in list(model_weights.keys()): - if "speaker_encoder" in key: - del model_weights[key] - -model.load_state_dict(model_weights) - - -model.eval() - -if USE_CUDA: - model = model.cuda() - -# synthesize voice -use_griffin_lim = False - -os.system('pip install -q pydub ffmpeg-normalize') - -CONFIG_SE_PATH = "config_se.json" -CHECKPOINT_SE_PATH = "SE_checkpoint.pth.tar" - -from TTS.tts.utils.speakers import SpeakerManager -from pydub import AudioSegment -import librosa - -SE_speaker_manager = SpeakerManager(encoder_model_path=CHECKPOINT_SE_PATH, encoder_config_path=CONFIG_SE_PATH, use_cuda=USE_CUDA) - -def compute_spec(ref_file): - y, sr = librosa.load(ref_file, sr=ap.sample_rate) - spec = ap.spectrogram(y) - spec = torch.FloatTensor(spec).unsqueeze(0) - return spec - - - -def greet(apikey, Voicetoclone, VoiceMicrophone, Texts, choice1): - - openai.api_key = apikey - - if choice1 == "TOEFL": - messages = mes1 - elif choice1 == "Therapist": - messages = mes2 - elif choice1 == "Alice": - messages = mes3 - - # chatgpt - content = Texts - messages.append({"role": "user", "content": content}) - - completion = openai.ChatCompletion.create( - model = "gpt-3.5-turbo", - messages = messages - ) - - chat_response = completion.choices[0].message.content - - messages.append({"role": "assistant", "content": chat_response}) - - text= "%s" % (chat_response) - if Voicetoclone is not None: - reference_files= "%s" % (Voicetoclone) - print("path url") - print(Voicetoclone) - sample= str(Voicetoclone) - else: - reference_files= "%s" % (VoiceMicrophone) - print("path url") - print(VoiceMicrophone) - sample= str(VoiceMicrophone) - size= len(reference_files)*sys.getsizeof(reference_files) - size2= size / 1000000 - if (size2 > 0.012) or len(text)>2000: - message="File is greater than 30mb or Text inserted is longer than 2000 characters. Please re-try with smaller sizes." - print(message) - raise SystemExit("File is greater than 30mb. Please re-try or Text inserted is longer than 2000 characters. Please re-try with smaller sizes.") - else: - os.system('ffmpeg-normalize $sample -nt rms -t=-27 -o $sample -ar 16000 -f') - reference_emb = SE_speaker_manager.compute_d_vector_from_clip(reference_files) - model.length_scale = 1 # scaler for the duration predictor. The larger it is, the slower the speech. 
- model.inference_noise_scale = 0.3 # defines the noise variance applied to the random z vector at inference. - model.inference_noise_scale_dp = 0.3 # defines the noise variance applied to the duration predictor z vector at inference. - text = text - model.language_manager.language_id_mapping - language_id = 0 - - print(" > text: {}".format(text)) - wav, alignment, _, _ = synthesis( - model, - text, - C, - "cuda" in str(next(model.parameters()).device), - ap, - speaker_id=None, - d_vector=reference_emb, - style_wav=None, - language_id=language_id, - enable_eos_bos_chars=C.enable_eos_bos_chars, - use_griffin_lim=True, - do_trim_silence=False, - ).values() - print("Generated Audio") - IPython.display.display(Audio(wav, rate=ap.sample_rate)) - #file_name = text.replace(" ", "_") - #file_name = file_name.translate(str.maketrans('', '', string.punctuation.replace('_', ''))) + '.wav' - file_name="Audio.wav" - out_path = os.path.join(OUT_PATH, file_name) - print(" > Saving output to {}".format(out_path)) - ap.save_wav(wav, out_path) - - voicefixer.restore(input=out_path, # input wav file path - output="audio1.wav", # output wav file path - cuda=True, # whether to use gpu acceleration - mode = 0) # You can try out mode 0, 1 to find out the best result - - noisy = enhance_model.load_audio( - "audio1.wav" - ).unsqueeze(0) - - enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.])) - torchaudio.save("enhanced.wav", enhanced.cpu(), 16000) - - return [result.text, chat_response, "enhanced.wav"] - -output_1 = gr.Textbox(label="Speech to Text") -output_2 = gr.Textbox(label="ChatGPT Output") -output_3 = gr.Audio(label="Audio with Custom Voice") - -gr.Interface( - title = '🥳💬💕 - TalktoAI,随时随地,谈天说地!', - theme="huggingface", - description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!", - fn=greet, - inputs=[ - gr.Textbox(lines=1, label = "请填写您的OpenAI-API-key"), - gr.Audio(source="upload", label = "请上传您喜欢的声音(wav文件)", type="filepath"), - gr.Audio(source="microphone", streaming = True, label = "请用语音上传您喜欢的声音,语音和文件上传二选一即可", type="filepath"), - gr.Textbox(lines=3, label = "请开始对话吧!"), - gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"), - ], - outputs=[ - output_1, output_2, output_3 - ], - ).launch() \ No newline at end of file diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/positionwise_feed_forward.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/positionwise_feed_forward.py deleted file mode 100644 index 7a9237a38314e3f758f064ab78d8983b94a9eb0a..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/encoder/positionwise_feed_forward.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Shigeki Karita -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Positionwise feed forward layer definition.""" - -import torch - - -class PositionwiseFeedForward(torch.nn.Module): - """Positionwise feed forward layer. 
- - :param int idim: input dimenstion - :param int hidden_units: number of hidden units - :param float dropout_rate: dropout rate - - """ - - def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()): - """Construct an PositionwiseFeedForward object.""" - super(PositionwiseFeedForward, self).__init__() - self.w_1 = torch.nn.Linear(idim, hidden_units) - self.w_2 = torch.nn.Linear(hidden_units, idim) - self.dropout = torch.nn.Dropout(dropout_rate) - self.activation = activation - - def forward(self, x): - """Forward funciton.""" - return self.w_2(self.dropout(self.activation(self.w_1(x)))) diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/models/sublayer/lsa.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/models/sublayer/lsa.py deleted file mode 100644 index cf2dfa52d629793b11a2460be10d17a726ab5303..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/models/sublayer/lsa.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -class LSA(nn.Module): - def __init__(self, attn_dim, kernel_size=31, filters=32): - super().__init__() - self.conv = nn.Conv1d(1, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=True) - self.L = nn.Linear(filters, attn_dim, bias=False) - self.W = nn.Linear(attn_dim, attn_dim, bias=True) # Include the attention bias in this term - self.v = nn.Linear(attn_dim, 1, bias=False) - self.cumulative = None - self.attention = None - - def init_attention(self, encoder_seq_proj): - device = encoder_seq_proj.device # use same device as parameters - b, t, c = encoder_seq_proj.size() - self.cumulative = torch.zeros(b, t, device=device) - self.attention = torch.zeros(b, t, device=device) - - def forward(self, encoder_seq_proj, query, times, chars): - - if times == 0: self.init_attention(encoder_seq_proj) - - processed_query = self.W(query).unsqueeze(1) - - location = self.cumulative.unsqueeze(1) - processed_loc = self.L(self.conv(location).transpose(1, 2)) - - u = self.v(torch.tanh(processed_query + encoder_seq_proj + processed_loc)) - u = u.squeeze(-1) - - # Mask zero padding chars - u = u * (chars != 0).float() - - # Smooth Attention - # scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True) - scores = F.softmax(u, dim=1) - self.attention = scores - self.cumulative = self.cumulative + self.attention - - return scores.unsqueeze(-1).transpose(1, 2) diff --git a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_metric.py b/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_metric.py deleted file mode 100644 index f77d6516bfa32bdf616ab0a01dfe331220a814e1..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/coco_metric.py +++ /dev/null @@ -1,590 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import datetime -import itertools -import os.path as osp -import tempfile -from collections import OrderedDict -from typing import Dict, List, Optional, Sequence, Union - -import numpy as np -import torch -from mmengine.evaluator import BaseMetric -from mmengine.fileio import dump, get_local_path, load -from mmengine.logging import MMLogger -from terminaltables import AsciiTable - -from mmdet.datasets.api_wrappers import COCO, COCOeval -from mmdet.registry import METRICS -from mmdet.structures.mask import encode_mask_results -from ..functional import eval_recalls - - -@METRICS.register_module() -class CocoMetric(BaseMetric): - """COCO evaluation metric. - - Evaluate AR, AP, and mAP for detection tasks including proposal/box - detection and instance segmentation. Please refer to - https://cocodataset.org/#detection-eval for more details. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None. - metric (str | List[str]): Metrics to be evaluated. Valid metrics - include 'bbox', 'segm', 'proposal', and 'proposal_fast'. - Defaults to 'bbox'. - classwise (bool): Whether to evaluate the metric class-wise. - Defaults to False. - proposal_nums (Sequence[int]): Numbers of proposals to be evaluated. - Defaults to (100, 300, 1000). - iou_thrs (float | List[float], optional): IoU threshold to compute AP - and AR. If not specified, IoUs from 0.5 to 0.95 will be used. - Defaults to None. - metric_items (List[str], optional): Metric result names to be - recorded in the evaluation result. Defaults to None. - format_only (bool): Format the output results without perform - evaluation. It is useful when you want to format the result - to a specific format and submit it to the test server. - Defaults to False. - outfile_prefix (str, optional): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Defaults to None. - file_client_args (dict, optional): Arguments to instantiate the - corresponding backend in mmdet <= 3.0.0rc6. Defaults to None. - backend_args (dict, optional): Arguments to instantiate the - corresponding backend. Defaults to None. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be 'cpu' or - 'gpu'. Defaults to 'cpu'. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, self.default_prefix - will be used instead. Defaults to None. - sort_categories (bool): Whether sort categories in annotations. Only - used for `Objects365V1Dataset`. Defaults to False. 
- """ - default_prefix: Optional[str] = 'coco' - - def __init__(self, - ann_file: Optional[str] = None, - metric: Union[str, List[str]] = 'bbox', - classwise: bool = False, - proposal_nums: Sequence[int] = (100, 300, 1000), - iou_thrs: Optional[Union[float, Sequence[float]]] = None, - metric_items: Optional[Sequence[str]] = None, - format_only: bool = False, - outfile_prefix: Optional[str] = None, - file_client_args: dict = None, - backend_args: dict = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None, - sort_categories: bool = False) -> None: - super().__init__(collect_device=collect_device, prefix=prefix) - # coco evaluation metrics - self.metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in self.metrics: - if metric not in allowed_metrics: - raise KeyError( - "metric should be one of 'bbox', 'segm', 'proposal', " - f"'proposal_fast', but got {metric}.") - - # do class wise evaluation, default False - self.classwise = classwise - - # proposal_nums used to compute recall or precision. - self.proposal_nums = list(proposal_nums) - - # iou_thrs used to compute recall or precision. - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - self.iou_thrs = iou_thrs - self.metric_items = metric_items - self.format_only = format_only - if self.format_only: - assert outfile_prefix is not None, 'outfile_prefix must be not' - 'None when format_only is True, otherwise the result files will' - 'be saved to a temp directory which will be cleaned up at the end.' - - self.outfile_prefix = outfile_prefix - - self.backend_args = backend_args - if file_client_args is not None: - raise RuntimeError( - 'The `file_client_args` is deprecated, ' - 'please use `backend_args` instead, please refer to' - 'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501 - ) - - # if ann_file is not specified, - # initialize coco api with the converted dataset - if ann_file is not None: - with get_local_path( - ann_file, backend_args=self.backend_args) as local_path: - self._coco_api = COCO(local_path) - if sort_categories: - # 'categories' list in objects365_train.json and - # objects365_val.json is inconsistent, need sort - # list(or dict) before get cat_ids. - cats = self._coco_api.cats - sorted_cats = {i: cats[i] for i in sorted(cats)} - self._coco_api.cats = sorted_cats - categories = self._coco_api.dataset['categories'] - sorted_categories = sorted( - categories, key=lambda i: i['id']) - self._coco_api.dataset['categories'] = sorted_categories - else: - self._coco_api = None - - # handle dataset lazy init - self.cat_ids = None - self.img_ids = None - - def fast_eval_recall(self, - results: List[dict], - proposal_nums: Sequence[int], - iou_thrs: Sequence[float], - logger: Optional[MMLogger] = None) -> np.ndarray: - """Evaluate proposal recall with COCO's fast_eval_recall. - - Args: - results (List[dict]): Results of the dataset. - proposal_nums (Sequence[int]): Proposal numbers used for - evaluation. - iou_thrs (Sequence[float]): IoU thresholds used for evaluation. - logger (MMLogger, optional): Logger used for logging the recall - summary. - Returns: - np.ndarray: Averaged recall results. 
- """ - gt_bboxes = [] - pred_bboxes = [result['bboxes'] for result in results] - for i in range(len(self.img_ids)): - ann_ids = self._coco_api.get_ann_ids(img_ids=self.img_ids[i]) - ann_info = self._coco_api.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - if ann.get('ignore', False) or ann['iscrowd']: - continue - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - def xyxy2xywh(self, bbox: np.ndarray) -> list: - """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO - evaluation. - - Args: - bbox (numpy.ndarray): The bounding boxes, shape (4, ), in - ``xyxy`` order. - - Returns: - list[float]: The converted bounding boxes, in ``xywh`` order. - """ - - _bbox: List = bbox.tolist() - return [ - _bbox[0], - _bbox[1], - _bbox[2] - _bbox[0], - _bbox[3] - _bbox[1], - ] - - def results2json(self, results: Sequence[dict], - outfile_prefix: str) -> dict: - """Dump the detection results to a COCO style json file. - - There are 3 types of results: proposals, bbox predictions, mask - predictions, and they have different data types. This method will - automatically recognize the type, and dump them to json files. - - Args: - results (Sequence[dict]): Testing results of the - dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.bbox.json", "somepath/xxx.segm.json", - "somepath/xxx.proposal.json". - - Returns: - dict: Possible keys are "bbox", "segm", "proposal", and - values are corresponding filenames. - """ - bbox_json_results = [] - segm_json_results = [] if 'masks' in results[0] else None - for idx, result in enumerate(results): - image_id = result.get('img_id', idx) - labels = result['labels'] - bboxes = result['bboxes'] - scores = result['scores'] - # bbox results - for i, label in enumerate(labels): - data = dict() - data['image_id'] = image_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(scores[i]) - data['category_id'] = self.cat_ids[label] - bbox_json_results.append(data) - - if segm_json_results is None: - continue - - # segm results - masks = result['masks'] - mask_scores = result.get('mask_scores', scores) - for i, label in enumerate(labels): - data = dict() - data['image_id'] = image_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(mask_scores[i]) - data['category_id'] = self.cat_ids[label] - if isinstance(masks[i]['counts'], bytes): - masks[i]['counts'] = masks[i]['counts'].decode() - data['segmentation'] = masks[i] - segm_json_results.append(data) - - result_files = dict() - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - dump(bbox_json_results, result_files['bbox']) - - if segm_json_results is not None: - result_files['segm'] = f'{outfile_prefix}.segm.json' - dump(segm_json_results, result_files['segm']) - - return result_files - - def gt_to_coco_json(self, gt_dicts: Sequence[dict], - outfile_prefix: str) -> str: - """Convert ground truth to coco format json file. - - Args: - gt_dicts (Sequence[dict]): Ground truth of the dataset. - outfile_prefix (str): The filename prefix of the json files. 
If the - prefix is "somepath/xxx", the json file will be named - "somepath/xxx.gt.json". - Returns: - str: The filename of the json file. - """ - categories = [ - dict(id=id, name=name) - for id, name in enumerate(self.dataset_meta['classes']) - ] - image_infos = [] - annotations = [] - - for idx, gt_dict in enumerate(gt_dicts): - img_id = gt_dict.get('img_id', idx) - image_info = dict( - id=img_id, - width=gt_dict['width'], - height=gt_dict['height'], - file_name='') - image_infos.append(image_info) - for ann in gt_dict['anns']: - label = ann['bbox_label'] - bbox = ann['bbox'] - coco_bbox = [ - bbox[0], - bbox[1], - bbox[2] - bbox[0], - bbox[3] - bbox[1], - ] - - annotation = dict( - id=len(annotations) + - 1, # coco api requires id starts with 1 - image_id=img_id, - bbox=coco_bbox, - iscrowd=ann.get('ignore_flag', 0), - category_id=int(label), - area=coco_bbox[2] * coco_bbox[3]) - if ann.get('mask', None): - mask = ann['mask'] - # area = mask_util.area(mask) - if isinstance(mask, dict) and isinstance( - mask['counts'], bytes): - mask['counts'] = mask['counts'].decode() - annotation['segmentation'] = mask - # annotation['area'] = float(area) - annotations.append(annotation) - - info = dict( - date_created=str(datetime.datetime.now()), - description='Coco json file converted by mmdet CocoMetric.') - coco_json = dict( - info=info, - images=image_infos, - categories=categories, - licenses=None, - ) - if len(annotations) > 0: - coco_json['annotations'] = annotations - converted_json_path = f'{outfile_prefix}.gt.json' - dump(coco_json, converted_json_path) - return converted_json_path - - # TODO: data_batch is no longer needed, consider adjusting the - # parameter position - def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (dict): A batch of data from the dataloader. - data_samples (Sequence[dict]): A batch of data samples that - contain annotations and predictions. - """ - for data_sample in data_samples: - result = dict() - pred = data_sample['pred_instances'] - result['img_id'] = data_sample['img_id'] - result['bboxes'] = pred['bboxes'].cpu().numpy() - result['scores'] = pred['scores'].cpu().numpy() - result['labels'] = pred['labels'].cpu().numpy() - # encode mask to RLE - if 'masks' in pred: - result['masks'] = encode_mask_results( - pred['masks'].detach().cpu().numpy()) if isinstance( - pred['masks'], torch.Tensor) else pred['masks'] - # some detectors use different scores for bbox and mask - if 'mask_scores' in pred: - result['mask_scores'] = pred['mask_scores'].cpu().numpy() - - # parse gt - gt = dict() - gt['width'] = data_sample['ori_shape'][1] - gt['height'] = data_sample['ori_shape'][0] - gt['img_id'] = data_sample['img_id'] - if self._coco_api is None: - # TODO: Need to refactor to support LoadAnnotations - assert 'instances' in data_sample, \ - 'ground truth is required for evaluation when ' \ - '`ann_file` is not provided' - gt['anns'] = data_sample['instances'] - # add converted result to the results list - self.results.append((gt, result)) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. 
The keys are the names of - the metrics, and the values are corresponding results. - """ - logger: MMLogger = MMLogger.get_current_instance() - - # split gt and prediction list - gts, preds = zip(*results) - - tmp_dir = None - if self.outfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - outfile_prefix = osp.join(tmp_dir.name, 'results') - else: - outfile_prefix = self.outfile_prefix - - if self._coco_api is None: - # use converted gt json file to initialize coco api - logger.info('Converting ground truth to coco format...') - coco_json_path = self.gt_to_coco_json( - gt_dicts=gts, outfile_prefix=outfile_prefix) - self._coco_api = COCO(coco_json_path) - - # handle lazy init - if self.cat_ids is None: - self.cat_ids = self._coco_api.get_cat_ids( - cat_names=self.dataset_meta['classes']) - if self.img_ids is None: - self.img_ids = self._coco_api.get_img_ids() - - # convert predictions to coco format and dump to json file - result_files = self.results2json(preds, outfile_prefix) - - eval_results = OrderedDict() - if self.format_only: - logger.info('results are saved in ' - f'{osp.dirname(outfile_prefix)}') - return eval_results - - for metric in self.metrics: - logger.info(f'Evaluating {metric}...') - - # TODO: May refactor fast_eval_recall to an independent metric? - # fast eval recall - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - preds, self.proposal_nums, self.iou_thrs, logger=logger) - log_msg = [] - for i, num in enumerate(self.proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - logger.info(log_msg) - continue - - # evaluate proposal, bbox and segm - iou_type = 'bbox' if metric == 'proposal' else metric - if metric not in result_files: - raise KeyError(f'{metric} is not in results') - try: - predictions = load(result_files[metric]) - if iou_type == 'segm': - # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa - # When evaluating mask AP, if the results contain bbox, - # cocoapi will use the box area instead of the mask area - # for calculating the instance area. Though the overall AP - # is not affected, this leads to different - # small/medium/large mask AP results. 
- for x in predictions: - x.pop('bbox') - coco_dt = self._coco_api.loadRes(predictions) - - except IndexError: - logger.error( - 'The testing results of the whole dataset is empty.') - break - - coco_eval = COCOeval(self._coco_api, coco_dt, iou_type) - - coco_eval.params.catIds = self.cat_ids - coco_eval.params.imgIds = self.img_ids - coco_eval.params.maxDets = list(self.proposal_nums) - coco_eval.params.iouThrs = self.iou_thrs - - # mapping of cocoEval.stats - coco_metric_names = { - 'mAP': 0, - 'mAP_50': 1, - 'mAP_75': 2, - 'mAP_s': 3, - 'mAP_m': 4, - 'mAP_l': 5, - 'AR@100': 6, - 'AR@300': 7, - 'AR@1000': 8, - 'AR_s@1000': 9, - 'AR_m@1000': 10, - 'AR_l@1000': 11 - } - metric_items = self.metric_items - if metric_items is not None: - for metric_item in metric_items: - if metric_item not in coco_metric_names: - raise KeyError( - f'metric item "{metric_item}" is not supported') - - if metric == 'proposal': - coco_eval.params.useCats = 0 - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - if metric_items is None: - metric_items = [ - 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', - 'AR_m@1000', 'AR_l@1000' - ] - - for item in metric_items: - val = float( - f'{coco_eval.stats[coco_metric_names[item]]:.3f}') - eval_results[item] = val - else: - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - if self.classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = coco_eval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, cat_id in enumerate(self.cat_ids): - t = [] - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - nm = self._coco_api.loadCats(cat_id)[0] - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - t.append(f'{nm["name"]}') - t.append(f'{round(ap, 3)}') - eval_results[f'{nm["name"]}_precision'] = round(ap, 3) - - # indexes of IoU @50 and @75 - for iou in [0, 5]: - precision = precisions[iou, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - t.append(f'{round(ap, 3)}') - - # indexes of area of small, median and large - for area in [1, 2, 3]: - precision = precisions[:, :, idx, area, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - t.append(f'{round(ap, 3)}') - results_per_category.append(tuple(t)) - - num_columns = len(results_per_category[0]) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = [ - 'category', 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', - 'mAP_m', 'mAP_l' - ] - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - logger.info('\n' + table.table) - - if metric_items is None: - metric_items = [ - 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' - ] - - for metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = coco_eval.stats[coco_metric_names[metric_item]] - eval_results[key] = float(f'{round(val, 3)}') - - ap = coco_eval.stats[:6] - logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} ' - f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' - f'{ap[4]:.3f} 
{ap[5]:.3f}') - - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/samplers/pseudo_sampler.py b/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/samplers/pseudo_sampler.py deleted file mode 100644 index a8186cc3364516f34abe1c293017db6e2042d92a..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/samplers/pseudo_sampler.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmengine.structures import InstanceData - -from mmdet.registry import TASK_UTILS -from ..assigners import AssignResult -from .base_sampler import BaseSampler -from .sampling_result import SamplingResult - - -@TASK_UTILS.register_module() -class PseudoSampler(BaseSampler): - """A pseudo sampler that does not do sampling actually.""" - - def __init__(self, **kwargs): - pass - - def _sample_pos(self, **kwargs): - """Sample positive samples.""" - raise NotImplementedError - - def _sample_neg(self, **kwargs): - """Sample negative samples.""" - raise NotImplementedError - - def sample(self, assign_result: AssignResult, pred_instances: InstanceData, - gt_instances: InstanceData, *args, **kwargs): - """Directly returns the positive and negative indices of samples. - - Args: - assign_result (:obj:`AssignResult`): Bbox assigning results. - pred_instances (:obj:`InstanceData`): Instances of model - predictions. It includes ``priors``, and the priors can - be anchors, points, or bboxes predicted by the model, - shape(n, 4). - gt_instances (:obj:`InstanceData`): Ground truth of instance - annotations. It usually includes ``bboxes`` and ``labels`` - attributes. - - Returns: - :obj:`SamplingResult`: sampler results - """ - gt_bboxes = gt_instances.bboxes - priors = pred_instances.priors - - pos_inds = torch.nonzero( - assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() - neg_inds = torch.nonzero( - assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() - - gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8) - sampling_result = SamplingResult( - pos_inds=pos_inds, - neg_inds=neg_inds, - priors=priors, - gt_bboxes=gt_bboxes, - assign_result=assign_result, - gt_flags=gt_flags, - avg_factor_with_neg=False) - return sampling_result diff --git a/spaces/Laronix/Laronix_ASR_TTS_VC/local/streaming_VAD.py b/spaces/Laronix/Laronix_ASR_TTS_VC/local/streaming_VAD.py deleted file mode 100644 index 4876eb8cde513aae3bc58f6d0b2bf2f7d3cd6de3..0000000000000000000000000000000000000000 --- a/spaces/Laronix/Laronix_ASR_TTS_VC/local/streaming_VAD.py +++ /dev/null @@ -1,74 +0,0 @@ -import pyaudio -import numpy as np -import webrtcvad - -# Set up PyAudio -FORMAT = pyaudio.paInt16 -CHANNELS = 1 -RATE = 48000 -CHUNK_SIZE = 960 # 20ms audio chunks -# p = pyaudio.PyAudio() - -# wav = "/home/kevingeng/Disk2/laronix/Laronix_ASR_TTS_VC/wav/20221228_video_good_normed_5/take1_001_norm.wav" -wav = "/home/kevingeng/Disk2/laronix/Laronix_ASR_TTS_VC/wav/VAD_test.wav" -import wave -wf = wave.open(wav, "rb") -# import pdb -# stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), -# channels=wf.getnchannels(), -# rate=wf.getframerate(), -# output=True) -# pdb.set_trace() -# Set up VAD - -def streaming_VAD(wf): - vad = webrtcvad.Vad() - vad.set_mode(2) # Aggressive mode - - # Start audio stream - # stream = p.open(format=FORMAT, - # channels=CHANNELS, - # rate=RATE, - # input=True, - # frames_per_buffer=CHUNK_SIZE) - - # VAD constants - 
MIN_SILENCE_DURATION = 2000 # in ms - MAX_SILENCE_DURATION = 4000 # in ms - BUFFER_SIZE = MAX_SILENCE_DURATION // CHUNK_SIZE - BUFFER_THRESHOLD = int(BUFFER_SIZE * 0.5) - - # Initialize VAD buffer - vad_buffer = [] - VAD_indicator = [] - VAD_frame_indicator = [] - data = wf.readframes(CHUNK_SIZE) - # Loop through audio stream - while data: - # Read audio chunk from stream - # pdb.set_trace() - # audio_chunk = np.frombuffer(stream.read(CHUNK_SIZE), dtype=np.int16) - audio_chunk = np.frombuffer(data, dtype=np.int16) - # Detect voice activity - # is_speech = vad.is_speech(audio_chunk.tobytes(), RATE) - try: - is_speech = vad.is_speech(audio_chunk, RATE) - except: - is_speech = False - vad_buffer.append(is_speech) - - # If VAD buffer is full, check for silence and reset buffer - if len(vad_buffer) == BUFFER_SIZE: - # Check if buffer contains mostly silence - if vad_buffer.count(False) >= BUFFER_THRESHOLD: - # print("Slience") - # VAD_indicator.append(0) - # vad_buffer = [] - return(False) - else: - # print("Voice detected!") - # VAD_indicator.append(1) - vad_buffer = vad_buffer[CHUNK_SIZE // BUFFER_SIZE:] - return(True) - data = wf.readframes(CHUNK_SIZE) - diff --git a/spaces/LightSY/W2L-TD/facelib/parsing/bisenet.py b/spaces/LightSY/W2L-TD/facelib/parsing/bisenet.py deleted file mode 100644 index 3898cab76ae5876459cd4899c54cafa14234971d..0000000000000000000000000000000000000000 --- a/spaces/LightSY/W2L-TD/facelib/parsing/bisenet.py +++ /dev/null @@ -1,140 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .resnet import ResNet18 - - -class ConvBNReLU(nn.Module): - - def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1): - super(ConvBNReLU, self).__init__() - self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False) - self.bn = nn.BatchNorm2d(out_chan) - - def forward(self, x): - x = self.conv(x) - x = F.relu(self.bn(x)) - return x - - -class BiSeNetOutput(nn.Module): - - def __init__(self, in_chan, mid_chan, num_class): - super(BiSeNetOutput, self).__init__() - self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) - self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False) - - def forward(self, x): - feat = self.conv(x) - out = self.conv_out(feat) - return out, feat - - -class AttentionRefinementModule(nn.Module): - - def __init__(self, in_chan, out_chan): - super(AttentionRefinementModule, self).__init__() - self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) - self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False) - self.bn_atten = nn.BatchNorm2d(out_chan) - self.sigmoid_atten = nn.Sigmoid() - - def forward(self, x): - feat = self.conv(x) - atten = F.avg_pool2d(feat, feat.size()[2:]) - atten = self.conv_atten(atten) - atten = self.bn_atten(atten) - atten = self.sigmoid_atten(atten) - out = torch.mul(feat, atten) - return out - - -class ContextPath(nn.Module): - - def __init__(self): - super(ContextPath, self).__init__() - self.resnet = ResNet18() - self.arm16 = AttentionRefinementModule(256, 128) - self.arm32 = AttentionRefinementModule(512, 128) - self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) - self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) - self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0) - - def forward(self, x): - feat8, feat16, feat32 = self.resnet(x) - h8, w8 = feat8.size()[2:] - h16, w16 = feat16.size()[2:] - h32, w32 = feat32.size()[2:] - - avg = F.avg_pool2d(feat32, 
feat32.size()[2:]) - avg = self.conv_avg(avg) - avg_up = F.interpolate(avg, (h32, w32), mode='nearest') - - feat32_arm = self.arm32(feat32) - feat32_sum = feat32_arm + avg_up - feat32_up = F.interpolate(feat32_sum, (h16, w16), mode='nearest') - feat32_up = self.conv_head32(feat32_up) - - feat16_arm = self.arm16(feat16) - feat16_sum = feat16_arm + feat32_up - feat16_up = F.interpolate(feat16_sum, (h8, w8), mode='nearest') - feat16_up = self.conv_head16(feat16_up) - - return feat8, feat16_up, feat32_up # x8, x8, x16 - - -class FeatureFusionModule(nn.Module): - - def __init__(self, in_chan, out_chan): - super(FeatureFusionModule, self).__init__() - self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) - self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False) - self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False) - self.relu = nn.ReLU(inplace=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, fsp, fcp): - fcat = torch.cat([fsp, fcp], dim=1) - feat = self.convblk(fcat) - atten = F.avg_pool2d(feat, feat.size()[2:]) - atten = self.conv1(atten) - atten = self.relu(atten) - atten = self.conv2(atten) - atten = self.sigmoid(atten) - feat_atten = torch.mul(feat, atten) - feat_out = feat_atten + feat - return feat_out - - -class BiSeNet(nn.Module): - - def __init__(self, num_class): - super(BiSeNet, self).__init__() - self.cp = ContextPath() - self.ffm = FeatureFusionModule(256, 256) - self.conv_out = BiSeNetOutput(256, 256, num_class) - self.conv_out16 = BiSeNetOutput(128, 64, num_class) - self.conv_out32 = BiSeNetOutput(128, 64, num_class) - - def forward(self, x, return_feat=False): - h, w = x.size()[2:] - feat_res8, feat_cp8, feat_cp16 = self.cp(x) # return res3b1 feature - feat_sp = feat_res8 # replace spatial path feature with res3b1 feature - feat_fuse = self.ffm(feat_sp, feat_cp8) - - out, feat = self.conv_out(feat_fuse) - out16, feat16 = self.conv_out16(feat_cp8) - out32, feat32 = self.conv_out32(feat_cp16) - - out = F.interpolate(out, (h, w), mode='bilinear', align_corners=True) - out16 = F.interpolate(out16, (h, w), mode='bilinear', align_corners=True) - out32 = F.interpolate(out32, (h, w), mode='bilinear', align_corners=True) - - if return_feat: - feat = F.interpolate(feat, (h, w), mode='bilinear', align_corners=True) - feat16 = F.interpolate(feat16, (h, w), mode='bilinear', align_corners=True) - feat32 = F.interpolate(feat32, (h, w), mode='bilinear', align_corners=True) - return out, out16, out32, feat, feat16, feat32 - else: - return out, out16, out32 diff --git a/spaces/LittleYuan/My-Real-Bot/realesrgan/data/realesrgan_paired_dataset.py b/spaces/LittleYuan/My-Real-Bot/realesrgan/data/realesrgan_paired_dataset.py deleted file mode 100644 index 386c8d72496245dae8df033c2ebbd76b41ff45f1..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/realesrgan/data/realesrgan_paired_dataset.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb -from basicsr.data.transforms import augment, paired_random_crop -from basicsr.utils import FileClient, imfrombytes, img2tensor -from basicsr.utils.registry import DATASET_REGISTRY -from torch.utils import data as data -from torchvision.transforms.functional import normalize - - -@DATASET_REGISTRY.register() -class RealESRGANPairedDataset(data.Dataset): - """Paired image dataset for image restoration. - - Read LQ (Low Quality, e.g. 
LR (Low Resolution), blurry, noisy, etc) and GT image pairs. - - There are three modes: - 1. 'lmdb': Use lmdb files. - If opt['io_backend'] == lmdb. - 2. 'meta_info': Use meta information file to generate paths. - If opt['io_backend'] != lmdb and opt['meta_info'] is not None. - 3. 'folder': Scan folders to generate paths. - The rest. - - Args: - opt (dict): Config for train datasets. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. - Default: '{}'. - gt_size (int): Cropped patched size for gt patches. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. - phase (str): 'train' or 'val'. - """ - - def __init__(self, opt): - super(RealESRGANPairedDataset, self).__init__() - self.opt = opt - self.file_client = None - self.io_backend_opt = opt['io_backend'] - # mean and std for normalizing the input images - self.mean = opt['mean'] if 'mean' in opt else None - self.std = opt['std'] if 'std' in opt else None - - self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] - self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' - - # file client (lmdb io backend) - if self.io_backend_opt['type'] == 'lmdb': - self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) - elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: - # disk backend with meta_info - # Each line in the meta_info describes the relative path to an image - with open(self.opt['meta_info']) as fin: - paths = [line.strip() for line in fin] - self.paths = [] - for path in paths: - gt_path, lq_path = path.split(', ') - gt_path = os.path.join(self.gt_folder, gt_path) - lq_path = os.path.join(self.lq_folder, lq_path) - self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) - else: - # disk backend - # it will scan the whole folder to get meta info - # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file - self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - scale = self.opt['scale'] - - # Load gt and lq images. Dimension order: HWC; channel order: BGR; - # image range: [0, 1], float32. 
- gt_path = self.paths[index]['gt_path'] - img_bytes = self.file_client.get(gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - lq_path = self.paths[index]['lq_path'] - img_bytes = self.file_client.get(lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - - # augmentation for training - if self.opt['phase'] == 'train': - gt_size = self.opt['gt_size'] - # random crop - img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) - # flip, rotation - img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) - - # BGR to RGB, HWC to CHW, numpy to tensor - img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) - # normalize - if self.mean is not None or self.std is not None: - normalize(img_lq, self.mean, self.std, inplace=True) - normalize(img_gt, self.mean, self.std, inplace=True) - - return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} - - def __len__(self): - return len(self.paths) diff --git a/spaces/LucasCodeBreak/MusicGen/tests/modules/test_seanet.py b/spaces/LucasCodeBreak/MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def 
test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/sync_batchnorm/replicate.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. 
- - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/commons.py b/spaces/Mahiruoshi/MyGO_VIts-bert/commons.py deleted file mode 100644 index d3fa07f65b1681e1f469b04b2fe689b7c174eaaa..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/MyGO_VIts-bert/commons.py +++ /dev/null @@ -1,160 +0,0 @@ -import math -import torch -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - layer = pad_shape[::-1] - pad_shape = [item for sublist in layer for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in 
range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - layer = pad_shape[::-1] - pad_shape = [item for sublist in layer for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return 
total_norm diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py deleted file mode 100644 index f490c4bbd598a35de43d36ceafcbd769e7ff21bf..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py +++ /dev/null @@ -1,43 +0,0 @@ -batch_size = 1 -modelname = "groundingdino" -backbone = "swin_B_384_22k" -position_embedding = "sine" -pe_temperatureH = 20 -pe_temperatureW = 20 -return_interm_indices = [1, 2, 3] -backbone_freeze_keywords = None -enc_layers = 6 -dec_layers = 6 -pre_norm = False -dim_feedforward = 2048 -hidden_dim = 256 -dropout = 0.0 -nheads = 8 -num_queries = 900 -query_dim = 4 -num_patterns = 0 -num_feature_levels = 4 -enc_n_points = 4 -dec_n_points = 4 -two_stage_type = "standard" -two_stage_bbox_embed_share = False -two_stage_class_embed_share = False -transformer_activation = "relu" -dec_pred_bbox_embed_share = True -dn_box_noise_scale = 1.0 -dn_label_noise_ratio = 0.5 -dn_label_coef = 1.0 -dn_bbox_coef = 1.0 -embed_init_tgt = True -dn_labelbook_size = 2000 -max_text_len = 256 -text_encoder_type = "bert-base-uncased" -use_text_enhancer = True -use_fusion_layer = True -use_checkpoint = True -use_transformer_ckpt = True -use_text_cross_attention = True -text_dropout = 0.0 -fusion_dropout = 0.0 -fusion_droppath = 0.1 -sub_sentence_present = True diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/s2m/utils.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/s2m/utils.py deleted file mode 100644 index c2adecf63baa9c2db4cc70b04c25200f6bc0a6a6..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/s2m/utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# Credit: https://github.com/VainF/DeepLabV3Plus-Pytorch - -import torch -import torch.nn as nn -import numpy as np -import torch.nn.functional as F -from collections import OrderedDict - -class _SimpleSegmentationModel(nn.Module): - def __init__(self, backbone, classifier): - super(_SimpleSegmentationModel, self).__init__() - self.backbone = backbone - self.classifier = classifier - - def forward(self, x): - input_shape = x.shape[-2:] - features = self.backbone(x) - x = self.classifier(features) - x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False) - return x - - -class IntermediateLayerGetter(nn.ModuleDict): - """ - Module wrapper that returns intermediate layers from a model - - It has a strong assumption that the modules have been registered - into the model in the same order as they are used. - This means that one should **not** reuse the same nn.Module - twice in the forward if you want this to work. - - Additionally, it is only able to query submodules that are directly - assigned to the model. So if `model` is passed, `model.feature1` can - be returned, but not `model.feature1.layer2`. 
- - Arguments: - model (nn.Module): model on which we will extract the features - return_layers (Dict[name, new_name]): a dict containing the names - of the modules for which the activations will be returned as - the key of the dict, and the value of the dict is the name - of the returned activation (which the user can specify). - - Examples:: - - >>> m = torchvision.models.resnet18(pretrained=True) - >>> # extract layer1 and layer3, giving as names `feat1` and feat2` - >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m, - >>> {'layer1': 'feat1', 'layer3': 'feat2'}) - >>> out = new_m(torch.rand(1, 3, 224, 224)) - >>> print([(k, v.shape) for k, v in out.items()]) - >>> [('feat1', torch.Size([1, 64, 56, 56])), - >>> ('feat2', torch.Size([1, 256, 14, 14]))] - """ - def __init__(self, model, return_layers): - if not set(return_layers).issubset([name for name, _ in model.named_children()]): - raise ValueError("return_layers are not present in model") - - orig_return_layers = return_layers - return_layers = {k: v for k, v in return_layers.items()} - layers = OrderedDict() - for name, module in model.named_children(): - layers[name] = module - if name in return_layers: - del return_layers[name] - if not return_layers: - break - - super(IntermediateLayerGetter, self).__init__(layers) - self.return_layers = orig_return_layers - - def forward(self, x): - out = OrderedDict() - for name, module in self.named_children(): - x = module(x) - if name in self.return_layers: - out_name = self.return_layers[name] - out[out_name] = x - return out diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/util/__init__.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/util/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MarcusSu1216/XingTong/modules/modules.py b/spaces/MarcusSu1216/XingTong/modules/modules.py deleted file mode 100644 index 54290fd207b25e93831bd21005990ea137e6b50e..0000000000000000000000000000000000000000 --- a/spaces/MarcusSu1216/XingTong/modules/modules.py +++ /dev/null @@ -1,342 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import modules.commons as commons -from modules.commons import init_weights, get_padding - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/furthest_point_sample.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/furthest_point_sample.py deleted file mode 100644 index 374b7a878f1972c183941af28ba1df216ac1a60f..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/furthest_point_sample.py +++ /dev/null @@ -1,83 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'furthest_point_sampling_forward', - 'furthest_point_sampling_with_dist_forward' -]) - - -class FurthestPointSampling(Function): - """Uses iterative furthest point sampling to select a set of features whose - corresponding points have the furthest distance.""" - - @staticmethod - def forward(ctx, points_xyz: torch.Tensor, - num_points: int) -> torch.Tensor: - """ - Args: - points_xyz (Tensor): (B, N, 3) where N > num_points. - num_points (int): Number of points in the sampled set. - - Returns: - Tensor: (B, num_points) indices of the sampled points. 
- """ - assert points_xyz.is_contiguous() - - B, N = points_xyz.size()[:2] - output = torch.cuda.IntTensor(B, num_points) - temp = torch.cuda.FloatTensor(B, N).fill_(1e10) - - ext_module.furthest_point_sampling_forward( - points_xyz, - temp, - output, - b=B, - n=N, - m=num_points, - ) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(output) - return output - - @staticmethod - def backward(xyz, a=None): - return None, None - - -class FurthestPointSamplingWithDist(Function): - """Uses iterative furthest point sampling to select a set of features whose - corresponding points have the furthest distance.""" - - @staticmethod - def forward(ctx, points_dist: torch.Tensor, - num_points: int) -> torch.Tensor: - """ - Args: - points_dist (Tensor): (B, N, N) Distance between each point pair. - num_points (int): Number of points in the sampled set. - - Returns: - Tensor: (B, num_points) indices of the sampled points. - """ - assert points_dist.is_contiguous() - - B, N, _ = points_dist.size() - output = points_dist.new_zeros([B, num_points], dtype=torch.int32) - temp = points_dist.new_zeros([B, N]).fill_(1e10) - - ext_module.furthest_point_sampling_with_dist_forward( - points_dist, temp, output, b=B, n=N, m=num_points) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(output) - return output - - @staticmethod - def backward(xyz, a=None): - return None, None - - -furthest_point_sample = FurthestPointSampling.apply -furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/fast_scnn.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/fast_scnn.py deleted file mode 100644 index 38c2350177cbc2066f45add568d30eb6041f74f3..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/backbones/fast_scnn.py +++ /dev/null @@ -1,375 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, constant_init, - kaiming_init) -from torch.nn.modules.batchnorm import _BatchNorm - -from annotator.uniformer.mmseg.models.decode_heads.psp_head import PPM -from annotator.uniformer.mmseg.ops import resize -from ..builder import BACKBONES -from ..utils.inverted_residual import InvertedResidual - - -class LearningToDownsample(nn.Module): - """Learning to downsample module. - - Args: - in_channels (int): Number of input channels. - dw_channels (tuple[int]): Number of output channels of the first and - the second depthwise conv (dwconv) layers. - out_channels (int): Number of output channels of the whole - 'learning to downsample' module. - conv_cfg (dict | None): Config of conv layers. Default: None - norm_cfg (dict | None): Config of norm layers. Default: - dict(type='BN') - act_cfg (dict): Config of activation layers. 
Default: - dict(type='ReLU') - """ - - def __init__(self, - in_channels, - dw_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU')): - super(LearningToDownsample, self).__init__() - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - dw_channels1 = dw_channels[0] - dw_channels2 = dw_channels[1] - - self.conv = ConvModule( - in_channels, - dw_channels1, - 3, - stride=2, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.dsconv1 = DepthwiseSeparableConvModule( - dw_channels1, - dw_channels2, - kernel_size=3, - stride=2, - padding=1, - norm_cfg=self.norm_cfg) - self.dsconv2 = DepthwiseSeparableConvModule( - dw_channels2, - out_channels, - kernel_size=3, - stride=2, - padding=1, - norm_cfg=self.norm_cfg) - - def forward(self, x): - x = self.conv(x) - x = self.dsconv1(x) - x = self.dsconv2(x) - return x - - -class GlobalFeatureExtractor(nn.Module): - """Global feature extractor module. - - Args: - in_channels (int): Number of input channels of the GFE module. - Default: 64 - block_channels (tuple[int]): Tuple of ints. Each int specifies the - number of output channels of each Inverted Residual module. - Default: (64, 96, 128) - out_channels(int): Number of output channels of the GFE module. - Default: 128 - expand_ratio (int): Adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - Default: 6 - num_blocks (tuple[int]): Tuple of ints. Each int specifies the - number of times each Inverted Residual module is repeated. - The repeated Inverted Residual modules are called a 'group'. - Default: (3, 3, 3) - strides (tuple[int]): Tuple of ints. Each int specifies - the downsampling factor of each 'group'. - Default: (2, 2, 1) - pool_scales (tuple[int]): Tuple of ints. Each int specifies - the parameter required in 'global average pooling' within PPM. - Default: (1, 2, 3, 6) - conv_cfg (dict | None): Config of conv layers. Default: None - norm_cfg (dict | None): Config of norm layers. Default: - dict(type='BN') - act_cfg (dict): Config of activation layers. Default: - dict(type='ReLU') - align_corners (bool): align_corners argument of F.interpolate. 
- Default: False - """ - - def __init__(self, - in_channels=64, - block_channels=(64, 96, 128), - out_channels=128, - expand_ratio=6, - num_blocks=(3, 3, 3), - strides=(2, 2, 1), - pool_scales=(1, 2, 3, 6), - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - align_corners=False): - super(GlobalFeatureExtractor, self).__init__() - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - assert len(block_channels) == len(num_blocks) == 3 - self.bottleneck1 = self._make_layer(in_channels, block_channels[0], - num_blocks[0], strides[0], - expand_ratio) - self.bottleneck2 = self._make_layer(block_channels[0], - block_channels[1], num_blocks[1], - strides[1], expand_ratio) - self.bottleneck3 = self._make_layer(block_channels[1], - block_channels[2], num_blocks[2], - strides[2], expand_ratio) - self.ppm = PPM( - pool_scales, - block_channels[2], - block_channels[2] // 4, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=align_corners) - self.out = ConvModule( - block_channels[2] * 2, - out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def _make_layer(self, - in_channels, - out_channels, - blocks, - stride=1, - expand_ratio=6): - layers = [ - InvertedResidual( - in_channels, - out_channels, - stride, - expand_ratio, - norm_cfg=self.norm_cfg) - ] - for i in range(1, blocks): - layers.append( - InvertedResidual( - out_channels, - out_channels, - 1, - expand_ratio, - norm_cfg=self.norm_cfg)) - return nn.Sequential(*layers) - - def forward(self, x): - x = self.bottleneck1(x) - x = self.bottleneck2(x) - x = self.bottleneck3(x) - x = torch.cat([x, *self.ppm(x)], dim=1) - x = self.out(x) - return x - - -class FeatureFusionModule(nn.Module): - """Feature fusion module. - - Args: - higher_in_channels (int): Number of input channels of the - higher-resolution branch. - lower_in_channels (int): Number of input channels of the - lower-resolution branch. - out_channels (int): Number of output channels. - conv_cfg (dict | None): Config of conv layers. Default: None - norm_cfg (dict | None): Config of norm layers. Default: - dict(type='BN') - act_cfg (dict): Config of activation layers. Default: - dict(type='ReLU') - align_corners (bool): align_corners argument of F.interpolate. 
- Default: False - """ - - def __init__(self, - higher_in_channels, - lower_in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - align_corners=False): - super(FeatureFusionModule, self).__init__() - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.align_corners = align_corners - self.dwconv = ConvModule( - lower_in_channels, - out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.conv_lower_res = ConvModule( - out_channels, - out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=None) - self.conv_higher_res = ConvModule( - higher_in_channels, - out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=None) - self.relu = nn.ReLU(True) - - def forward(self, higher_res_feature, lower_res_feature): - lower_res_feature = resize( - lower_res_feature, - size=higher_res_feature.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - lower_res_feature = self.dwconv(lower_res_feature) - lower_res_feature = self.conv_lower_res(lower_res_feature) - - higher_res_feature = self.conv_higher_res(higher_res_feature) - out = higher_res_feature + lower_res_feature - return self.relu(out) - - -@BACKBONES.register_module() -class FastSCNN(nn.Module): - """Fast-SCNN Backbone. - - Args: - in_channels (int): Number of input image channels. Default: 3. - downsample_dw_channels (tuple[int]): Number of output channels after - the first conv layer & the second conv layer in - Learning-To-Downsample (LTD) module. - Default: (32, 48). - global_in_channels (int): Number of input channels of - Global Feature Extractor(GFE). - Equal to number of output channels of LTD. - Default: 64. - global_block_channels (tuple[int]): Tuple of integers that describe - the output channels for each of the MobileNet-v2 bottleneck - residual blocks in GFE. - Default: (64, 96, 128). - global_block_strides (tuple[int]): Tuple of integers - that describe the strides (downsampling factors) for each of the - MobileNet-v2 bottleneck residual blocks in GFE. - Default: (2, 2, 1). - global_out_channels (int): Number of output channels of GFE. - Default: 128. - higher_in_channels (int): Number of input channels of the higher - resolution branch in FFM. - Equal to global_in_channels. - Default: 64. - lower_in_channels (int): Number of input channels of the lower - resolution branch in FFM. - Equal to global_out_channels. - Default: 128. - fusion_out_channels (int): Number of output channels of FFM. - Default: 128. - out_indices (tuple): Tuple of indices of list - [higher_res_features, lower_res_features, fusion_output]. - Often set to (0,1,2) to enable aux. heads. - Default: (0, 1, 2). - conv_cfg (dict | None): Config of conv layers. Default: None - norm_cfg (dict | None): Config of norm layers. Default: - dict(type='BN') - act_cfg (dict): Config of activation layers. Default: - dict(type='ReLU') - align_corners (bool): align_corners argument of F.interpolate. 
- Default: False - """ - - def __init__(self, - in_channels=3, - downsample_dw_channels=(32, 48), - global_in_channels=64, - global_block_channels=(64, 96, 128), - global_block_strides=(2, 2, 1), - global_out_channels=128, - higher_in_channels=64, - lower_in_channels=128, - fusion_out_channels=128, - out_indices=(0, 1, 2), - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - align_corners=False): - - super(FastSCNN, self).__init__() - if global_in_channels != higher_in_channels: - raise AssertionError('Global Input Channels must be the same \ - with Higher Input Channels!') - elif global_out_channels != lower_in_channels: - raise AssertionError('Global Output Channels must be the same \ - with Lower Input Channels!') - - self.in_channels = in_channels - self.downsample_dw_channels1 = downsample_dw_channels[0] - self.downsample_dw_channels2 = downsample_dw_channels[1] - self.global_in_channels = global_in_channels - self.global_block_channels = global_block_channels - self.global_block_strides = global_block_strides - self.global_out_channels = global_out_channels - self.higher_in_channels = higher_in_channels - self.lower_in_channels = lower_in_channels - self.fusion_out_channels = fusion_out_channels - self.out_indices = out_indices - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.align_corners = align_corners - self.learning_to_downsample = LearningToDownsample( - in_channels, - downsample_dw_channels, - global_in_channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.global_feature_extractor = GlobalFeatureExtractor( - global_in_channels, - global_block_channels, - global_out_channels, - strides=self.global_block_strides, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - self.feature_fusion = FeatureFusionModule( - higher_in_channels, - lower_in_channels, - fusion_out_channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - - def init_weights(self, pretrained=None): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - def forward(self, x): - higher_res_features = self.learning_to_downsample(x) - lower_res_features = self.global_feature_extractor(higher_res_features) - fusion_output = self.feature_fusion(higher_res_features, - lower_res_features) - - outs = [higher_res_features, lower_res_features, fusion_output] - outs = [outs[i] for i in self.out_indices] - return tuple(outs) diff --git a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_browse_scrape_links.py b/spaces/MetaWabbit/Auto-GPT/tests/unit/test_browse_scrape_links.py deleted file mode 100644 index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_browse_scrape_links.py +++ /dev/null @@ -1,118 +0,0 @@ -# Generated by CodiumAI - -# Dependencies: -# pip install pytest-mock -import pytest - -from autogpt.commands.web_requests import scrape_links - -""" -Code Analysis - -Objective: -The objective of the 'scrape_links' function is to scrape hyperlinks from a -given URL and return them in a formatted way. - -Inputs: -- url: a string representing the URL to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. 
Check if the response contains an HTTP error. If it does, return "error". -3. Parse the HTML content of the response using the BeautifulSoup library. -4. Remove any script and style tags from the parsed HTML. -5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. -6. Format the extracted hyperlinks using the 'format_hyperlinks' function. -7. Return the formatted hyperlinks. - -Outputs: -- A list of formatted hyperlinks. - -Additional aspects: -- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP -requests and parse HTML content, respectively. -- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. -- The 'format_hyperlinks' function is called to format the extracted hyperlinks. -- The function checks for HTTP errors and returns "error" if any are found. -""" - - -class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. - def test_valid_url_with_hyperlinks(self): - url = "https://www.google.com" - result = scrape_links(url) - assert len(result) > 0 - assert isinstance(result, list) - assert isinstance(result[0], str) - - # Tests that the function returns correctly formatted hyperlinks when given a valid url. - def test_valid_url(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "Google" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL - result = scrape_links("https://www.example.com") - - # Assert that the function returns correctly formatted hyperlinks - assert result == ["Google (https://www.google.com)"] - - # Tests that the function returns "error" when given an invalid url. - def test_invalid_url(self, mocker): - # Mock the requests.get() function to return an HTTP error response - mock_response = mocker.Mock() - mock_response.status_code = 404 - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com") - - # Assert that the function returns "error" - assert "Error:" in result - - # Tests that the function returns an empty list when the html contains no hyperlinks. - def test_no_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "

<html><body><p>No hyperlinks here</p></body></html>

" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com") - - # Assert that the function returns an empty list - assert result == [] - - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. - def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = """ - - - - - - - - """ - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function being tested - result = scrape_links("https://www.example.com") - - # Assert that the function returns a list of formatted hyperlinks - assert isinstance(result, list) - assert len(result) == 3 - assert result[0] == "Google (https://www.google.com)" - assert result[1] == "GitHub (https://github.com)" - assert result[2] == "CodiumAI (https://www.codium.ai)" diff --git a/spaces/MoEternal/Hoshino/README.md b/spaces/MoEternal/Hoshino/README.md deleted file mode 100644 index 6167af17702fad24fb20e8cf492c9b0b655a6812..0000000000000000000000000000000000000000 --- a/spaces/MoEternal/Hoshino/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hoshino -emoji: 👀 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/processing.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/processing.py deleted file mode 100644 index 2da6ff2c90d746c67c18fd1f22e6bd8d1f2bf887..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/processing.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import sys -from collections.abc import Iterable - -from mmengine.utils.progressbar import ProgressBar, init_pool - - -def track_parallel_progress_multi_args(func, - args, - nproc, - initializer=None, - initargs=None, - bar_width=50, - chunksize=1, - skip_first=False, - file=sys.stdout): - """Track the progress of parallel task execution with a progress bar. - - The built-in :mod:`multiprocessing` module is used for process pools and - tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`. - - Args: - func (callable): The function to be applied to each task. - tasks (tuple[Iterable]): A tuple of tasks. - nproc (int): Process (worker) number. - initializer (None or callable): Refer to :class:`multiprocessing.Pool` - for details. - initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for - details. - chunksize (int): Refer to :class:`multiprocessing.Pool` for details. - bar_width (int): Width of progress bar. - skip_first (bool): Whether to skip the first sample for each worker - when estimating fps, since the initialization step may takes - longer. - keep_order (bool): If True, :func:`Pool.imap` is used, otherwise - :func:`Pool.imap_unordered` is used. - - Returns: - list: The task results. 
- """ - assert isinstance(args, tuple) - for arg in args: - assert isinstance(arg, Iterable) - assert len(set([len(arg) - for arg in args])) == 1, 'args must have same length' - task_num = len(args[0]) - tasks = zip(*args) - - pool = init_pool(nproc, initializer, initargs) - start = not skip_first - task_num -= nproc * chunksize * int(skip_first) - prog_bar = ProgressBar(task_num, bar_width, start, file=file) - results = [] - gen = pool.starmap(func, tasks, chunksize) - for result in gen: - results.append(result) - if skip_first: - if len(results) < nproc * chunksize: - continue - elif len(results) == nproc * chunksize: - prog_bar.start() - continue - prog_bar.update() - prog_bar.file.write('\n') - pool.close() - pool.join() - return results diff --git a/spaces/NATSpeech/DiffSpeech/mfa_usr/mfa.py b/spaces/NATSpeech/DiffSpeech/mfa_usr/mfa.py deleted file mode 100644 index d91bff2668540fe1a190d1a0901ccd47a2216ba1..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/mfa_usr/mfa.py +++ /dev/null @@ -1,498 +0,0 @@ -import atexit -import sys -import os -import time -import argparse -from datetime import datetime -import multiprocessing as mp - -from montreal_forced_aligner import __version__ - -from montreal_forced_aligner.utils import get_available_acoustic_languages, get_available_g2p_languages, \ - get_available_dict_languages, get_available_lm_languages, get_available_ivector_languages -from montreal_forced_aligner.command_line.align import run_align_corpus - -from mfa_usr.adapt import run_adapt_model -from montreal_forced_aligner.command_line.train_and_align import run_train_corpus -from montreal_forced_aligner.command_line.g2p import run_g2p -from montreal_forced_aligner.command_line.train_g2p import run_train_g2p -from montreal_forced_aligner.command_line.validate import run_validate_corpus -from montreal_forced_aligner.command_line.download import run_download -from montreal_forced_aligner.command_line.train_lm import run_train_lm -from montreal_forced_aligner.command_line.thirdparty import run_thirdparty -from montreal_forced_aligner.command_line.train_ivector_extractor import run_train_ivector_extractor -from montreal_forced_aligner.command_line.classify_speakers import run_classify_speakers -from montreal_forced_aligner.command_line.transcribe import run_transcribe_corpus -from montreal_forced_aligner.command_line.train_dictionary import run_train_dictionary -from montreal_forced_aligner.command_line.create_segments import run_create_segments -from montreal_forced_aligner.exceptions import MFAError -from montreal_forced_aligner.config import update_global_config, load_global_config, update_command_history, \ - load_command_history - - -class ExitHooks(object): - def __init__(self): - self.exit_code = None - self.exception = None - - def hook(self): - self._orig_exit = sys.exit - sys.exit = self.exit - sys.excepthook = self.exc_handler - - def exit(self, code=0): - self.exit_code = code - self._orig_exit(code) - - def exc_handler(self, exc_type, exc, *args): - self.exception = exc - - -hooks = ExitHooks() -hooks.hook() - -BEGIN = time.time() -BEGIN_DATE = datetime.now() - - -def history_save_handler(): - history_data = { - 'command': ' '.join(sys.argv), - 'execution_time': time.time() - BEGIN, - 'date': BEGIN_DATE, - 'version': __version__ - } - - if hooks.exit_code is not None: - history_data['exit_code'] = hooks.exit_code - history_data['exception'] = '' - elif hooks.exception is not None: - history_data['exit_code'] = 1 - history_data['exception'] = 
hooks.exception - else: - history_data['exception'] = '' - history_data['exit_code'] = 0 - update_command_history(history_data) - - -atexit.register(history_save_handler) - - -def fix_path(): - from montreal_forced_aligner.config import TEMP_DIR - thirdparty_dir = os.path.join(TEMP_DIR, 'thirdparty', 'bin') - old_path = os.environ.get('PATH', '') - if sys.platform == 'win32': - os.environ['PATH'] = thirdparty_dir + ';' + old_path - else: - os.environ['PATH'] = thirdparty_dir + ':' + old_path - os.environ['LD_LIBRARY_PATH'] = thirdparty_dir + ':' + os.environ.get('LD_LIBRARY_PATH', '') - - -def unfix_path(): - if sys.platform == 'win32': - sep = ';' - os.environ['PATH'] = sep.join(os.environ['PATH'].split(sep)[1:]) - else: - sep = ':' - os.environ['PATH'] = sep.join(os.environ['PATH'].split(sep)[1:]) - os.environ['LD_LIBRARY_PATH'] = sep.join(os.environ['PATH'].split(sep)[1:]) - - -acoustic_languages = get_available_acoustic_languages() -ivector_languages = get_available_ivector_languages() -lm_languages = get_available_lm_languages() -g2p_languages = get_available_g2p_languages() -dict_languages = get_available_dict_languages() - - -def create_parser(): - GLOBAL_CONFIG = load_global_config() - - def add_global_options(subparser, textgrid_output=False): - subparser.add_argument('-t', '--temp_directory', type=str, default=GLOBAL_CONFIG['temp_directory'], - help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG['temp_directory']}") - subparser.add_argument('--disable_mp', - help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG['use_mp']}", - action='store_true', - default=not GLOBAL_CONFIG['use_mp']) - subparser.add_argument('-j', '--num_jobs', type=int, default=GLOBAL_CONFIG['num_jobs'], - help=f"Number of data splits (and cores to use if multiprocessing is enabled), defaults " - f"is {GLOBAL_CONFIG['num_jobs']}") - subparser.add_argument('-v', '--verbose', help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}", - action='store_true', - default=GLOBAL_CONFIG['verbose']) - subparser.add_argument('--clean', help=f"Remove files from previous runs, default is {GLOBAL_CONFIG['clean']}", - action='store_true', - default=GLOBAL_CONFIG['clean']) - subparser.add_argument('--overwrite', - help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG['overwrite']}", - action='store_true', - default=GLOBAL_CONFIG['overwrite']) - subparser.add_argument('--debug', - help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG['debug']}", - action='store_true', - default=GLOBAL_CONFIG['debug']) - if textgrid_output: - subparser.add_argument('--disable_textgrid_cleanup', - help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG['cleanup_textgrids']}", - action='store_true', - default=not GLOBAL_CONFIG['cleanup_textgrids']) - - parser = argparse.ArgumentParser() - - subparsers = parser.add_subparsers(dest="subcommand") - subparsers.required = True - - version_parser = subparsers.add_parser('version') - - align_parser = subparsers.add_parser('align') - align_parser.add_argument('corpus_directory', help="Full path to the directory to align") - align_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use") - align_parser.add_argument('acoustic_model_path', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_languages)})") - align_parser.add_argument('output_directory', - help="Full path to 
output directory, will be created if it doesn't exist") - align_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for alignment") - align_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of file names to use for determining speaker, " - 'default is to use directory names') - align_parser.add_argument('-a', '--audio_directory', type=str, default='', - help="Audio directory root to use for finding audio files") - add_global_options(align_parser, textgrid_output=True) - - adapt_parser = subparsers.add_parser('adapt') - adapt_parser.add_argument('corpus_directory', help="Full path to the directory to align") - adapt_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use") - adapt_parser.add_argument('acoustic_model_path', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_languages)})") - adapt_parser.add_argument('output_model_path', - help="Full path to save adapted_model") - adapt_parser.add_argument('output_directory', - help="Full path to output directory, will be created if it doesn't exist") - adapt_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for alignment") - adapt_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of file names to use for determining speaker, " - 'default is to use directory names') - adapt_parser.add_argument('-a', '--audio_directory', type=str, default='', - help="Audio directory root to use for finding audio files") - add_global_options(adapt_parser, textgrid_output=True) - - train_parser = subparsers.add_parser('train') - train_parser.add_argument('corpus_directory', help="Full path to the source directory to align") - train_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use", - default='') - train_parser.add_argument('output_directory', - help="Full path to output directory, will be created if it doesn't exist") - train_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for training and alignment") - train_parser.add_argument('-o', '--output_model_path', type=str, default='', - help="Full path to save resulting acoustic and dictionary model") - train_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of filenames to use for determining speaker, " - 'default is to use directory names') - train_parser.add_argument('-a', '--audio_directory', type=str, default='', - help="Audio directory root to use for finding audio files") - train_parser.add_argument('-m', '--acoustic_model_path', type=str, default='', - help="Full path to save adapted_model") - - add_global_options(train_parser, textgrid_output=True) - - validate_parser = subparsers.add_parser('validate') - validate_parser.add_argument('corpus_directory', help="Full path to the source directory to align") - validate_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use", - default='') - validate_parser.add_argument('acoustic_model_path', nargs='?', default='', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_languages)})") - validate_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of file names to use for determining speaker, " - 'default is to use directory 
names') - validate_parser.add_argument('--test_transcriptions', help="Test accuracy of transcriptions", action='store_true') - validate_parser.add_argument('--ignore_acoustics', - help="Skip acoustic feature generation and associated validation", - action='store_true') - add_global_options(validate_parser) - - g2p_model_help_message = f'''Full path to the archive containing pre-trained model or language ({', '.join(g2p_languages)}) - If not specified, then orthographic transcription is split into pronunciations.''' - g2p_parser = subparsers.add_parser('g2p') - g2p_parser.add_argument("g2p_model_path", help=g2p_model_help_message, nargs='?') - - g2p_parser.add_argument("input_path", - help="Corpus to base word list on or a text file of words to generate pronunciations") - g2p_parser.add_argument("output_path", help="Path to save output dictionary") - g2p_parser.add_argument('--include_bracketed', help="Included words enclosed by brackets, i.e. [...], (...), <...>", - action='store_true') - g2p_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for G2P") - add_global_options(g2p_parser) - - train_g2p_parser = subparsers.add_parser('train_g2p') - train_g2p_parser.add_argument("dictionary_path", help="Location of existing dictionary") - - train_g2p_parser.add_argument("output_model_path", help="Desired location of generated model") - train_g2p_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for G2P") - train_g2p_parser.add_argument("--validate", action='store_true', - help="Perform an analysis of accuracy training on " - "most of the data and validating on an unseen subset") - add_global_options(train_g2p_parser) - - download_parser = subparsers.add_parser('download') - download_parser.add_argument("model_type", - help="Type of model to download, one of 'acoustic', 'g2p', or 'dictionary'") - download_parser.add_argument("language", help="Name of language code to download, if not specified, " - "will list all available languages", nargs='?') - - train_lm_parser = subparsers.add_parser('train_lm') - train_lm_parser.add_argument('source_path', help="Full path to the source directory to train from, alternatively " - 'an ARPA format language model to convert for MFA use') - train_lm_parser.add_argument('output_model_path', type=str, - help="Full path to save resulting language model") - train_lm_parser.add_argument('-m', '--model_path', type=str, - help="Full path to existing language model to merge probabilities") - train_lm_parser.add_argument('-w', '--model_weight', type=float, default=1.0, - help="Weight factor for supplemental language model, defaults to 1.0") - train_lm_parser.add_argument('--dictionary_path', help="Full path to the pronunciation dictionary to use", - default='') - train_lm_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for training and alignment") - add_global_options(train_lm_parser) - - train_dictionary_parser = subparsers.add_parser('train_dictionary') - train_dictionary_parser.add_argument('corpus_directory', help="Full path to the directory to align") - train_dictionary_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use") - train_dictionary_parser.add_argument('acoustic_model_path', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_languages)})") - train_dictionary_parser.add_argument('output_directory', - help="Full path to output directory, will 
be created if it doesn't exist") - train_dictionary_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for alignment") - train_dictionary_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of file names to use for determining speaker, " - 'default is to use directory names') - add_global_options(train_dictionary_parser) - - train_ivector_parser = subparsers.add_parser('train_ivector') - train_ivector_parser.add_argument('corpus_directory', help="Full path to the source directory to " - 'train the ivector extractor') - train_ivector_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use") - train_ivector_parser.add_argument('acoustic_model_path', type=str, default='', - help="Full path to acoustic model for alignment") - train_ivector_parser.add_argument('output_model_path', type=str, default='', - help="Full path to save resulting ivector extractor") - train_ivector_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of filenames to use for determining speaker, " - 'default is to use directory names') - train_ivector_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for training") - add_global_options(train_ivector_parser) - - classify_speakers_parser = subparsers.add_parser('classify_speakers') - classify_speakers_parser.add_argument('corpus_directory', help="Full path to the source directory to " - 'run speaker classification') - classify_speakers_parser.add_argument('ivector_extractor_path', type=str, default='', - help="Full path to ivector extractor model") - classify_speakers_parser.add_argument('output_directory', - help="Full path to output directory, will be created if it doesn't exist") - - classify_speakers_parser.add_argument('-s', '--num_speakers', type=int, default=0, - help="Number of speakers if known") - classify_speakers_parser.add_argument('--cluster', help="Using clustering instead of classification", - action='store_true') - classify_speakers_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for ivector extraction") - add_global_options(classify_speakers_parser) - - create_segments_parser = subparsers.add_parser('create_segments') - create_segments_parser.add_argument('corpus_directory', help="Full path to the source directory to " - 'run VAD segmentation') - create_segments_parser.add_argument('output_directory', - help="Full path to output directory, will be created if it doesn't exist") - create_segments_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for segmentation") - add_global_options(create_segments_parser) - - transcribe_parser = subparsers.add_parser('transcribe') - transcribe_parser.add_argument('corpus_directory', help="Full path to the directory to transcribe") - transcribe_parser.add_argument('dictionary_path', help="Full path to the pronunciation dictionary to use") - transcribe_parser.add_argument('acoustic_model_path', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_languages)})") - transcribe_parser.add_argument('language_model_path', - help=f"Full path to the archive containing pre-trained model or language ({', '.join(lm_languages)})") - transcribe_parser.add_argument('output_directory', - help="Full path to output directory, will be created if it doesn't exist") - 
transcribe_parser.add_argument('--config_path', type=str, default='', - help="Path to config file to use for transcription") - transcribe_parser.add_argument('-s', '--speaker_characters', type=str, default='0', - help="Number of characters of file names to use for determining speaker, " - 'default is to use directory names') - transcribe_parser.add_argument('-a', '--audio_directory', type=str, default='', - help="Audio directory root to use for finding audio files") - transcribe_parser.add_argument('-e', '--evaluate', help="Evaluate the transcription " - "against golden texts", action='store_true') - add_global_options(transcribe_parser) - - config_parser = subparsers.add_parser('configure', - help="The configure command is used to set global defaults for MFA so " - "you don't have to set them every time you call an MFA command.") - config_parser.add_argument('-t', '--temp_directory', type=str, default='', - help=f"Set the default temporary directory, default is {GLOBAL_CONFIG['temp_directory']}") - config_parser.add_argument('-j', '--num_jobs', type=int, - help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG['num_jobs']}") - config_parser.add_argument('--always_clean', help="Always remove files from previous runs by default", - action='store_true') - config_parser.add_argument('--never_clean', help="Don't remove files from previous runs by default", - action='store_true') - config_parser.add_argument('--always_verbose', help="Default to verbose output", action='store_true') - config_parser.add_argument('--never_verbose', help="Default to non-verbose output", action='store_true') - config_parser.add_argument('--always_debug', help="Default to running debugging steps", action='store_true') - config_parser.add_argument('--never_debug', help="Default to not running debugging steps", action='store_true') - config_parser.add_argument('--always_overwrite', help="Always overwrite output files", action='store_true') - config_parser.add_argument('--never_overwrite', help="Never overwrite output files (if file already exists, " - "the output will be saved in the temp directory)", - action='store_true') - config_parser.add_argument('--disable_mp', help="Disable all multiprocessing (not recommended as it will usually " - "increase processing times)", action='store_true') - config_parser.add_argument('--enable_mp', help="Enable multiprocessing (recommended and enabled by default)", - action='store_true') - config_parser.add_argument('--disable_textgrid_cleanup', help="Disable postprocessing of TextGrids that cleans up " - "silences and recombines compound words and clitics", - action='store_true') - config_parser.add_argument('--enable_textgrid_cleanup', help="Enable postprocessing of TextGrids that cleans up " - "silences and recombines compound words and clitics", - action='store_true') - - history_parser = subparsers.add_parser('history') - - history_parser.add_argument('depth', help='Number of commands to list', nargs='?', default=10) - history_parser.add_argument('--verbose', help="Flag for whether to output additional information", - action='store_true') - - annotator_parser = subparsers.add_parser('annotator') - anchor_parser = subparsers.add_parser('anchor') - - thirdparty_parser = subparsers.add_parser('thirdparty') - - thirdparty_parser.add_argument("command", - help="One of 'download', 'validate', or 'kaldi'") - thirdparty_parser.add_argument('local_directory', - help="Full path to the built executables to collect", nargs="?", - default='') - return parser - - 
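# A minimal usage sketch (hypothetical paths and helper name, not taken from the
# original script): the same parser/dispatch pair that main() uses below can also
# drive a single alignment run directly.
def _example_align_run():
    example_args, example_unknown = create_parser().parse_known_args([
        'align', '/data/my_corpus', '/data/english.dict', 'english', '/data/aligned',
    ])
    fix_path()
    try:
        run_align_corpus(example_args, example_unknown, acoustic_languages)
    finally:
        unfix_path()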
-parser = create_parser() - - -def main(): - parser = create_parser() - mp.freeze_support() - args, unknown = parser.parse_known_args() - for short in ['-c', '-d']: - if short in unknown: - print(f'Due to the number of options that `{short}` could refer to, it is not accepted. ' - 'Please specify the full argument') - sys.exit(1) - try: - fix_path() - if args.subcommand in ['align', 'train', 'train_ivector']: - from montreal_forced_aligner.thirdparty.kaldi import validate_alignment_binaries - if not validate_alignment_binaries(): - print("There was an issue validating Kaldi binaries, please ensure you've downloaded them via the " - "'mfa thirdparty download' command. See 'mfa thirdparty validate' for more detailed information " - "on why this check failed.") - sys.exit(1) - elif args.subcommand in ['transcribe']: - from montreal_forced_aligner.thirdparty.kaldi import validate_transcribe_binaries - if not validate_transcribe_binaries(): - print("There was an issue validating Kaldi binaries, please ensure you've downloaded them via the " - "'mfa thirdparty download' command. See 'mfa thirdparty validate' for more detailed information " - "on why this check failed. If you are on MacOS, please note that the thirdparty binaries available " - "via the download command do not contain the transcription ones. To get this functionality working " - "for the time being, please build kaldi locally and follow the instructions for running the " - "'mfa thirdparty kaldi' command.") - sys.exit(1) - elif args.subcommand in ['train_dictionary']: - from montreal_forced_aligner.thirdparty.kaldi import validate_train_dictionary_binaries - if not validate_train_dictionary_binaries(): - print("There was an issue validating Kaldi binaries, please ensure you've downloaded them via the " - "'mfa thirdparty download' command. See 'mfa thirdparty validate' for more detailed information " - "on why this check failed. If you are on MacOS, please note that the thirdparty binaries available " - "via the download command do not contain the train_dictionary ones. To get this functionality working " - "for the time being, please build kaldi locally and follow the instructions for running the " - "'mfa thirdparty kaldi' command.") - sys.exit(1) - elif args.subcommand in ['g2p', 'train_g2p']: - try: - import pynini - except ImportError: - print("There was an issue importing Pynini, please ensure that it is installed. 
If you are on Windows, " - "please use the Windows Subsystem for Linux to use g2p functionality.") - sys.exit(1) - if args.subcommand == 'align': - run_align_corpus(args, unknown, acoustic_languages) - elif args.subcommand == 'adapt': - run_adapt_model(args, unknown, acoustic_languages) - elif args.subcommand == 'train': - run_train_corpus(args, unknown) - elif args.subcommand == 'g2p': - run_g2p(args, unknown, g2p_languages) - elif args.subcommand == 'train_g2p': - run_train_g2p(args, unknown) - elif args.subcommand == 'validate': - run_validate_corpus(args, unknown) - elif args.subcommand == 'download': - run_download(args) - elif args.subcommand == 'train_lm': - run_train_lm(args, unknown) - elif args.subcommand == 'train_dictionary': - run_train_dictionary(args, unknown) - elif args.subcommand == 'train_ivector': - run_train_ivector_extractor(args, unknown) - elif args.subcommand == 'classify_speakers': - run_classify_speakers(args, unknown) - elif args.subcommand in ['annotator', 'anchor']: - from montreal_forced_aligner.command_line.anchor import run_anchor - run_anchor(args) - elif args.subcommand == 'thirdparty': - run_thirdparty(args) - elif args.subcommand == 'transcribe': - run_transcribe_corpus(args, unknown) - elif args.subcommand == 'create_segments': - run_create_segments(args, unknown) - elif args.subcommand == 'configure': - update_global_config(args) - global GLOBAL_CONFIG - GLOBAL_CONFIG = load_global_config() - elif args.subcommand == 'history': - depth = args.depth - history = load_command_history()[-depth:] - for h in history: - if args.verbose: - print('command\tDate\tExecution time\tVersion\tExit code\tException') - for h in history: - execution_time = time.strftime('%H:%M:%S', time.gmtime(h['execution_time'])) - d = h['date'].isoformat() - print( - f"{h['command']}\t{d}\t{execution_time}\t{h['version']}\t{h['exit_code']}\t{h['exception']}") - pass - else: - for h in history: - print(h['command']) - - elif args.subcommand == 'version': - print(__version__) - except MFAError as e: - if getattr(args, 'debug', False): - raise - print(e) - sys.exit(1) - finally: - unfix_path() - - -if __name__ == '__main__': - main() diff --git a/spaces/NATSpeech/DiffSpeech/utils/commons/ddp_utils.py b/spaces/NATSpeech/DiffSpeech/utils/commons/ddp_utils.py deleted file mode 100644 index 4b529198c13a1ffc622baea6e5178407b24aee8f..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/utils/commons/ddp_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -from torch.nn.parallel import DistributedDataParallel -from torch.nn.parallel.distributed import _find_tensors -import torch.optim -import torch.utils.data -import torch -from packaging import version - -class DDP(DistributedDataParallel): - """ - Override the forward call in lightning so it goes to training and validation step respectively - """ - - def forward(self, *inputs, **kwargs): # pragma: no cover - if version.parse(torch.__version__[:6]) < version.parse("1.11"): - self._sync_params() - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) - assert len(self.device_ids) == 1 - if self.module.training: - output = self.module.training_step(*inputs[0], **kwargs[0]) - elif self.module.testing: - output = self.module.test_step(*inputs[0], **kwargs[0]) - else: - output = self.module.validation_step(*inputs[0], **kwargs[0]) - if torch.is_grad_enabled(): - # We'll return the output object verbatim since it is a freeform - # object. 
We need to find any tensors in this object, though, - # because we need to figure out which parameters were used during - # this forward pass, to ensure we short circuit reduction for any - # unused parameters. Only if `find_unused_parameters` is set. - if self.find_unused_parameters: - self.reducer.prepare_for_backward(list(_find_tensors(output))) - else: - self.reducer.prepare_for_backward([]) - else: - from torch.nn.parallel.distributed import \ - logging, Join, _DDPSink, _tree_flatten_with_rref, _tree_unflatten_with_rref - with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): - if torch.is_grad_enabled() and self.require_backward_grad_sync: - self.logger.set_runtime_stats_and_log() - self.num_iterations += 1 - self.reducer.prepare_for_forward() - - # Notify the join context that this process has not joined, if - # needed - work = Join.notify_join_context(self) - if work: - self.reducer._set_forward_pass_work_handle( - work, self._divide_by_initial_world_size - ) - - # Calling _rebuild_buckets before forward compuation, - # It may allocate new buckets before deallocating old buckets - # inside _rebuild_buckets. To save peak memory usage, - # call _rebuild_buckets before the peak memory usage increases - # during forward computation. - # This should be called only once during whole training period. - if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): - logging.info("Reducer buckets have been rebuilt in this iteration.") - self._has_rebuilt_buckets = True - - # sync params according to location (before/after forward) user - # specified as part of hook, if hook was specified. - buffer_hook_registered = hasattr(self, 'buffer_hook') - if self._check_sync_bufs_pre_fwd(): - self._sync_buffers() - - if self._join_config.enable: - # Notify joined ranks whether they should sync in backwards pass or not. - self._check_global_requires_backward_grad_sync(is_joined_rank=False) - - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) - if self.module.training: - output = self.module.training_step(*inputs[0], **kwargs[0]) - elif self.module.testing: - output = self.module.test_step(*inputs[0], **kwargs[0]) - else: - output = self.module.validation_step(*inputs[0], **kwargs[0]) - - # sync params according to location (before/after forward) user - # specified as part of hook, if hook was specified. - if self._check_sync_bufs_post_fwd(): - self._sync_buffers() - - if torch.is_grad_enabled() and self.require_backward_grad_sync: - self.require_forward_param_sync = True - # We'll return the output object verbatim since it is a freeform - # object. We need to find any tensors in this object, though, - # because we need to figure out which parameters were used during - # this forward pass, to ensure we short circuit reduction for any - # unused parameters. Only if `find_unused_parameters` is set. - if self.find_unused_parameters and not self.static_graph: - # Do not need to populate this for static graph. - self.reducer.prepare_for_backward(list(_find_tensors(output))) - else: - self.reducer.prepare_for_backward([]) - else: - self.require_forward_param_sync = False - - # TODO: DDPSink is currently enabled for unused parameter detection and - # static graph training for first iteration. 
- if (self.find_unused_parameters and not self.static_graph) or ( - self.static_graph and self.num_iterations == 1 - ): - state_dict = { - 'static_graph': self.static_graph, - 'num_iterations': self.num_iterations, - } - - output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref( - output - ) - output_placeholders = [None for _ in range(len(output_tensor_list))] - # Do not touch tensors that have no grad_fn, which can cause issues - # such as https://github.com/pytorch/pytorch/issues/60733 - for i, output in enumerate(output_tensor_list): - if torch.is_tensor(output) and output.grad_fn is None: - output_placeholders[i] = output - - # When find_unused_parameters=True, makes tensors which require grad - # run through the DDPSink backward pass. When not all outputs are - # used in loss, this makes those corresponding tensors receive - # undefined gradient which the reducer then handles to ensure - # param.grad field is not touched and we don't error out. - passthrough_tensor_list = _DDPSink.apply( - self.reducer, - state_dict, - *output_tensor_list, - ) - for i in range(len(output_placeholders)): - if output_placeholders[i] is None: - output_placeholders[i] = passthrough_tensor_list[i] - - # Reconstruct output data structure. - output = _tree_unflatten_with_rref( - output_placeholders, treespec, output_is_rref - ) - return output diff --git a/spaces/NSect/VALL-E-X/data/__init__.py b/spaces/NSect/VALL-E-X/data/__init__.py deleted file mode 100644 index 68f9defe677e03da5224c42cb28932f2e7f75ada..0000000000000000000000000000000000000000 --- a/spaces/NSect/VALL-E-X/data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .collation import * diff --git a/spaces/NohTow/Llama2_watermarking/app.py b/spaces/NohTow/Llama2_watermarking/app.py deleted file mode 100644 index 36538b10e8da9e894c59e784895fd29b7a31a9ef..0000000000000000000000000000000000000000 --- a/spaces/NohTow/Llama2_watermarking/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import torch -import argparse -import os -import numpy as np - -from watermark import Watermarker -import time -import gradio as gr -from transformers import AutoModelForCausalLM, AutoTokenizer - -hf_token = os.getenv('HF_TOKEN') - -device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') - -parser = argparse.ArgumentParser(description='Generative Text Watermarking demo') -parser.add_argument('--model', '-m', type=str, default="meta-llama/Llama-2-7b-chat-hf", help='Language model') -parser.add_argument('--key', '-k', type=int, default=42, - help='The seed of the pseudo random number generator') - -args = parser.parse_args() - -USERS = ['Alice', 'Bob', 'Charlie', 'Dan'] -EMBED_METHODS = [ 'aaronson', 'kirchenbauer', 'sampling', 'greedy' ] -DETECT_METHODS = [ 'aaronson', 'aaronson_simplified', 'aaronson_neyman_pearson', 'kirchenbauer'] -PAYLOAD_BITS = 2 -device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') - - -model = AutoModelForCausalLM.from_pretrained(args.model, use_auth_token=hf_token, torch_dtype=torch.float16, - device_map='auto').to(device) -tokenizer = AutoTokenizer.from_pretrained(args.model, use_auth_token=hf_token) -tokenizer.pad_token = tokenizer.eos_token - -DEFAULT_SYSTEM_PROMPT = """\ -You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. 
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\ -""" -LEN_DEFAULT_PROMPT = len(tokenizer.encode(DEFAULT_SYSTEM_PROMPT)) -def embed(user, max_length, window_size, method, prompt): - - uid = USERS.index(user) - watermarker = Watermarker(tokenizer=tokenizer, model=model, window_size=window_size, payload_bits=PAYLOAD_BITS) - prompt = get_prompt(prompt) - watermarked_texts = watermarker.embed(key=args.key, messages=[ uid ], - max_length=max_length+LEN_DEFAULT_PROMPT, method=method, prompt=prompt) - return watermarked_texts[0].split("[/INST]")[1] - -def detect(attacked_text, window_size, method, prompt): - watermarker = Watermarker(tokenizer=tokenizer, model=model, window_size=window_size, payload_bits=PAYLOAD_BITS) - prompt = get_prompt(prompt) - print([ prompt + attacked_text ]) - pvalues, messages = watermarker.detect([ prompt + attacked_text ], key=args.key, method=method, prompts=[prompt]) - - print("messages: ", messages) - print("p-values: ", pvalues) - user = USERS[messages[0]] - pf = pvalues[0] - label = 'The user detected is {:s} with pvalue of {:.3e}'.format(user, pf) - - return label - -def get_prompt(message: str) -> str: - # texts = [f'[INST] <>\n{DEFAULT_SYSTEM_PROMPT}\n<>\n\n'] - # The first user input is _not_ stripped - # texts.append(f'{message} [/INST]') - # return ''.join(texts) - return f"[INST] {message} [/INST]" - - -with gr.Blocks() as demo: - gr.Markdown("""# LLM generation watermarking - This spaces let you to try different watermarking scheme for LLM generation.\n - It leverages the upgrades introduced in the paper, reducing the gap between empirical and theoretical false positive detection rate and give the ability to embed a message (of n bits). Here we use this capacity to embed the identity of the user generating the text, but it could also be used to identify different version of a model or just convey a secret message.\n - Simply select an user name, set the maximum text length, the watermarking window size and the prompt. Aaronson and Kirchenbauer watermarking scheme are proposed, along traditional sampling and greedy search without watermarking.\n - Once the text is generated, you can eventually apply some attacks to it (e.g, remove words), select the associated detection method and run the detection. 
Please note that the detection is non-blind, and require the original prompt to be known and so left untouched.\n - For Aaronson, the original detection function, along the Neyman-Pearson and Simplified Score version are available.""") - with gr.Row(): - user = gr.Dropdown(choices=USERS, value=USERS[0], label="User") - text_length = gr.Number(minimum=1, maximum=512, value=256, step=1, precision=0, label="Max text length") - window_size = gr.Number(minimum=0, maximum=10, value=0, step=1, precision=0, label="Watermarking window size") - embed_method = gr.Dropdown(choices=EMBED_METHODS, value=EMBED_METHODS[0], label="Sampling method") - prompt = gr.Textbox(label="prompt") - with gr.Row(): - btn1 = gr.Button("Embed") - with gr.Row(): - watermarked_text = gr.Textbox(label="Generated text") - detect_method = gr.Dropdown(choices=DETECT_METHODS, value=DETECT_METHODS[0], label="Detection method") - with gr.Row(): - btn2 = gr.Button("Detect") - with gr.Row(): - detection_label = gr.Label(label="Detection result") - - btn1.click(fn=embed, inputs=[user, text_length, window_size, embed_method, prompt], outputs=[watermarked_text], api_name="watermark") - btn2.click(fn=detect, inputs=[watermarked_text, window_size, detect_method, prompt], outputs=[detection_label], api_name="detect") - - demo.launch() - - diff --git a/spaces/OAOA/DifFace/basicsr/data/video_test_dataset.py b/spaces/OAOA/DifFace/basicsr/data/video_test_dataset.py deleted file mode 100644 index 929f7d97472a0eb810e33e694d5362a6749ab4b6..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/data/video_test_dataset.py +++ /dev/null @@ -1,283 +0,0 @@ -import glob -import torch -from os import path as osp -from torch.utils import data as data - -from basicsr.data.data_util import duf_downsample, generate_frame_indices, read_img_seq -from basicsr.utils import get_root_logger, scandir -from basicsr.utils.registry import DATASET_REGISTRY - - -@DATASET_REGISTRY.register() -class VideoTestDataset(data.Dataset): - """Video test dataset. - - Supported datasets: Vid4, REDS4, REDSofficial. - More generally, it supports testing dataset with following structures: - - :: - - dataroot - ├── subfolder1 - ├── frame000 - ├── frame001 - ├── ... - ├── subfolder2 - ├── frame000 - ├── frame001 - ├── ... - ├── ... - - For testing datasets, there is no need to prepare LMDB files. - - Args: - opt (dict): Config for train dataset. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - io_backend (dict): IO backend type and other kwarg. - cache_data (bool): Whether to cache testing datasets. - name (str): Dataset name. - meta_info_file (str): The path to the file storing the list of test folders. If not provided, all the folders - in the dataroot will be used. - num_frame (int): Window size for input frames. - padding (str): Padding mode. - """ - - def __init__(self, opt): - super(VideoTestDataset, self).__init__() - self.opt = opt - self.cache_data = opt['cache_data'] - self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq'] - self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []} - # file client (io backend) - self.file_client = None - self.io_backend_opt = opt['io_backend'] - assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.' 
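        # Illustrative sketch of the `opt` dict this dataset expects (dataset name and
        # paths are hypothetical placeholders, not from the original config files):
        #     opt = dict(
        #         name='Vid4',
        #         dataroot_gt='datasets/Vid4/GT',
        #         dataroot_lq='datasets/Vid4/BIx4',
        #         io_backend=dict(type='disk'),
        #         cache_data=False,
        #         num_frame=7,
        #         padding='reflection')
        #     test_set = VideoTestDataset(opt)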
- - logger = get_root_logger() - logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}') - self.imgs_lq, self.imgs_gt = {}, {} - if 'meta_info_file' in opt: - with open(opt['meta_info_file'], 'r') as fin: - subfolders = [line.split(' ')[0] for line in fin] - subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders] - subfolders_gt = [osp.join(self.gt_root, key) for key in subfolders] - else: - subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*'))) - subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*'))) - - if opt['name'].lower() in ['vid4', 'reds4', 'redsofficial']: - for subfolder_lq, subfolder_gt in zip(subfolders_lq, subfolders_gt): - # get frame list for lq and gt - subfolder_name = osp.basename(subfolder_lq) - img_paths_lq = sorted(list(scandir(subfolder_lq, full_path=True))) - img_paths_gt = sorted(list(scandir(subfolder_gt, full_path=True))) - - max_idx = len(img_paths_lq) - assert max_idx == len(img_paths_gt), (f'Different number of images in lq ({max_idx})' - f' and gt folders ({len(img_paths_gt)})') - - self.data_info['lq_path'].extend(img_paths_lq) - self.data_info['gt_path'].extend(img_paths_gt) - self.data_info['folder'].extend([subfolder_name] * max_idx) - for i in range(max_idx): - self.data_info['idx'].append(f'{i}/{max_idx}') - border_l = [0] * max_idx - for i in range(self.opt['num_frame'] // 2): - border_l[i] = 1 - border_l[max_idx - i - 1] = 1 - self.data_info['border'].extend(border_l) - - # cache data or save the frame list - if self.cache_data: - logger.info(f'Cache {subfolder_name} for VideoTestDataset...') - self.imgs_lq[subfolder_name] = read_img_seq(img_paths_lq) - self.imgs_gt[subfolder_name] = read_img_seq(img_paths_gt) - else: - self.imgs_lq[subfolder_name] = img_paths_lq - self.imgs_gt[subfolder_name] = img_paths_gt - else: - raise ValueError(f'Non-supported video test dataset: {type(opt["name"])}') - - def __getitem__(self, index): - folder = self.data_info['folder'][index] - idx, max_idx = self.data_info['idx'][index].split('/') - idx, max_idx = int(idx), int(max_idx) - border = self.data_info['border'][index] - lq_path = self.data_info['lq_path'][index] - - select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding']) - - if self.cache_data: - imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx)) - img_gt = self.imgs_gt[folder][idx] - else: - img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx] - imgs_lq = read_img_seq(img_paths_lq) - img_gt = read_img_seq([self.imgs_gt[folder][idx]]) - img_gt.squeeze_(0) - - return { - 'lq': imgs_lq, # (t, c, h, w) - 'gt': img_gt, # (c, h, w) - 'folder': folder, # folder name - 'idx': self.data_info['idx'][index], # e.g., 0/99 - 'border': border, # 1 for border, 0 for non-border - 'lq_path': lq_path # center frame - } - - def __len__(self): - return len(self.data_info['gt_path']) - - -@DATASET_REGISTRY.register() -class VideoTestVimeo90KDataset(data.Dataset): - """Video test dataset for Vimeo90k-Test dataset. - - It only keeps the center frame for testing. - For testing datasets, there is no need to prepare LMDB files. - - Args: - opt (dict): Config for train dataset. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - io_backend (dict): IO backend type and other kwarg. - cache_data (bool): Whether to cache testing datasets. - name (str): Dataset name. - meta_info_file (str): The path to the file storing the list of test folders. 
If not provided, all the folders - in the dataroot will be used. - num_frame (int): Window size for input frames. - padding (str): Padding mode. - """ - - def __init__(self, opt): - super(VideoTestVimeo90KDataset, self).__init__() - self.opt = opt - self.cache_data = opt['cache_data'] - if self.cache_data: - raise NotImplementedError('cache_data in Vimeo90K-Test dataset is not implemented.') - self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq'] - self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []} - neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])] - - # file client (io backend) - self.file_client = None - self.io_backend_opt = opt['io_backend'] - assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.' - - logger = get_root_logger() - logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}') - with open(opt['meta_info_file'], 'r') as fin: - subfolders = [line.split(' ')[0] for line in fin] - for idx, subfolder in enumerate(subfolders): - gt_path = osp.join(self.gt_root, subfolder, 'im4.png') - self.data_info['gt_path'].append(gt_path) - lq_paths = [osp.join(self.lq_root, subfolder, f'im{i}.png') for i in neighbor_list] - self.data_info['lq_path'].append(lq_paths) - self.data_info['folder'].append('vimeo90k') - self.data_info['idx'].append(f'{idx}/{len(subfolders)}') - self.data_info['border'].append(0) - - def __getitem__(self, index): - lq_path = self.data_info['lq_path'][index] - gt_path = self.data_info['gt_path'][index] - imgs_lq = read_img_seq(lq_path) - img_gt = read_img_seq([gt_path]) - img_gt.squeeze_(0) - - return { - 'lq': imgs_lq, # (t, c, h, w) - 'gt': img_gt, # (c, h, w) - 'folder': self.data_info['folder'][index], # folder name - 'idx': self.data_info['idx'][index], # e.g., 0/843 - 'border': self.data_info['border'][index], # 0 for non-border - 'lq_path': lq_path[self.opt['num_frame'] // 2] # center frame - } - - def __len__(self): - return len(self.data_info['gt_path']) - - -@DATASET_REGISTRY.register() -class VideoTestDUFDataset(VideoTestDataset): - """ Video test dataset for DUF dataset. - - Args: - opt (dict): Config for train dataset. Most of keys are the same as VideoTestDataset. - It has the following extra keys: - use_duf_downsampling (bool): Whether to use duf downsampling to generate low-resolution frames. - scale (bool): Scale, which will be added automatically. 
- """ - - def __getitem__(self, index): - folder = self.data_info['folder'][index] - idx, max_idx = self.data_info['idx'][index].split('/') - idx, max_idx = int(idx), int(max_idx) - border = self.data_info['border'][index] - lq_path = self.data_info['lq_path'][index] - - select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding']) - - if self.cache_data: - if self.opt['use_duf_downsampling']: - # read imgs_gt to generate low-resolution frames - imgs_lq = self.imgs_gt[folder].index_select(0, torch.LongTensor(select_idx)) - imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale']) - else: - imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx)) - img_gt = self.imgs_gt[folder][idx] - else: - if self.opt['use_duf_downsampling']: - img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx] - # read imgs_gt to generate low-resolution frames - imgs_lq = read_img_seq(img_paths_lq, require_mod_crop=True, scale=self.opt['scale']) - imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale']) - else: - img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx] - imgs_lq = read_img_seq(img_paths_lq) - img_gt = read_img_seq([self.imgs_gt[folder][idx]], require_mod_crop=True, scale=self.opt['scale']) - img_gt.squeeze_(0) - - return { - 'lq': imgs_lq, # (t, c, h, w) - 'gt': img_gt, # (c, h, w) - 'folder': folder, # folder name - 'idx': self.data_info['idx'][index], # e.g., 0/99 - 'border': border, # 1 for border, 0 for non-border - 'lq_path': lq_path # center frame - } - - -@DATASET_REGISTRY.register() -class VideoRecurrentTestDataset(VideoTestDataset): - """Video test dataset for recurrent architectures, which takes LR video - frames as input and output corresponding HR video frames. - - Args: - opt (dict): Same as VideoTestDataset. Unused opt: - padding (str): Padding mode. - - """ - - def __init__(self, opt): - super(VideoRecurrentTestDataset, self).__init__(opt) - # Find unique folder strings - self.folders = sorted(list(set(self.data_info['folder']))) - - def __getitem__(self, index): - folder = self.folders[index] - - if self.cache_data: - imgs_lq = self.imgs_lq[folder] - imgs_gt = self.imgs_gt[folder] - else: - raise NotImplementedError('Without cache_data is not implemented.') - - return { - 'lq': imgs_lq, - 'gt': imgs_gt, - 'folder': folder, - } - - def __len__(self): - return len(self.folders) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_af_xh.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_af_xh.sh deleted file mode 100644 index a78fbbbbccb6f6ae005a1f03b97f083a2d958ebe..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_af_xh.sh +++ /dev/null @@ -1,164 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# set -x -e - -if [ -z $WORKDIR_ROOT ] ; -then - echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 
- exit -fi - - -# put intermediate files -TMP_DIR=$WORKDIR_ROOT/temp/af_xhv2 -# output {train,valid,test} files to dest -DEST=${WORKDIR_ROOT}/ML50/raw - - - -ROOT=${WORKDIR_ROOT} -UTILS=$PWD/utils -TMX2CORPUS="${UTILS}/tmx2corpus" -TMX_TOOL="python ${TMX2CORPUS}/tmx2corpus.py" - -mkdir -p $TMP_DIR -mkdir -p $DEST -mkdir -p $UTILS - -function download_opus(){ - src=$1 - tgt=$2 - subset=$3 - ulr=$4 - - mkdir extract_$subset.$src-$tgt - pushd extract_$subset.$src-$tgt - if [ ! -f "$subset.$src-$tgt.tmx.gz" ]; then - wget $url -O "$subset.$src-$tgt.tmx.gz" - gzip -d "$subset.$src-$tgt.tmx.gz" - f=$subset.$src-$tgt.tmx - $TMX_TOOL $f - mv bitext.$src ../$subset.$src-$tgt.$src - mv bitext.$tgt ../$subset.$src-$tgt.$tgt - fi - popd -} - -function concat_subsets(){ - src=$1 - tgt=$2 - subsets=$3 - src_train=raw_train.$src-$tgt.$src - tgt_train=raw_train.$src-$tgt.$tgt - > $src_train - > $tgt_train - for subset in $subsets; do - cat $subset.$src-$tgt.$src >> $src_train - cat $subset.$src-$tgt.$tgt >> $tgt_train - done -} - - - -function get_seeded_random() -{ - seed="$1" - openssl enc -aes-256-ctr -pass pass:"$seed" -nosalt \ - /dev/null -} - -function split_train_valid(){ - src=$1 - tgt=$2 - raw_src_train=raw_train.$src-$tgt.$src - raw_tgt_train=raw_train.$src-$tgt.$tgt - - shuf --random-source=<(get_seeded_random 43) $raw_src_train > shuffled.$src-$tgt.$src - shuf --random-source=<(get_seeded_random 43) $raw_tgt_train > shuffled.$src-$tgt.$tgt - - head -n 1500 shuffled.$src-$tgt.$src > valid.$src-$tgt.$src - head -n 1500 shuffled.$src-$tgt.$tgt > valid.$src-$tgt.$tgt - - tail +1501 shuffled.$src-$tgt.$src > train.$src-$tgt.$src - tail +1501 shuffled.$src-$tgt.$tgt > train.$src-$tgt.$tgt -} - -function copy2dst(){ - lsrc=$1 - ltgt=$2 - src=${lsrc:0:2} - tgt=${ltgt:0:2} - - - cp valid.$src-$tgt.$src $DEST/valid.$lsrc-$ltgt.$lsrc - cp valid.$src-$tgt.$tgt $DEST/valid.$lsrc-$ltgt.$ltgt - - cp train.$src-$tgt.$src $DEST/train.$lsrc-$ltgt.$lsrc - cp train.$src-$tgt.$tgt $DEST/train.$lsrc-$ltgt.$ltgt -} - - - - -#for xh-en -declare -A xh_en_urls -xh_en_urls=( - [Tatoeba]=https://object.pouta.csc.fi/OPUS-Tatoeba/v20190709/tmx/en-xh.tmx.gz - [wikimedia]=https://object.pouta.csc.fi/OPUS-wikimedia/v20190628/tmx/en-xh.tmx.gz - [memat]=https://object.pouta.csc.fi/OPUS-memat/v1/tmx/en-xh.tmx.gz - [uedin]=https://object.pouta.csc.fi/OPUS-bible-uedin/v1/tmx/en-xh.tmx.gz - [GNOME]=https://object.pouta.csc.fi/OPUS-GNOME/v1/tmx/en-xh.tmx.gz - [XhosaNavy]=https://object.pouta.csc.fi/OPUS-XhosaNavy/v1/tmx/en-xh.tmx.gz - [KDE4]=https://object.pouta.csc.fi/OPUS-KDE4/v2/tmx/en-xh.tmx.gz - [Ubuntu]=https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/tmx/en-xh.tmx.gz -) - -mkdir $TMP_DIR/xh-en -pushd $TMP_DIR/xh-en -for k in "${!xh_en_urls[@]}" -do - name=$k - url=${xh_en_urls[$k]} - echo "$name: $url" - download_opus xh en $name $ulr -done -concat_subsets xh en "${!xh_en_urls[@]}" -split_train_valid xh en -copy2dst xh_ZA en_XX -popd - - -## -#for af-en -declare -A af_en_urls -af_en_urls=( - [Tatoeba]=https://object.pouta.csc.fi/OPUS-Tatoeba/v20190709/tmx/af-en.tmx.gz - [uedin]=https://object.pouta.csc.fi/OPUS-bible-uedin/v1/tmx/af-en.tmx.gz - [GNOME]=https://object.pouta.csc.fi/OPUS-GNOME/v1/tmx/af-en.tmx.gz - [QED]=https://object.pouta.csc.fi/OPUS-QED/v2.0a/tmx/af-en.tmx.gz - [KDE4]=https://object.pouta.csc.fi/OPUS-KDE4/v2/tmx/af-en.tmx.gz - [OpenSubtitles]=https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/tmx/af-en.tmx.gz - [SPC]=https://object.pouta.csc.fi/OPUS-SPC/v1/tmx/af-en.tmx.gz - 
[Ubuntu]=https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/tmx/af-en.tmx.gz -) - -mkdir $TMP_DIR/af-en -pushd $TMP_DIR/af-en -for k in "${!af_en_urls[@]}" -do - name=$k - url=${af_en_urls[$k]} - echo "$name: $url" - download_opus af en $name $ulr -done -concat_subsets af en "${!af_en_urls[@]}" -split_train_valid af en -copy2dst af_ZA en_XX -popd - - diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/vggtransformer.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/vggtransformer.py deleted file mode 100644 index bca0ae59a8cbe2b7c337e395021c883a61d101ee..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/vggtransformer.py +++ /dev/null @@ -1,1020 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import math -from collections.abc import Iterable - -import torch -import torch.nn as nn -from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqEncoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) -from fairseq.modules import ( - LinearizedConvolution, - TransformerDecoderLayer, - TransformerEncoderLayer, - VGGBlock, -) - - -@register_model("asr_vggtransformer") -class VGGTransformerModel(FairseqEncoderDecoderModel): - """ - Transformers with convolutional context for ASR - https://arxiv.org/abs/1904.11660 - """ - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--input-feat-per-channel", - type=int, - metavar="N", - help="encoder input dimension per input channel", - ) - parser.add_argument( - "--vggblock-enc-config", - type=str, - metavar="EXPR", - help=""" - an array of tuples each containing the configuration of one vggblock: - [(out_channels, - conv_kernel_size, - pooling_kernel_size, - num_conv_layers, - use_layer_norm), ...]) - """, - ) - parser.add_argument( - "--transformer-enc-config", - type=str, - metavar="EXPR", - help="""" - a tuple containing the configuration of the encoder transformer layers - configurations: - [(input_dim, - num_heads, - ffn_dim, - normalize_before, - dropout, - attention_dropout, - relu_dropout), ...]') - """, - ) - parser.add_argument( - "--enc-output-dim", - type=int, - metavar="N", - help=""" - encoder output dimension, can be None. If specified, projecting the - transformer output to the specified dimension""", - ) - parser.add_argument( - "--in-channels", - type=int, - metavar="N", - help="number of encoder input channels", - ) - parser.add_argument( - "--tgt-embed-dim", - type=int, - metavar="N", - help="embedding dimension of the decoder target tokens", - ) - parser.add_argument( - "--transformer-dec-config", - type=str, - metavar="EXPR", - help=""" - a tuple containing the configuration of the decoder transformer layers - configurations: - [(input_dim, - num_heads, - ffn_dim, - normalize_before, - dropout, - attention_dropout, - relu_dropout), ...] 
- """, - ) - parser.add_argument( - "--conv-dec-config", - type=str, - metavar="EXPR", - help=""" - an array of tuples for the decoder 1-D convolution config - [(out_channels, conv_kernel_size, use_layer_norm), ...]""", - ) - - @classmethod - def build_encoder(cls, args, task): - return VGGTransformerEncoder( - input_feat_per_channel=args.input_feat_per_channel, - vggblock_config=eval(args.vggblock_enc_config), - transformer_config=eval(args.transformer_enc_config), - encoder_output_dim=args.enc_output_dim, - in_channels=args.in_channels, - ) - - @classmethod - def build_decoder(cls, args, task): - return TransformerDecoder( - dictionary=task.target_dictionary, - embed_dim=args.tgt_embed_dim, - transformer_config=eval(args.transformer_dec_config), - conv_config=eval(args.conv_dec_config), - encoder_output_dim=args.enc_output_dim, - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted - # (in case there are any new ones) - base_architecture(args) - - encoder = cls.build_encoder(args, task) - decoder = cls.build_decoder(args, task) - return cls(encoder, decoder) - - def get_normalized_probs(self, net_output, log_probs, sample=None): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = super().get_normalized_probs(net_output, log_probs, sample) - lprobs.batch_first = True - return lprobs - - -DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2 -DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2 -# 256: embedding dimension -# 4: number of heads -# 1024: FFN -# True: apply layerNorm before (dropout + resiaul) instead of after -# 0.2 (dropout): dropout after MultiheadAttention and second FC -# 0.2 (attention_dropout): dropout in MultiheadAttention -# 0.2 (relu_dropout): dropout after ReLu -DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2 -DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2 - - -# TODO: repace transformer encoder config from one liner -# to explicit args to get rid of this transformation -def prepare_transformer_encoder_params( - input_dim, - num_heads, - ffn_dim, - normalize_before, - dropout, - attention_dropout, - relu_dropout, -): - args = argparse.Namespace() - args.encoder_embed_dim = input_dim - args.encoder_attention_heads = num_heads - args.attention_dropout = attention_dropout - args.dropout = dropout - args.activation_dropout = relu_dropout - args.encoder_normalize_before = normalize_before - args.encoder_ffn_embed_dim = ffn_dim - return args - - -def prepare_transformer_decoder_params( - input_dim, - num_heads, - ffn_dim, - normalize_before, - dropout, - attention_dropout, - relu_dropout, -): - args = argparse.Namespace() - args.encoder_embed_dim = None - args.decoder_embed_dim = input_dim - args.decoder_attention_heads = num_heads - args.attention_dropout = attention_dropout - args.dropout = dropout - args.activation_dropout = relu_dropout - args.decoder_normalize_before = normalize_before - args.decoder_ffn_embed_dim = ffn_dim - return args - - -class VGGTransformerEncoder(FairseqEncoder): - """VGG + Transformer encoder""" - - def __init__( - self, - input_feat_per_channel, - vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, - transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, - encoder_output_dim=512, - in_channels=1, - transformer_context=None, - transformer_sampling=None, - ): - """constructor for VGGTransformerEncoder - - Args: - - input_feat_per_channel: feature dim (not including stacked, - just base feature) - - 
in_channel: # input channels (e.g., if stack 8 feature vector - together, this is 8) - - vggblock_config: configuration of vggblock, see comments on - DEFAULT_ENC_VGGBLOCK_CONFIG - - transformer_config: configuration of transformer layer, see comments - on DEFAULT_ENC_TRANSFORMER_CONFIG - - encoder_output_dim: final transformer output embedding dimension - - transformer_context: (left, right) if set, self-attention will be focused - on (t-left, t+right) - - transformer_sampling: an iterable of int, must match with - len(transformer_config), transformer_sampling[i] indicates sampling - factor for i-th transformer layer, after multihead att and feedfoward - part - """ - super().__init__(None) - - self.num_vggblocks = 0 - if vggblock_config is not None: - if not isinstance(vggblock_config, Iterable): - raise ValueError("vggblock_config is not iterable") - self.num_vggblocks = len(vggblock_config) - - self.conv_layers = nn.ModuleList() - self.in_channels = in_channels - self.input_dim = input_feat_per_channel - self.pooling_kernel_sizes = [] - - if vggblock_config is not None: - for _, config in enumerate(vggblock_config): - ( - out_channels, - conv_kernel_size, - pooling_kernel_size, - num_conv_layers, - layer_norm, - ) = config - self.conv_layers.append( - VGGBlock( - in_channels, - out_channels, - conv_kernel_size, - pooling_kernel_size, - num_conv_layers, - input_dim=input_feat_per_channel, - layer_norm=layer_norm, - ) - ) - self.pooling_kernel_sizes.append(pooling_kernel_size) - in_channels = out_channels - input_feat_per_channel = self.conv_layers[-1].output_dim - - transformer_input_dim = self.infer_conv_output_dim( - self.in_channels, self.input_dim - ) - # transformer_input_dim is the output dimension of VGG part - - self.validate_transformer_config(transformer_config) - self.transformer_context = self.parse_transformer_context(transformer_context) - self.transformer_sampling = self.parse_transformer_sampling( - transformer_sampling, len(transformer_config) - ) - - self.transformer_layers = nn.ModuleList() - - if transformer_input_dim != transformer_config[0][0]: - self.transformer_layers.append( - Linear(transformer_input_dim, transformer_config[0][0]) - ) - self.transformer_layers.append( - TransformerEncoderLayer( - prepare_transformer_encoder_params(*transformer_config[0]) - ) - ) - - for i in range(1, len(transformer_config)): - if transformer_config[i - 1][0] != transformer_config[i][0]: - self.transformer_layers.append( - Linear(transformer_config[i - 1][0], transformer_config[i][0]) - ) - self.transformer_layers.append( - TransformerEncoderLayer( - prepare_transformer_encoder_params(*transformer_config[i]) - ) - ) - - self.encoder_output_dim = encoder_output_dim - self.transformer_layers.extend( - [ - Linear(transformer_config[-1][0], encoder_output_dim), - LayerNorm(encoder_output_dim), - ] - ) - - def forward(self, src_tokens, src_lengths, **kwargs): - """ - src_tokens: padded tensor (B, T, C * feat) - src_lengths: tensor of original lengths of input utterances (B,) - """ - bsz, max_seq_len, _ = src_tokens.size() - x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) - x = x.transpose(1, 2).contiguous() - # (B, C, T, feat) - - for layer_idx in range(len(self.conv_layers)): - x = self.conv_layers[layer_idx](x) - - bsz, _, output_seq_len, _ = x.size() - - # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat) - x = x.transpose(1, 2).transpose(0, 1) - x = x.contiguous().view(output_seq_len, bsz, -1) - - input_lengths = src_lengths.clone() 
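        # Note: each pooling layer in the VGG front-end shrinks the time axis by its
        # kernel size, so the valid length of every utterance is downsampled the same
        # way below, e.g. two pooling layers of size 2 turn a 100-frame utterance
        # into ceil(ceil(100 / 2) / 2) = 25 valid frames.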
- for s in self.pooling_kernel_sizes: - input_lengths = (input_lengths.float() / s).ceil().long() - - encoder_padding_mask, _ = lengths_to_encoder_padding_mask( - input_lengths, batch_first=True - ) - if not encoder_padding_mask.any(): - encoder_padding_mask = None - - subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) - attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor) - - transformer_layer_idx = 0 - - for layer_idx in range(len(self.transformer_layers)): - - if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer): - x = self.transformer_layers[layer_idx]( - x, encoder_padding_mask, attn_mask - ) - - if self.transformer_sampling[transformer_layer_idx] != 1: - sampling_factor = self.transformer_sampling[transformer_layer_idx] - x, encoder_padding_mask, attn_mask = self.slice( - x, encoder_padding_mask, attn_mask, sampling_factor - ) - - transformer_layer_idx += 1 - - else: - x = self.transformer_layers[layer_idx](x) - - # encoder_padding_maks is a (T x B) tensor, its [t, b] elements indicate - # whether encoder_output[t, b] is valid or not (valid=0, invalid=1) - - return { - "encoder_out": x, # (T, B, C) - "encoder_padding_mask": encoder_padding_mask.t() - if encoder_padding_mask is not None - else None, - # (B, T) --> (T, B) - } - - def infer_conv_output_dim(self, in_channels, input_dim): - sample_seq_len = 200 - sample_bsz = 10 - x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) - for i, _ in enumerate(self.conv_layers): - x = self.conv_layers[i](x) - x = x.transpose(1, 2) - mb, seq = x.size()[:2] - return x.contiguous().view(mb, seq, -1).size(-1) - - def validate_transformer_config(self, transformer_config): - for config in transformer_config: - input_dim, num_heads = config[:2] - if input_dim % num_heads != 0: - msg = ( - "ERROR in transformer config {}: ".format(config) - + "input dimension {} ".format(input_dim) - + "not dividable by number of heads {}".format(num_heads) - ) - raise ValueError(msg) - - def parse_transformer_context(self, transformer_context): - """ - transformer_context can be the following: - - None; indicates no context is used, i.e., - transformer can access full context - - a tuple/list of two int; indicates left and right context, - any number <0 indicates infinite context - * e.g., (5, 6) indicates that for query at x_t, transformer can - access [t-5, t+6] (inclusive) - * e.g., (-1, 6) indicates that for query at x_t, transformer can - access [0, t+6] (inclusive) - """ - if transformer_context is None: - return None - - if not isinstance(transformer_context, Iterable): - raise ValueError("transformer context must be Iterable if it is not None") - - if len(transformer_context) != 2: - raise ValueError("transformer context must have length 2") - - left_context = transformer_context[0] - if left_context < 0: - left_context = None - - right_context = transformer_context[1] - if right_context < 0: - right_context = None - - if left_context is None and right_context is None: - return None - - return (left_context, right_context) - - def parse_transformer_sampling(self, transformer_sampling, num_layers): - """ - parsing transformer sampling configuration - - Args: - - transformer_sampling, accepted input: - * None, indicating no sampling - * an Iterable with int (>0) as element - - num_layers, expected number of transformer layers, must match with - the length of transformer_sampling if it is not None - - Returns: - - A tuple with length num_layers - """ - if transformer_sampling is None: - 
return (1,) * num_layers - - if not isinstance(transformer_sampling, Iterable): - raise ValueError( - "transformer_sampling must be an iterable if it is not None" - ) - - if len(transformer_sampling) != num_layers: - raise ValueError( - "transformer_sampling {} does not match with the number " - "of layers {}".format(transformer_sampling, num_layers) - ) - - for layer, value in enumerate(transformer_sampling): - if not isinstance(value, int): - raise ValueError("Invalid value in transformer_sampling: ") - if value < 1: - raise ValueError( - "{} layer's subsampling is {}.".format(layer, value) - + " This is not allowed! " - ) - return transformer_sampling - - def slice(self, embedding, padding_mask, attn_mask, sampling_factor): - """ - embedding is a (T, B, D) tensor - padding_mask is a (B, T) tensor or None - attn_mask is a (T, T) tensor or None - """ - embedding = embedding[::sampling_factor, :, :] - if padding_mask is not None: - padding_mask = padding_mask[:, ::sampling_factor] - if attn_mask is not None: - attn_mask = attn_mask[::sampling_factor, ::sampling_factor] - - return embedding, padding_mask, attn_mask - - def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1): - """ - create attention mask according to sequence lengths and transformer - context - - Args: - - input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is - the length of b-th sequence - - subsampling_factor: int - * Note that the left_context and right_context is specified in - the input frame-level while input to transformer may already - go through subsampling (e.g., the use of striding in vggblock) - we use subsampling_factor to scale the left/right context - - Return: - - a (T, T) binary tensor or None, where T is max(input_lengths) - * if self.transformer_context is None, None - * if left_context is None, - * attn_mask[t, t + right_context + 1:] = 1 - * others = 0 - * if right_context is None, - * attn_mask[t, 0:t - left_context] = 1 - * others = 0 - * elsif - * attn_mask[t, t - left_context: t + right_context + 1] = 0 - * others = 1 - """ - if self.transformer_context is None: - return None - - maxT = torch.max(input_lengths).item() - attn_mask = torch.zeros(maxT, maxT) - - left_context = self.transformer_context[0] - right_context = self.transformer_context[1] - if left_context is not None: - left_context = math.ceil(self.transformer_context[0] / subsampling_factor) - if right_context is not None: - right_context = math.ceil(self.transformer_context[1] / subsampling_factor) - - for t in range(maxT): - if left_context is not None: - st = 0 - en = max(st, t - left_context) - attn_mask[t, st:en] = 1 - if right_context is not None: - st = t + right_context + 1 - st = min(st, maxT - 1) - attn_mask[t, st:] = 1 - - return attn_mask.to(input_lengths.device) - - def reorder_encoder_out(self, encoder_out, new_order): - encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( - 1, new_order - ) - if encoder_out["encoder_padding_mask"] is not None: - encoder_out["encoder_padding_mask"] = encoder_out[ - "encoder_padding_mask" - ].index_select(1, new_order) - return encoder_out - - -class TransformerDecoder(FairseqIncrementalDecoder): - """ - Transformer decoder consisting of *args.decoder_layers* layers. Each layer - is a :class:`TransformerDecoderLayer`. 
- Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): decoding dictionary - embed_tokens (torch.nn.Embedding): output embedding - no_encoder_attn (bool, optional): whether to attend to encoder outputs. - Default: ``False`` - left_pad (bool, optional): whether the input is left-padded. Default: - ``False`` - """ - - def __init__( - self, - dictionary, - embed_dim=512, - transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, - conv_config=DEFAULT_DEC_CONV_CONFIG, - encoder_output_dim=512, - ): - - super().__init__(dictionary) - vocab_size = len(dictionary) - self.padding_idx = dictionary.pad() - self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx) - - self.conv_layers = nn.ModuleList() - for i in range(len(conv_config)): - out_channels, kernel_size, layer_norm = conv_config[i] - if i == 0: - conv_layer = LinearizedConv1d( - embed_dim, out_channels, kernel_size, padding=kernel_size - 1 - ) - else: - conv_layer = LinearizedConv1d( - conv_config[i - 1][0], - out_channels, - kernel_size, - padding=kernel_size - 1, - ) - self.conv_layers.append(conv_layer) - if layer_norm: - self.conv_layers.append(nn.LayerNorm(out_channels)) - self.conv_layers.append(nn.ReLU()) - - self.layers = nn.ModuleList() - if conv_config[-1][0] != transformer_config[0][0]: - self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0])) - self.layers.append( - TransformerDecoderLayer( - prepare_transformer_decoder_params(*transformer_config[0]) - ) - ) - - for i in range(1, len(transformer_config)): - if transformer_config[i - 1][0] != transformer_config[i][0]: - self.layers.append( - Linear(transformer_config[i - 1][0], transformer_config[i][0]) - ) - self.layers.append( - TransformerDecoderLayer( - prepare_transformer_decoder_params(*transformer_config[i]) - ) - ) - self.fc_out = Linear(transformer_config[-1][0], vocab_size) - - def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): - """ - Args: - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for input feeding/teacher forcing - encoder_out (Tensor, optional): output from the encoder, used for - encoder-side attention - incremental_state (dict): dictionary used for storing state during - :ref:`Incremental decoding` - Returns: - tuple: - - the last decoder layer's output of shape `(batch, tgt_len, - vocab)` - - the last decoder layer's attention weights of shape `(batch, - tgt_len, src_len)` - """ - target_padding_mask = ( - (prev_output_tokens == self.padding_idx).to(prev_output_tokens.device) - if incremental_state is None - else None - ) - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - - # embed tokens - x = self.embed_tokens(prev_output_tokens) - - # B x T x C -> T x B x C - x = self._transpose_if_training(x, incremental_state) - - for layer in self.conv_layers: - if isinstance(layer, LinearizedConvolution): - x = layer(x, incremental_state) - else: - x = layer(x) - - # B x T x C -> T x B x C - x = self._transpose_if_inference(x, incremental_state) - - # decoder layers - for layer in self.layers: - if isinstance(layer, TransformerDecoderLayer): - x, *_ = layer( - x, - (encoder_out["encoder_out"] if encoder_out is not None else None), - ( - encoder_out["encoder_padding_mask"].t() - if encoder_out["encoder_padding_mask"] is not None - else None - ), - incremental_state, - self_attn_mask=( - self.buffered_future_mask(x) - if incremental_state is None - else None - ), - 
self_attn_padding_mask=( - target_padding_mask if incremental_state is None else None - ), - ) - else: - x = layer(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - x = self.fc_out(x) - - return x, None - - def buffered_future_mask(self, tensor): - dim = tensor.size(0) - if ( - not hasattr(self, "_future_mask") - or self._future_mask is None - or self._future_mask.device != tensor.device - ): - self._future_mask = torch.triu( - utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 - ) - if self._future_mask.size(0) < dim: - self._future_mask = torch.triu( - utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 - ) - return self._future_mask[:dim, :dim] - - def _transpose_if_training(self, x, incremental_state): - if incremental_state is None: - x = x.transpose(0, 1) - return x - - def _transpose_if_inference(self, x, incremental_state): - if incremental_state: - x = x.transpose(0, 1) - return x - - -@register_model("asr_vggtransformer_encoder") -class VGGTransformerEncoderModel(FairseqEncoderModel): - def __init__(self, encoder): - super().__init__(encoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--input-feat-per-channel", - type=int, - metavar="N", - help="encoder input dimension per input channel", - ) - parser.add_argument( - "--vggblock-enc-config", - type=str, - metavar="EXPR", - help=""" - an array of tuples each containing the configuration of one vggblock - [(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...] - """, - ) - parser.add_argument( - "--transformer-enc-config", - type=str, - metavar="EXPR", - help=""" - a tuple containing the configuration of the Transformer layers - configurations: - [(input_dim, - num_heads, - ffn_dim, - normalize_before, - dropout, - attention_dropout, - relu_dropout), ]""", - ) - parser.add_argument( - "--enc-output-dim", - type=int, - metavar="N", - help="encoder output dimension, projecting the LSTM output", - ) - parser.add_argument( - "--in-channels", - type=int, - metavar="N", - help="number of encoder input channels", - ) - parser.add_argument( - "--transformer-context", - type=str, - metavar="EXPR", - help=""" - either None or a tuple of two ints, indicating left/right context a - transformer can have access to""", - ) - parser.add_argument( - "--transformer-sampling", - type=str, - metavar="EXPR", - help=""" - either None or a tuple of ints, indicating sampling factor in each layer""", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - base_architecture_enconly(args) - encoder = VGGTransformerEncoderOnly( - vocab_size=len(task.target_dictionary), - input_feat_per_channel=args.input_feat_per_channel, - vggblock_config=eval(args.vggblock_enc_config), - transformer_config=eval(args.transformer_enc_config), - encoder_output_dim=args.enc_output_dim, - in_channels=args.in_channels, - transformer_context=eval(args.transformer_context), - transformer_sampling=eval(args.transformer_sampling), - ) - return cls(encoder) - - def get_normalized_probs(self, net_output, log_probs, sample=None): - # net_output['encoder_out'] is a (T, B, D) tensor - lprobs = super().get_normalized_probs(net_output, log_probs, sample) - # lprobs is a (T, B, D) tensor - # we need to transoose to get (B, T, D) tensor - lprobs = lprobs.transpose(0, 1).contiguous() - lprobs.batch_first = True - return lprobs - - -class VGGTransformerEncoderOnly(VGGTransformerEncoder): - def __init__( - self, - vocab_size, - 
input_feat_per_channel, - vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, - transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, - encoder_output_dim=512, - in_channels=1, - transformer_context=None, - transformer_sampling=None, - ): - super().__init__( - input_feat_per_channel=input_feat_per_channel, - vggblock_config=vggblock_config, - transformer_config=transformer_config, - encoder_output_dim=encoder_output_dim, - in_channels=in_channels, - transformer_context=transformer_context, - transformer_sampling=transformer_sampling, - ) - self.fc_out = Linear(self.encoder_output_dim, vocab_size) - - def forward(self, src_tokens, src_lengths, **kwargs): - """ - src_tokens: padded tensor (B, T, C * feat) - src_lengths: tensor of original lengths of input utterances (B,) - """ - - enc_out = super().forward(src_tokens, src_lengths) - x = self.fc_out(enc_out["encoder_out"]) - # x = F.log_softmax(x, dim=-1) - # Note: no need this line, because model.get_normalized_prob will call - # log_softmax - return { - "encoder_out": x, # (T, B, C) - "encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B) - } - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return (1e6, 1e6) # an arbitrary large number - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - # nn.init.uniform_(m.weight, -0.1, 0.1) - # nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def Linear(in_features, out_features, bias=True, dropout=0): - """Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features, bias=bias) - # m.weight.data.uniform_(-0.1, 0.1) - # if bias: - # m.bias.data.uniform_(-0.1, 0.1) - return m - - -def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs): - """Weight-normalized Conv1d layer optimized for decoding""" - m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) - std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) - nn.init.normal_(m.weight, mean=0, std=std) - nn.init.constant_(m.bias, 0) - return nn.utils.weight_norm(m, dim=2) - - -def LayerNorm(embedding_dim): - m = nn.LayerNorm(embedding_dim) - return m - - -# seq2seq models -def base_architecture(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) - args.vggblock_enc_config = getattr( - args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG - ) - args.transformer_enc_config = getattr( - args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG - ) - args.enc_output_dim = getattr(args, "enc_output_dim", 512) - args.in_channels = getattr(args, "in_channels", 1) - args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) - args.transformer_dec_config = getattr( - args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG - ) - args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG) - args.transformer_context = getattr(args, "transformer_context", "None") - - -@register_model_architecture("asr_vggtransformer", "vggtransformer_1") -def vggtransformer_1(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.vggblock_enc_config = getattr( - args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" - ) - args.transformer_enc_config = getattr( - args, - "transformer_enc_config", - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14", - ) - args.enc_output_dim = getattr(args, "enc_output_dim", 1024) - args.tgt_embed_dim = getattr(args, 
"tgt_embed_dim", 128) - args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") - args.transformer_dec_config = getattr( - args, - "transformer_dec_config", - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4", - ) - - -@register_model_architecture("asr_vggtransformer", "vggtransformer_2") -def vggtransformer_2(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.vggblock_enc_config = getattr( - args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" - ) - args.transformer_enc_config = getattr( - args, - "transformer_enc_config", - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", - ) - args.enc_output_dim = getattr(args, "enc_output_dim", 1024) - args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) - args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") - args.transformer_dec_config = getattr( - args, - "transformer_dec_config", - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6", - ) - - -@register_model_architecture("asr_vggtransformer", "vggtransformer_base") -def vggtransformer_base(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.vggblock_enc_config = getattr( - args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" - ) - args.transformer_enc_config = getattr( - args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12" - ) - - args.enc_output_dim = getattr(args, "enc_output_dim", 512) - args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) - args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") - args.transformer_dec_config = getattr( - args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6" - ) - # Size estimations: - # Encoder: - # - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K - # Transformer: - # - input dimension adapter: 2560 x 512 -> 1.31M - # - transformer_layers (x12) --> 37.74M - # * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M - # * FFN weight: 512*2048*2 = 2.097M - # - output dimension adapter: 512 x 512 -> 0.26 M - # Decoder: - # - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3 - # - transformer_layer: (x6) --> 25.16M - # * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M - # * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M - # * FFN: 512*2048*2 = 2.097M - # Final FC: - # - FC: 512*5000 = 256K (assuming vocab size 5K) - # In total: - # ~65 M - - -# CTC models -def base_architecture_enconly(args): - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) - args.vggblock_enc_config = getattr( - args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2" - ) - args.transformer_enc_config = getattr( - args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2" - ) - args.enc_output_dim = getattr(args, "enc_output_dim", 512) - args.in_channels = getattr(args, "in_channels", 1) - args.transformer_context = getattr(args, "transformer_context", "None") - args.transformer_sampling = getattr(args, "transformer_sampling", "None") - - -@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1") -def vggtransformer_enc_1(args): - # vggtransformer_1 is the same as vggtransformer_enc_big, except the number - # of layers is increased to 16 - # keep it here for backward compatiablity purpose - args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) - args.vggblock_enc_config = 
getattr( - args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" - ) - args.transformer_enc_config = getattr( - args, - "transformer_enc_config", - "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", - ) - args.enc_output_dim = getattr(args, "enc_output_dim", 1024) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/linformer/linformer_src/models/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/linformer/linformer_src/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/wav2vec_manifest.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/wav2vec_manifest.py deleted file mode 100644 index 9b8aa180e88d9ee98bdca7089aed5046ec0d9cb9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/wav2vec_manifest.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Data pre-processing: build vocabularies and binarize training data. -""" - -import argparse -import glob -import os -import random - -import soundfile - - -def get_parser(): - parser = argparse.ArgumentParser() - parser.add_argument( - "root", metavar="DIR", help="root directory containing flac files to index" - ) - parser.add_argument( - "--valid-percent", - default=0.01, - type=float, - metavar="D", - help="percentage of data to use as validation set (between 0 and 1)", - ) - parser.add_argument( - "--dest", default=".", type=str, metavar="DIR", help="output directory" - ) - parser.add_argument( - "--ext", default="flac", type=str, metavar="EXT", help="extension to look for" - ) - parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed") - parser.add_argument( - "--path-must-contain", - default=None, - type=str, - metavar="FRAG", - help="if set, path must contain this substring for a file to be included in the manifest", - ) - return parser - - -def main(args): - assert args.valid_percent >= 0 and args.valid_percent <= 1.0 - - if not os.path.exists(args.dest): - os.makedirs(args.dest) - - dir_path = os.path.realpath(args.root) - search_path = os.path.join(dir_path, "**/*." 
+ args.ext) - rand = random.Random(args.seed) - - valid_f = ( - open(os.path.join(args.dest, "valid.tsv"), "w") - if args.valid_percent > 0 - else None - ) - - with open(os.path.join(args.dest, "train.tsv"), "w") as train_f: - print(dir_path, file=train_f) - - if valid_f is not None: - print(dir_path, file=valid_f) - - for fname in glob.iglob(search_path, recursive=True): - file_path = os.path.realpath(fname) - - if args.path_must_contain and args.path_must_contain not in file_path: - continue - - frames = soundfile.info(fname).frames - dest = train_f if rand.random() > args.valid_percent else valid_f - print( - "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest - ) - if valid_f is not None: - valid_f.close() - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - main(args) diff --git a/spaces/OIUGLK/bingo/src/lib/bots/bing/sr.ts b/spaces/OIUGLK/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? 
new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py deleted file mode 100644 index 14861aa9ede4fea6a69a49f189bcab997b558148..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from copy import deepcopy -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from .batch_norm import get_norm -from .blocks import DepthwiseSeparableConv2d -from .wrappers import Conv2d - - -class ASPP(nn.Module): - """ - Atrous Spatial Pyramid Pooling (ASPP). - """ - - def __init__( - self, - in_channels, - out_channels, - dilations, - *, - norm, - activation, - pool_kernel_size=None, - dropout: float = 0.0, - use_depthwise_separable_conv=False, - ): - """ - Args: - in_channels (int): number of input channels for ASPP. - out_channels (int): number of output channels. - dilations (list): a list of 3 dilations in ASPP. - norm (str or callable): normalization for all conv layers. - See :func:`layers.get_norm` for supported format. norm is - applied to all conv layers except the conv following - global average pooling. - activation (callable): activation function. - pool_kernel_size (tuple, list): the average pooling size (kh, kw) - for image pooling layer in ASPP. If set to None, it always - performs global average pooling. If not None, it must be - divisible by the shape of inputs in forward(). 
It is recommended - to use a fixed input feature size in training, and set this - option to match this size, so that it performs global average - pooling in training, and the size of the pooling window stays - consistent in inference. - dropout (float): apply dropout on the output of ASPP. It is used in - the official DeepLab implementation with a rate of 0.1: - https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa - use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d - for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`. - """ - super(ASPP, self).__init__() - assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations)) - self.pool_kernel_size = pool_kernel_size - self.dropout = dropout - use_bias = norm == "" - self.convs = nn.ModuleList() - # conv 1x1 - self.convs.append( - Conv2d( - in_channels, - out_channels, - kernel_size=1, - bias=use_bias, - norm=get_norm(norm, out_channels), - activation=deepcopy(activation), - ) - ) - weight_init.c2_xavier_fill(self.convs[-1]) - # atrous convs - for dilation in dilations: - if use_depthwise_separable_conv: - self.convs.append( - DepthwiseSeparableConv2d( - in_channels, - out_channels, - kernel_size=3, - padding=dilation, - dilation=dilation, - norm1=norm, - activation1=deepcopy(activation), - norm2=norm, - activation2=deepcopy(activation), - ) - ) - else: - self.convs.append( - Conv2d( - in_channels, - out_channels, - kernel_size=3, - padding=dilation, - dilation=dilation, - bias=use_bias, - norm=get_norm(norm, out_channels), - activation=deepcopy(activation), - ) - ) - weight_init.c2_xavier_fill(self.convs[-1]) - # image pooling - # We do not add BatchNorm because the spatial resolution is 1x1, - # the original TF implementation has BatchNorm. - if pool_kernel_size is None: - image_pooling = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), - ) - else: - image_pooling = nn.Sequential( - nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1), - Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), - ) - weight_init.c2_xavier_fill(image_pooling[1]) - self.convs.append(image_pooling) - - self.project = Conv2d( - 5 * out_channels, - out_channels, - kernel_size=1, - bias=use_bias, - norm=get_norm(norm, out_channels), - activation=deepcopy(activation), - ) - weight_init.c2_xavier_fill(self.project) - - def forward(self, x): - size = x.shape[-2:] - if self.pool_kernel_size is not None: - if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]: - raise ValueError( - "`pool_kernel_size` must be divisible by the shape of inputs. 
" - "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size) - ) - res = [] - for conv in self.convs: - res.append(conv(x)) - res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False) - res = torch.cat(res, dim=1) - res = self.project(res) - res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res - return res diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/test.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/test.py deleted file mode 100644 index 7809beb7aeeb3bcb10d03093a564917b1f2b4786..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/evaluation/masks/countless/test.py +++ /dev/null @@ -1,195 +0,0 @@ -from copy import deepcopy - -import numpy as np - -import countless2d -import countless3d - -def test_countless2d(): - def test_all_cases(fn, test_zero): - case1 = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) # all different - case2 = np.array([ [ 1, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case1z = np.array([ [ 0, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # all different - case2z = np.array([ [ 0, 0 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case3 = np.array([ [ 1, 1 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # two groups are same - case4 = np.array([ [ 1, 2 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # 3 are the same - case5 = np.array([ [ 5, 5 ], [ 5, 5 ] ]).reshape((2,2,1,1)) # all are the same - - is_255_handled = np.array([ [ 255, 255 ], [ 1, 2 ] ], dtype=np.uint8).reshape((2,2,1,1)) - - test = lambda case: fn(case) - - if test_zero: - assert test(case1z) == [[[[3]]]] # d - assert test(case2z) == [[[[0]]]] # a==b - else: - assert test(case1) == [[[[4]]]] # d - assert test(case2) == [[[[1]]]] # a==b - - assert test(case3) == [[[[1]]]] # a==b - assert test(case4) == [[[[2]]]] # b==c - assert test(case5) == [[[[5]]]] # a==b - - assert test(is_255_handled) == [[[[255]]]] - - assert fn(case1).dtype == case1.dtype - - test_all_cases(countless2d.simplest_countless, False) - test_all_cases(countless2d.quick_countless, False) - test_all_cases(countless2d.quickest_countless, False) - test_all_cases(countless2d.stippled_countless, False) - - - - methods = [ - countless2d.zero_corrected_countless, - countless2d.countless, - countless2d.countless_if, - # countless2d.counting, # counting doesn't respect order so harder to write a test - ] - - for fn in methods: - print(fn.__name__) - test_all_cases(fn, True) - -def test_stippled_countless2d(): - a = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - b = np.array([ [ 0, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - c = np.array([ [ 1, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - d = np.array([ [ 1, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - e = np.array([ [ 1, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - f = np.array([ [ 0, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - g = np.array([ [ 0, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - h = np.array([ [ 0, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - i = np.array([ [ 1, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - j = np.array([ [ 1, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - k = np.array([ [ 1, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - l = np.array([ [ 1, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - m = np.array([ [ 0, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - n = np.array([ [ 0, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - o = np.array([ [ 0, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - z = np.array([ [ 0, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - - test = 
countless2d.stippled_countless - - # Note: We only tested non-matching cases above, - # cases f,g,h,i,j,k prove their duals work as well - # b/c if two pixels are black, either one can be chosen - # if they are different or the same. - - assert test(a) == [[[[4]]]] - assert test(b) == [[[[4]]]] - assert test(c) == [[[[4]]]] - assert test(d) == [[[[4]]]] - assert test(e) == [[[[1]]]] - assert test(f) == [[[[4]]]] - assert test(g) == [[[[4]]]] - assert test(h) == [[[[2]]]] - assert test(i) == [[[[4]]]] - assert test(j) == [[[[1]]]] - assert test(k) == [[[[1]]]] - assert test(l) == [[[[1]]]] - assert test(m) == [[[[2]]]] - assert test(n) == [[[[3]]]] - assert test(o) == [[[[4]]]] - assert test(z) == [[[[0]]]] - - bc = np.array([ [ 0, 2 ], [ 2, 4 ] ]).reshape((2,2,1,1)) - bd = np.array([ [ 0, 2 ], [ 3, 2 ] ]).reshape((2,2,1,1)) - cd = np.array([ [ 0, 2 ], [ 3, 3 ] ]).reshape((2,2,1,1)) - - assert test(bc) == [[[[2]]]] - assert test(bd) == [[[[2]]]] - assert test(cd) == [[[[3]]]] - - ab = np.array([ [ 1, 1 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - ac = np.array([ [ 1, 2 ], [ 1, 0 ] ]).reshape((2,2,1,1)) - ad = np.array([ [ 1, 0 ], [ 3, 1 ] ]).reshape((2,2,1,1)) - - assert test(ab) == [[[[1]]]] - assert test(ac) == [[[[1]]]] - assert test(ad) == [[[[1]]]] - -def test_countless3d(): - def test_all_cases(fn): - alldifferent = [ - [ - [1,2], - [3,4], - ], - [ - [5,6], - [7,8] - ] - ] - allsame = [ - [ - [1,1], - [1,1], - ], - [ - [1,1], - [1,1] - ] - ] - - assert fn(np.array(alldifferent)) == [[[8]]] - assert fn(np.array(allsame)) == [[[1]]] - - twosame = deepcopy(alldifferent) - twosame[1][1][0] = 2 - - assert fn(np.array(twosame)) == [[[2]]] - - threemixed = [ - [ - [3,3], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - assert fn(np.array(threemixed)) == [[[3]]] - - foursame = [ - [ - [4,4], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - - assert fn(np.array(foursame)) == [[[4]]] - - fivesame = [ - [ - [5,4], - [5,5], - ], - [ - [2,4], - [5,5] - ] - ] - - assert fn(np.array(fivesame)) == [[[5]]] - - def countless3d_generalized(img): - return countless3d.countless_generalized(img, (2,2,2)) - def countless3d_dynamic_generalized(img): - return countless3d.dynamic_countless_generalized(img, (2,2,2)) - - methods = [ - countless3d.countless3d, - countless3d.dynamic_countless3d, - countless3d_generalized, - countless3d_dynamic_generalized, - ] - - for fn in methods: - test_all_cases(fn) \ No newline at end of file diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/utils/misc.py b/spaces/OpenMotionLab/MotionGPT/mGPT/utils/misc.py deleted file mode 100644 index 4f2a68d68019098e66905e0e21cb96678031bab0..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/utils/misc.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch - - -def to_numpy(tensor): - if torch.is_tensor(tensor): - return tensor.cpu().numpy() - elif type(tensor).__module__ != 'numpy': - raise ValueError("Cannot convert {} to numpy array".format( - type(tensor))) - return tensor - - -def to_torch(ndarray): - if type(ndarray).__module__ == 'numpy': - return torch.from_numpy(ndarray) - elif not torch.is_tensor(ndarray): - raise ValueError("Cannot convert {} to torch tensor".format( - type(ndarray))) - return ndarray - - -def cleanexit(): - import sys - import os - try: - sys.exit(0) - except SystemExit: - os._exit(0) - diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/context_block.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/context_block.py deleted file mode 100644 index 
d60fdb904c749ce3b251510dff3cc63cea70d42e..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/context_block.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import nn - -from ..utils import constant_init, kaiming_init -from .registry import PLUGIN_LAYERS - - -def last_zero_init(m): - if isinstance(m, nn.Sequential): - constant_init(m[-1], val=0) - else: - constant_init(m, val=0) - - -@PLUGIN_LAYERS.register_module() -class ContextBlock(nn.Module): - """ContextBlock module in GCNet. - - See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' - (https://arxiv.org/abs/1904.11492) for details. - - Args: - in_channels (int): Channels of the input feature map. - ratio (float): Ratio of channels of transform bottleneck - pooling_type (str): Pooling method for context modeling. - Options are 'att' and 'avg', stand for attention pooling and - average pooling respectively. Default: 'att'. - fusion_types (Sequence[str]): Fusion method for feature fusion, - Options are 'channels_add', 'channel_mul', stand for channelwise - addition and multiplication respectively. Default: ('channel_add',) - """ - - _abbr_ = 'context_block' - - def __init__(self, - in_channels, - ratio, - pooling_type='att', - fusion_types=('channel_add', )): - super(ContextBlock, self).__init__() - assert pooling_type in ['avg', 'att'] - assert isinstance(fusion_types, (list, tuple)) - valid_fusion_types = ['channel_add', 'channel_mul'] - assert all([f in valid_fusion_types for f in fusion_types]) - assert len(fusion_types) > 0, 'at least one fusion should be used' - self.in_channels = in_channels - self.ratio = ratio - self.planes = int(in_channels * ratio) - self.pooling_type = pooling_type - self.fusion_types = fusion_types - if pooling_type == 'att': - self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1) - self.softmax = nn.Softmax(dim=2) - else: - self.avg_pool = nn.AdaptiveAvgPool2d(1) - if 'channel_add' in fusion_types: - self.channel_add_conv = nn.Sequential( - nn.Conv2d(self.in_channels, self.planes, kernel_size=1), - nn.LayerNorm([self.planes, 1, 1]), - nn.ReLU(inplace=True), # yapf: disable - nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) - else: - self.channel_add_conv = None - if 'channel_mul' in fusion_types: - self.channel_mul_conv = nn.Sequential( - nn.Conv2d(self.in_channels, self.planes, kernel_size=1), - nn.LayerNorm([self.planes, 1, 1]), - nn.ReLU(inplace=True), # yapf: disable - nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) - else: - self.channel_mul_conv = None - self.reset_parameters() - - def reset_parameters(self): - if self.pooling_type == 'att': - kaiming_init(self.conv_mask, mode='fan_in') - self.conv_mask.inited = True - - if self.channel_add_conv is not None: - last_zero_init(self.channel_add_conv) - if self.channel_mul_conv is not None: - last_zero_init(self.channel_mul_conv) - - def spatial_pool(self, x): - batch, channel, height, width = x.size() - if self.pooling_type == 'att': - input_x = x - # [N, C, H * W] - input_x = input_x.view(batch, channel, height * width) - # [N, 1, C, H * W] - input_x = input_x.unsqueeze(1) - # [N, 1, H, W] - context_mask = self.conv_mask(x) - # [N, 1, H * W] - context_mask = context_mask.view(batch, 1, height * width) - # [N, 1, H * W] - context_mask = self.softmax(context_mask) - # [N, 1, H * W, 1] - context_mask = context_mask.unsqueeze(-1) - # [N, 1, C, 1] - context = torch.matmul(input_x, context_mask) - 
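            # Note: the matmul contracts the H*W axis,
            # [N, 1, C, H*W] @ [N, 1, H*W, 1] -> [N, 1, C, 1], i.e. a softmax-weighted
            # sum over all spatial positions (GCNet's global context vector).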
# [N, C, 1, 1] - context = context.view(batch, channel, 1, 1) - else: - # [N, C, 1, 1] - context = self.avg_pool(x) - - return context - - def forward(self, x): - # [N, C, 1, 1] - context = self.spatial_pool(x) - - out = x - if self.channel_mul_conv is not None: - # [N, C, 1, 1] - channel_mul_term = torch.sigmoid(self.channel_mul_conv(context)) - out = out * channel_mul_term - if self.channel_add_conv is not None: - # [N, C, 1, 1] - channel_add_term = self.channel_add_conv(context) - out = out + channel_add_term - - return out diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/distributed_deprecated.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/distributed_deprecated.py deleted file mode 100644 index 676937a2085d4da20fa87923041a200fca6214eb..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/distributed_deprecated.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.distributed as dist -import torch.nn as nn -from torch._utils import (_flatten_dense_tensors, _take_tensors, - _unflatten_dense_tensors) - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from .registry import MODULE_WRAPPERS -from .scatter_gather import scatter_kwargs - - -@MODULE_WRAPPERS.register_module() -class MMDistributedDataParallel(nn.Module): - - def __init__(self, - module, - dim=0, - broadcast_buffers=True, - bucket_cap_mb=25): - super(MMDistributedDataParallel, self).__init__() - self.module = module - self.dim = dim - self.broadcast_buffers = broadcast_buffers - - self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024 - self._sync_params() - - def _dist_broadcast_coalesced(self, tensors, buffer_size): - for tensors in _take_tensors(tensors, buffer_size): - flat_tensors = _flatten_dense_tensors(tensors) - dist.broadcast(flat_tensors, 0) - for tensor, synced in zip( - tensors, _unflatten_dense_tensors(flat_tensors, tensors)): - tensor.copy_(synced) - - def _sync_params(self): - module_states = list(self.module.state_dict().values()) - if len(module_states) > 0: - self._dist_broadcast_coalesced(module_states, - self.broadcast_bucket_size) - if self.broadcast_buffers: - if (TORCH_VERSION != 'parrots' - and digit_version(TORCH_VERSION) < digit_version('1.0')): - buffers = [b.data for b in self.module._all_buffers()] - else: - buffers = [b.data for b in self.module.buffers()] - if len(buffers) > 0: - self._dist_broadcast_coalesced(buffers, - self.broadcast_bucket_size) - - def scatter(self, inputs, kwargs, device_ids): - return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) - - def forward(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - return self.module(*inputs[0], **kwargs[0]) - - def train_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - output = self.module.train_step(*inputs[0], **kwargs[0]) - return output - - def val_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, - [torch.cuda.current_device()]) - output = self.module.val_step(*inputs[0], **kwargs[0]) - return output diff --git a/spaces/PHZane/emrwa/generate1.py b/spaces/PHZane/emrwa/generate1.py deleted file mode 100644 index 0158a26888ff8e5c2334c9d1f448cf202d382601..0000000000000000000000000000000000000000 --- a/spaces/PHZane/emrwa/generate1.py +++ /dev/null @@ -1,231 +0,0 @@ -import torch -import 
torch.nn.functional as F -import os -import argparse -from tqdm import trange -from transformers import GPT2LMHeadModel - -def check_chance(input): - print("models/"+input) - return "models/"+input - - -def is_word(word): - for item in list(word): - if item not in 'qwertyuiopasdfghjklzxcvbnm': - return False - return True - - -def _is_chinese_char(char): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - cp = ord(char) - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - -def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): - """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering - Args: - logits: logits distribution shape (vocabulary size) - top_k > 0: keep only top k tokens with highest probability (top-k filtering). - top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). - Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) - From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 - """ - assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear - top_k = min(top_k, logits.size(-1)) # Safety check - if top_k > 0: - # Remove all tokens with a probability less than the last token of the top-k - indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] - logits[indices_to_remove] = filter_value - - if top_p > 0.0: - sorted_logits, sorted_indices = torch.sort(logits, descending=True) - cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) - - # Remove tokens with cumulative probability above the threshold - sorted_indices_to_remove = cumulative_probs > top_p - # Shift the indices to the right to keep also the first token above the threshold - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 - - indices_to_remove = sorted_indices[sorted_indices_to_remove] - logits[indices_to_remove] = filter_value - return logits - - -def sample_sequence(model, context, length, n_ctx, tokenizer, temperature=1.0, top_k=30, top_p=0.0, repitition_penalty=1.0, - device='cpu'): - context = torch.tensor(context, dtype=torch.long, device=device) - context = context.unsqueeze(0) - generated = context - with torch.no_grad(): - for _ in trange(length): - inputs = {'input_ids': generated[0][-(n_ctx - 1):].unsqueeze(0)} - outputs = model( - **inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states) - next_token_logits = outputs[0][0, -1, :] - for id in set(generated): - next_token_logits[id] /= repitition_penalty - next_token_logits = next_token_logits / temperature - 
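            # Note: temperatures below 1 sharpen the distribution and above 1 flatten
            # it; the [UNK] token is then banned and top-k / top-p filtering is
            # applied before sampling.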
next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf') - filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) - next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) - generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1) - return generated.tolist()[0] - - -def fast_sample_sequence(model, context, length, temperature=1.0, top_k=30, top_p=0.0, device='cpu'): - inputs = torch.LongTensor(context).view(1, -1).to(device) - if len(context) > 1: - _, past = model(inputs[:, :-1], None)[:2] - prev = inputs[:, -1].view(1, -1) - else: - past = None - prev = inputs - generate = [] + context - with torch.no_grad(): - for i in trange(length): - output = model(prev, past=past) - output, past = output[:2] - output = output[-1].squeeze(0) / temperature - filtered_logits = top_k_top_p_filtering(output, top_k=top_k, top_p=top_p) - next_token = torch.multinomial(torch.softmax(filtered_logits, dim=-1), num_samples=1) - generate.append(next_token.item()) - prev = next_token.view(1, 1) - return generate - - -# 通过命令行参数--fast_pattern,指定模式 -def generate(n_ctx, model, context, length, tokenizer, temperature=1, top_k=0, top_p=0.0, repitition_penalty=1.0, device='cpu', - is_fast_pattern=False): - if is_fast_pattern: - return fast_sample_sequence(model, context, length, temperature=temperature, top_k=top_k, top_p=top_p, - device=device) - else: - return sample_sequence(model, context, length, n_ctx, tokenizer=tokenizer, temperature=temperature, top_k=top_k, top_p=top_p, - repitition_penalty=repitition_penalty, device=device) - - -def main(input): - parser = argparse.ArgumentParser() - parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备') - parser.add_argument('--length', default=-1, type=int, required=False, help='生成长度') - parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size') - #parser.add_argument('--nsamples', default=10, type=int, required=False, help='生成几个样本') - parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本') - parser.add_argument('--temperature', default=1, type=float, required=False, help='生成温度') - parser.add_argument('--topk', default=1, type=int, required=False, help='最高几选一') - parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率') - parser.add_argument('--model_config', default='/config/model_config_small.json', type=str, required=False, - help='模型参数') - parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径') - parser.add_argument('--model_path', default=check_chance(input), type=str, required=False, help='模型路径') - parser.add_argument('--prefix', default='主诉', type=str, required=False, help='生成文章的开头') - parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词') - parser.add_argument('--segment', action='store_true', help='中文以词为单位') - parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本') - parser.add_argument('--save_samples', action='store_true', help='保存产生的样本') - parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径") - parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False) - - args = parser.parse_args() - print('args:\n' + args.__repr__()) - - if args.segment: - from tokenizations import tokenization_bert_word_level as tokenization_bert - else: - from tokenizations import tokenization_bert - - 
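    # Note: the word-level tokenizer is only used when --segment is passed; either
    # way a BertTokenizer is built from the vocab file at --tokenizer_path below.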
os.environ["CUDA_VISIBLE_DEVICES"] = args.device # 此处设置程序使用哪些显卡 - length = args.length - batch_size = args.batch_size - nsamples = args.nsamples - temperature = args.temperature - topk = args.topk - topp = args.topp - repetition_penalty = args.repetition_penalty - - device = "cuda" if torch.cuda.is_available() else "cpu" - - tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path) - model = GPT2LMHeadModel.from_pretrained(args.model_path) - model.to(device) - model.eval() - - n_ctx = model.config.n_ctx - - if length == -1: - length = model.config.n_ctx - if args.save_samples: - if not os.path.exists(args.save_samples_path): - os.makedirs(args.save_samples_path) - samples_file = open(args.save_samples_path + '/samples.txt', 'w', encoding='utf8') - while True: - raw_text = args.prefix - context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text)) - generated = 0 - for _ in range(nsamples // batch_size): - out = generate( - n_ctx=n_ctx, - model=model, - context=context_tokens, - length=length, - is_fast_pattern=args.fast_pattern, tokenizer=tokenizer, - temperature=temperature, top_k=topk, top_p=topp, repitition_penalty=repetition_penalty, device=device - ) - for i in range(batch_size): - generated += 1 - text = tokenizer.convert_ids_to_tokens(out) - for i, item in enumerate(text[:-1]): # 确保英文前后有空格 - if is_word(item) and is_word(text[i + 1]): - text[i] = item + ' ' - for i, item in enumerate(text): - if item == '[MASK]': - text[i] = '' - elif item == '[CLS]': - text[i] = '\n\n' - elif item == '[SEP]': - text[i] = '\n' - info = "=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40 + "\n" - print(info) - text = ''.join(text).replace('##', '').strip() - print(text) - if args.save_samples: - samples_file.write(info) - samples_file.write(text) - samples_file.write('\n') - samples_file.write('=' * 90) - samples_file.write('\n' * 2) - print("=" * 80) - - if generated == nsamples: - # close file when finish writing. 
- if args.save_samples: - samples_file.close() - return text - #break - - - - -if __name__ == '__main__': - main() diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/resnet.py b/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/resnet.py deleted file mode 100644 index aa2bf95130e9815ba378cb6f73207068b81a04b9..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/models/bisenet/resnet.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8 -*- - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.model_zoo as modelzoo - -# from modules.bn import InPlaceABNSync as BatchNorm2d - -resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth' - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - def __init__(self, in_chan, out_chan, stride=1): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(in_chan, out_chan, stride) - self.bn1 = nn.BatchNorm2d(out_chan) - self.conv2 = conv3x3(out_chan, out_chan) - self.bn2 = nn.BatchNorm2d(out_chan) - self.relu = nn.ReLU(inplace=True) - self.downsample = None - if in_chan != out_chan or stride != 1: - self.downsample = nn.Sequential( - nn.Conv2d(in_chan, out_chan, - kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(out_chan), - ) - - def forward(self, x): - residual = self.conv1(x) - residual = F.relu(self.bn1(residual)) - residual = self.conv2(residual) - residual = self.bn2(residual) - - shortcut = x - if self.downsample is not None: - shortcut = self.downsample(x) - - out = shortcut + residual - out = self.relu(out) - return out - - -def create_layer_basic(in_chan, out_chan, bnum, stride=1): - layers = [BasicBlock(in_chan, out_chan, stride=stride)] - for i in range(bnum-1): - layers.append(BasicBlock(out_chan, out_chan, stride=1)) - return nn.Sequential(*layers) - - -class Resnet18(nn.Module): - def __init__(self): - super(Resnet18, self).__init__() - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1) - self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2) - self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2) - self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2) - self.init_weight() - - def forward(self, x): - x = self.conv1(x) - x = F.relu(self.bn1(x)) - x = self.maxpool(x) - - x = self.layer1(x) - feat8 = self.layer2(x) # 1/8 - feat16 = self.layer3(feat8) # 1/16 - feat32 = self.layer4(feat16) # 1/32 - return feat8, feat16, feat32 - - def init_weight(self): - state_dict = modelzoo.load_url(resnet18_url) - self_state_dict = self.state_dict() - for k, v in state_dict.items(): - if 'fc' in k: continue - self_state_dict.update({k: v}) - self.load_state_dict(self_state_dict) - - def get_params(self): - wd_params, nowd_params = [], [] - for name, module in self.named_modules(): - if isinstance(module, (nn.Linear, nn.Conv2d)): - wd_params.append(module.weight) - if not module.bias is None: - nowd_params.append(module.bias) - elif isinstance(module, nn.BatchNorm2d): - nowd_params += list(module.parameters()) - return wd_params, nowd_params - - -if __name__ == "__main__": - net = Resnet18() - x = torch.randn(16, 3, 224, 224) - out = net(x) - print(out[0].size()) - 
print(out[1].size())
-    print(out[2].size())
-    net.get_params()
diff --git a/spaces/PKaushik/humandetect/yolov6/utils/envs.py b/spaces/PKaushik/humandetect/yolov6/utils/envs.py
deleted file mode 100644
index 10159a9484ed525ad5ef3826ec3db4bf70b4c9cc..0000000000000000000000000000000000000000
--- a/spaces/PKaushik/humandetect/yolov6/utils/envs.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import os
-import random
-import numpy as np
-
-import torch
-import torch.backends.cudnn as cudnn
-from yolov6.utils.events import LOGGER
-
-
-def get_envs():
-    """Get the PyTorch-needed environment variables from the system environment."""
-    local_rank = int(os.getenv('LOCAL_RANK', -1))
-    rank = int(os.getenv('RANK', -1))
-    world_size = int(os.getenv('WORLD_SIZE', 1))
-    return local_rank, rank, world_size
-
-
-def select_device(device):
-    """Set device information for the program.
-    Args:
-        device: a string, like 'cpu' or '1,2,3,4'
-    Returns:
-        torch.device
-    """
-    if device == 'cpu':
-        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-        LOGGER.info('Using CPU for training... ')
-    elif device:
-        os.environ['CUDA_VISIBLE_DEVICES'] = device
-        assert torch.cuda.is_available()
-        nd = len(device.strip().split(','))
-        LOGGER.info(f'Using {nd} GPU for training... ')
-    cuda = device != 'cpu' and torch.cuda.is_available()
-    device = torch.device('cuda:0' if cuda else 'cpu')
-    return device
-
-
-def set_random_seed(seed, deterministic=False):
-    """ Set random state for the random library, numpy, torch and cudnn.
-    Args:
-        seed: int value.
-        deterministic: bool value.
-    """
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-    if deterministic:
-        cudnn.deterministic = True
-        cudnn.benchmark = False
-    else:
-        cudnn.deterministic = False
-        cudnn.benchmark = True
diff --git a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_rip.sh b/spaces/PSLD/PSLD/stable-diffusion/run/inverse_rip.sh
deleted file mode 100644
index 530f6a3b2aaa1f3a96d40100b795b47a598dccad..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_rip.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-export CUDA_VISIBLE_DEVICES='1'
-python scripts/inverse.py \
-    --file_id='00004.png' \
-    --task_config='configs/inpainting_config_psld.yaml' \
-    --inpainting=1 \
-    --general_inverse=0 \
-    --outdir='outputs/psld-samples-rip';
\ No newline at end of file
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops.go
deleted file mode 100644
index 35561d5f0f76889bc76718070c18e87749e7ca07..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops.go and /dev/null differ
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
deleted file mode 100644
index b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import pickle - -from .base import BaseFileHandler - - -class PickleHandler(BaseFileHandler): - - str_like = False - - def load_from_fileobj(self, file, **kwargs): - return pickle.load(file, **kwargs) - - def load_from_path(self, filepath, **kwargs): - return super(PickleHandler, self).load_from_path( - filepath, mode='rb', **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('protocol', 2) - return pickle.dumps(obj, **kwargs) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('protocol', 2) - pickle.dump(obj, file, **kwargs) - - def dump_to_path(self, obj, filepath, **kwargs): - super(PickleHandler, self).dump_to_path( - obj, filepath, mode='wb', **kwargs) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/utils/trace.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/utils/trace.py deleted file mode 100644 index 5ca99dc3eda05ef980d9a4249b50deca8273b6cc..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/utils/trace.py +++ /dev/null @@ -1,23 +0,0 @@ -import warnings - -import torch - -from annotator.uniformer.mmcv.utils import digit_version - - -def is_jit_tracing() -> bool: - if (torch.__version__ != 'parrots' - and digit_version(torch.__version__) >= digit_version('1.6.0')): - on_trace = torch.jit.is_tracing() - # In PyTorch 1.6, torch.jit.is_tracing has a bug. - # Refers to https://github.com/pytorch/pytorch/issues/42448 - if isinstance(on_trace, bool): - return on_trace - else: - return torch._C._is_tracing() - else: - warnings.warn( - 'torch.jit.is_tracing is only supported after v1.6.0. ' - 'Therefore is_tracing returns False automatically. Please ' - 'set on_trace manually if you are using trace.', UserWarning) - return False diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/boxlist_ops.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/boxlist_ops.py deleted file mode 100644 index c16ff0fd402f948336e2adfd8138677c7f43e4c8..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/boxlist_ops.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch - -from .bounding_box import BoxList - -from maskrcnn_benchmark.layers import nms as _box_nms -from maskrcnn_benchmark.layers import ml_nms as _box_ml_nms - - -def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"): - """ - Performs non-maximum suppression on a boxlist, with scores specified - in a boxlist field via score_field. - - Arguments: - boxlist(BoxList) - nms_thresh (float) - max_proposals (int): if > 0, then only the top max_proposals are kept - after non-maxium suppression - score_field (str) - """ - if nms_thresh <= 0: - return boxlist - mode = boxlist.mode - boxlist = boxlist.convert("xyxy") - boxes = boxlist.bbox - score = boxlist.get_field(score_field) - keep = _box_nms(boxes, score, nms_thresh) - if max_proposals > 0: - keep = keep[: max_proposals] - boxlist = boxlist[keep] - return boxlist.convert(mode) - - -def boxlist_ml_nms(boxlist, nms_thresh, max_proposals=-1, - score_field="scores", label_field="labels"): - """ - Performs non-maximum suppression on a boxlist, with scores specified - in a boxlist field via score_field. 
- - Arguments: - boxlist(BoxList) - nms_thresh (float) - max_proposals (int): if > 0, then only the top max_proposals are kept - after non-maximum suppression - score_field (str) - """ - if nms_thresh <= 0: - return boxlist - mode = boxlist.mode - boxlist = boxlist.convert("xyxy") - boxes = boxlist.bbox - scores = boxlist.get_field(score_field) - labels = boxlist.get_field(label_field) - - if boxes.device==torch.device("cpu"): - keep = [] - unique_labels = torch.unique(labels) - print(unique_labels) - for j in unique_labels: - inds = (labels == j).nonzero().view(-1) - - scores_j = scores[inds] - boxes_j = boxes[inds, :].view(-1, 4) - keep_j = _box_nms(boxes_j, scores_j, nms_thresh) - - keep += keep_j - else: - keep = _box_ml_nms(boxes, scores, labels.float(), nms_thresh) - - if max_proposals > 0: - keep = keep[: max_proposals] - boxlist = boxlist[keep] - - return boxlist.convert(mode) - - -def remove_small_boxes(boxlist, min_size): - """ - Only keep boxes with both sides >= min_size - - Arguments: - boxlist (Boxlist) - min_size (int) - """ - # WORK AROUND: work around unbind using split + squeeze. - xywh_boxes = boxlist.convert("xywh").bbox - _, _, ws, hs = xywh_boxes.split(1, dim=1) - ws = ws.squeeze(1) - hs = hs.squeeze(1) - keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1) - return boxlist[keep] - - -# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py -# with slight modifications -def boxlist_iou(boxlist1, boxlist2): - """Compute the intersection over union of two set of boxes. - The box order must be (xmin, ymin, xmax, ymax). - - Arguments: - box1: (BoxList) bounding boxes, sized [N,4]. - box2: (BoxList) bounding boxes, sized [M,4]. - - Returns: - (tensor) iou, sized [N,M]. - - Reference: - https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py - """ - if boxlist1.size != boxlist2.size: - raise RuntimeError( - "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)) - - N = len(boxlist1) - M = len(boxlist2) - - area1 = boxlist1.area() - area2 = boxlist2.area() - - box1, box2 = boxlist1.bbox, boxlist2.bbox - - lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2] - rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2] - - TO_REMOVE = 1 - - wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - iou = inter / (area1[:, None] + area2 - inter) - return iou - - -# TODO redundant, remove -def _cat(tensors, dim=0): - """ - Efficient version of torch.cat that avoids a copy if there is only a single element in a list - """ - assert isinstance(tensors, (list, tuple)) - if len(tensors) == 1: - return tensors[0] - if isinstance(tensors[0], torch.Tensor): - return torch.cat(tensors, dim) - else: - return cat_boxlist(tensors) - -def cat_boxlist(bboxes): - """ - Concatenates a list of BoxList (having the same image size) into a - single BoxList - - Arguments: - bboxes (list[BoxList]) - """ - assert isinstance(bboxes, (list, tuple)) - assert all(isinstance(bbox, BoxList) for bbox in bboxes) - - size = bboxes[0].size - assert all(bbox.size == size for bbox in bboxes) - - mode = bboxes[0].mode - assert all(bbox.mode == mode for bbox in bboxes) - - fields = set(bboxes[0].fields()) - assert all(set(bbox.fields()) == fields for bbox in bboxes) - - cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) - - for field in fields: - data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0) - cat_boxes.add_field(field, data) - - return 
cat_boxes - - -def getUnionBBox(aBB, bBB, margin = 10): - assert aBB.size==bBB.size - assert aBB.mode==bBB.mode - ih, iw = aBB.size - union_boxes = torch.cat([(torch.min(aBB.bbox[:,[0,1]], bBB.bbox[:,[0,1]]) - margin).clamp(min=0), \ - (torch.max(aBB.bbox[:,[2]], bBB.bbox[:,[2]]) + margin).clamp(max=iw), \ - (torch.max(aBB.bbox[:,[3]], bBB.bbox[:,[3]]) + margin).clamp(max=ih)], dim=1) - return BoxList(union_boxes, aBB.size, mode=aBB.mode) diff --git a/spaces/PirateHFH/IllusionDiffusion/illusion_style.py b/spaces/PirateHFH/IllusionDiffusion/illusion_style.py deleted file mode 100644 index 54a3614533167bcee0d4ba77c2f07294c1ed1690..0000000000000000000000000000000000000000 --- a/spaces/PirateHFH/IllusionDiffusion/illusion_style.py +++ /dev/null @@ -1,10 +0,0 @@ -css=''' -#share-btn-container {padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;} -div#share-btn-container > div {flex-direction: row;background: black;align-items: center} -#share-btn-container:hover {background-color: #060606} -#share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;} -#share-btn * {all: unset} -#share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;} -#share-btn-container .wrap {display: none !important} -#share-btn-container.hidden {display: none!important} -''' \ No newline at end of file diff --git a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/README.md b/spaces/Plachta/VITS-Umamusume-voice-synthesizer/README.md deleted file mode 100644 index ca5ce14e43d83960115b5ba1c4a87d7b46adea81..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Multilingual Anime TTS -emoji: 🎙🐴 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Prasanna18/Nagpur-FoodGPT/README.md b/spaces/Prasanna18/Nagpur-FoodGPT/README.md deleted file mode 100644 index dd79a1fae548a0bb77887a9efc65e29e910bc84b..0000000000000000000000000000000000000000 --- a/spaces/Prasanna18/Nagpur-FoodGPT/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NagpurFoodGPT! 
-emoji: 🍊 -colorFrom: #FF5733 -colorTo: #FFBF00 -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - - diff --git a/spaces/Protatoes/proxy_shit/README.md b/spaces/Protatoes/proxy_shit/README.md deleted file mode 100644 index e862bc1ba08abf29183ea917afffba180f6d4685..0000000000000000000000000000000000000000 --- a/spaces/Protatoes/proxy_shit/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Proxy Shit -emoji: 🌖 -colorFrom: purple -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Realcat/image-matching-webui/third_party/SGMNet/superpoint/__init__.py b/spaces/Realcat/image-matching-webui/third_party/SGMNet/superpoint/__init__.py deleted file mode 100644 index f1127dfc54047e2d0d877da1d3eb5c2ed569b85e..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/SGMNet/superpoint/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .superpoint import SuperPoint diff --git a/spaces/Redgon/bingo/tests/kblob.ts b/spaces/Redgon/bingo/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/Ritori/TTS_Yui/train.py b/spaces/Ritori/TTS_Yui/train.py deleted file mode 100644 index fc22bd44ee052b24a12d744c1661919dc5ab8217..0000000000000000000000000000000000000000 --- a/spaces/Ritori/TTS_Yui/train.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import time -import math -import torch -import argparse -import torch.distributed as dist - - -from numpy import finfo -from model import Tacotron2 -from torch.backends import cudnn -from hparams import create_hparams -from logger import Tacotron2Logger -from torch.utils.data import DataLoader -from loss_function import Tacotron2Loss -from distributed import apply_gradient_allreduce -from data_utils import TextMelLoader, TextMelCollate -from torch.utils.data.distributed import DistributedSampler - - -device = torch.device('cuda') if torch.cuda.is_available() else 'cpu' - -# 整理tensor -def reduce_tensor(tensor, n_gpus): - rt = tensor.clone() - dist.all_reduce(rt, op=dist.reduce_op.SUM) - rt /= n_gpus - return rt - - -def init_distributed(hparams, n_gpus, rank, group_name): - #assert torch.cuda.is_available(), "Distributed mode requires CUDA." 
- if torch.cuda.is_available() : - # Set cuda device so everything is done on the right GPU. - torch.cuda.set_device(rank % torch.cuda.device_count()) - # Initialize distributed communication - dist.init_process_group(backend=hparams.dist_backend, - init_method=hparams.dist_url, - world_size=n_gpus, - rank=rank, - group_name=group_name) - print("Distributed mode requires CUDA.") - else : - print("Use the CPU") - print("Initializing Distributed") - - print("Done initializing distributed") - - - -def prepare_dataloaders(hparams): - # Get data, data loaders and collate function ready - trainset = TextMelLoader(hparams.training_files, hparams) - valset = TextMelLoader(hparams.validation_files, hparams) - collate_fn = TextMelCollate(hparams.n_frames_per_step) - - if hparams.distributed_run: - train_sampler = DistributedSampler(trainset) - shuffle = False - else: - train_sampler = None - shuffle = True - - train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle, - sampler=train_sampler, - batch_size=hparams.batch_size, pin_memory=False, - drop_last=True, collate_fn=collate_fn) - return train_loader, valset, collate_fn - - -def prepare_directories_and_logger(output_directory, log_directory, rank): - if rank == 0: - if not os.path.isdir(output_directory): - os.makedirs(output_directory) - os.chmod(output_directory, 0o775) - logger = Tacotron2Logger(os.path.join(output_directory, log_directory)) - else: - logger = None - return logger - - -def load_model(hparams): - model = Tacotron2(hparams) - model.to(device) - if hparams.fp16_run: - model.decoder.attention_layer.score_mask_value = finfo('float16').min - - if hparams.distributed_run: - model = apply_gradient_allreduce(model) - - return model - - -def warm_start_model(checkpoint_path, model, ignore_layers): - assert os.path.isfile(checkpoint_path) - print("Warm starting model from checkpoint '{}'".format(checkpoint_path)) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - model_dict = checkpoint_dict['state_dict'] - if len(ignore_layers) > 0: - model_dict = {k: v for k, v in model_dict.items() - if k not in ignore_layers} - dummy_dict = model.state_dict() - dummy_dict.update(model_dict) - model_dict = dummy_dict - model.load_state_dict(model_dict) - return model - - -def load_checkpoint(checkpoint_path, model, optimizer): - assert os.path.isfile(checkpoint_path) - print("Loading checkpoint '{}'".format(checkpoint_path)) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - model.load_state_dict(checkpoint_dict['state_dict']) - optimizer.load_state_dict(checkpoint_dict['optimizer']) - learning_rate = checkpoint_dict['learning_rate'] - iteration = checkpoint_dict['iteration'] - print("Loaded checkpoint '{}' from iteration {}" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, filepath): - print("Saving model and optimizer state at iteration {} to {}".format( - iteration, filepath)) - torch.save({'iteration': iteration, - 'state_dict': model.state_dict(), - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, filepath) - - -def validate(model, criterion, valset, iteration, batch_size, n_gpus, - collate_fn, logger, distributed_run, rank): - """Handles all the validation scoring and printing""" - model.eval() - with torch.no_grad(): - val_sampler = DistributedSampler(valset) if distributed_run else None - val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1, - 
shuffle=False, batch_size=batch_size, - pin_memory=False, collate_fn=collate_fn) - - val_loss = 0.0 - for i, batch in enumerate(val_loader): - x, y = model.parse_batch(batch) - y_pred = model(x) - loss = criterion(y_pred, y) - if distributed_run: - reduced_val_loss = reduce_tensor(loss.data, n_gpus).item() - else: - reduced_val_loss = loss.item() - val_loss += reduced_val_loss - val_loss = val_loss / (i + 1) - - model.train() - if rank == 0: - print("Validation loss {}: {:9f} ".format(iteration, val_loss)) - logger.log_validation(val_loss, model, y, y_pred, iteration) - - -def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus, - rank, group_name, hparams): - """Training and validation logging results to tensorboard and stdout - - Params - ------ - output_directory (string): directory to save checkpoints - log_directory (string) directory to save tensorboard logs - checkpoint_path(string): checkpoint path - n_gpus (int): number of gpus - rank (int): rank of current gpu - hparams (object): comma separated list of "name=value" pairs. - """ - if hparams.distributed_run: - init_distributed(hparams, n_gpus, rank, group_name) - - torch.manual_seed(hparams.seed) - torch.cuda.manual_seed(hparams.seed) - - model = load_model(hparams) - learning_rate = hparams.learning_rate - optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, - weight_decay=hparams.weight_decay) - - # 默认的是 False 可以注释掉 - #if hparams.fp16_run: - # from apex import amp - # model, optimizer = amp.initialize( - # model, optimizer, opt_level='O2') - - if hparams.distributed_run: - model = apply_gradient_allreduce(model) - - criterion = Tacotron2Loss() - logger = prepare_directories_and_logger(output_directory, log_directory, rank) - train_loader, valset, collate_fn = prepare_dataloaders(hparams) - - # Load checkpoint if one exists - iteration = 0 - epoch_offset = 0 - if checkpoint_path is not None: - if warm_start: - model = warm_start_model( - checkpoint_path, model, hparams.ignore_layers) - else: - model, optimizer, _learning_rate, iteration = load_checkpoint( - checkpoint_path, model, optimizer) - if hparams.use_saved_learning_rate: - learning_rate = _learning_rate - iteration += 1 # next iteration is iteration + 1 - epoch_offset = max(0, int(iteration / len(train_loader))) - - model.train() - is_overflow = False - # ================ MAIN TRAINNIG LOOP! 
=================== - for epoch in range(epoch_offset, hparams.epochs): - print("Epoch: {}".format(epoch)) - for i, batch in enumerate(train_loader): - start = time.perf_counter() - for param_group in optimizer.param_groups: - param_group['lr'] = learning_rate - - model.zero_grad() - x, y = model.parse_batch(batch) - y_pred = model(x) - - loss = criterion(y_pred, y) - if hparams.distributed_run: - reduced_loss = reduce_tensor(loss.data, n_gpus).item() - else: - reduced_loss = loss.item() - - loss.backward() - grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh) - optimizer.step() - - if not is_overflow and rank == 0: - duration = time.perf_counter() - start - print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format( - iteration, reduced_loss, grad_norm, duration)) - logger.log_training( - reduced_loss, grad_norm, learning_rate, duration, iteration) - - if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0): - validate(model, criterion, valset, iteration, - hparams.batch_size, n_gpus, collate_fn, logger, - hparams.distributed_run, rank) - if rank == 0: - checkpoint_path = os.path.join( - output_directory, "checkpoint_{}".format(iteration)) - save_checkpoint(model, optimizer, learning_rate, iteration, - checkpoint_path) - - iteration += 1 - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('-o', '--output_directory', type=str, - help='directory to save checkpoints ') - parser.add_argument('-l', '--log_directory', type=str, - help='directory to save tensorboard logs') - parser.add_argument('-c', '--checkpoint_path', type=str, default=None, - required=False, help='checkpoint path') - parser.add_argument('--warm_start', action='store_true', - help='load model weights only, ignore specified layers') - parser.add_argument('--n_gpus', type=int, default=1, - required=False, help='number of gpus') - parser.add_argument('--rank', type=int, default=0, - required=False, help='rank of current gpu') - parser.add_argument('--group_name', type=str, default='group_name', - required=False, help='Distributed group name') - parser.add_argument('--hparams', type=str, - required=False, help='comma separated name=value pairs') - - args = parser.parse_args() - hparams = create_hparams() - - cudnn.enabled = hparams.cudnn_enabled#create_hparams.cudnn_enabled - cudnn.benchmark = hparams.cudnn_benchmark#create_hparams.cudnn_benchmark - - print("FP16 Run:", hparams.fp16_run) - print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling) - print("Distributed Run:", hparams.distributed_run) - print("cuDNN Enabled:", hparams.cudnn_enabled) - print("cuDNN Benchmark:", hparams.cudnn_benchmark) - - train(args.output_directory, - args.log_directory, - args.checkpoint_path, - args.warm_start, - args.n_gpus, - args.rank, - args.group_name, - hparams) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/visualization/color.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/visualization/color.py deleted file mode 100644 index 9041e0e6b7581c3356795d6a3c5e84667c88f025..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/visualization/color.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from enum import Enum - -import numpy as np - -from annotator.uniformer.mmcv.utils import is_str - - -class Color(Enum): - """An enum that defines common colors. - - Contains red, green, blue, cyan, yellow, magenta, white and black. 
- """ - red = (0, 0, 255) - green = (0, 255, 0) - blue = (255, 0, 0) - cyan = (255, 255, 0) - yellow = (0, 255, 255) - magenta = (255, 0, 255) - white = (255, 255, 255) - black = (0, 0, 0) - - -def color_val(color): - """Convert various input to color tuples. - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[int]: A tuple of 3 integers indicating BGR channels. - """ - if is_str(color): - return Color[color].value - elif isinstance(color, Color): - return color.value - elif isinstance(color, tuple): - assert len(color) == 3 - for channel in color: - assert 0 <= channel <= 255 - return color - elif isinstance(color, int): - assert 0 <= color <= 255 - return color, color, color - elif isinstance(color, np.ndarray): - assert color.ndim == 1 and color.size == 3 - assert np.all((color >= 0) & (color <= 255)) - color = color.astype(np.uint8) - return tuple(color) - else: - raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/res_layer.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/res_layer.py deleted file mode 100644 index 4a4efd3dd30b30123ed5135eac080ad9f7f7b448..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/res_layer.py +++ /dev/null @@ -1,187 +0,0 @@ -from mmcv.cnn import build_conv_layer, build_norm_layer -from torch import nn as nn - - -class ResLayer(nn.Sequential): - """ResLayer to build ResNet style backbone. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. 
Default: True - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - if avg_down: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - if downsample_first: - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - inplanes = planes * block.expansion - for _ in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - - else: # downsample_first=False is for HourglassModule - for _ in range(num_blocks - 1): - layers.append( - block( - inplanes=inplanes, - planes=inplanes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - super(ResLayer, self).__init__(*layers) - - -class SimplifiedBasicBlock(nn.Module): - """Simplified version of original basic residual block. This is used in - `SCNet `_. - - - Norm layer is now optional - - Last ReLU in forward function is removed - """ - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None): - super(SimplifiedBasicBlock, self).__init__() - assert dcn is None, 'Not implemented yet.' - assert plugins is None, 'Not implemented yet.' - assert not with_cp, 'Not implemented yet.' 
- self.with_norm = norm_cfg is not None - with_bias = True if norm_cfg is None else False - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=with_bias) - if self.with_norm: - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, planes, postfix=1) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, planes, planes, 3, padding=1, bias=with_bias) - if self.with_norm: - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, planes, postfix=2) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) if self.with_norm else None - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) if self.with_norm else None - - def forward(self, x): - """Forward function.""" - - identity = x - - out = self.conv1(x) - if self.with_norm: - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - if self.with_norm: - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/schedules/schedule_80k.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/schedules/schedule_80k.py deleted file mode 100644 index aed63884d1ea6a39bb8a7b09c5659bf4c869eff2..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/schedules/schedule_80k.py +++ /dev/null @@ -1,21 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from UniFormer repo: From https://github.com/Sense-X/UniFormer - * Apache-2.0 license -''' - -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=80000) -checkpoint_config = dict(by_epoch=False, interval=8000) -evaluation = dict(interval=8000, metric='mIoU') diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/inception_score.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/inception_score.py deleted file mode 100644 index 3822c1435901a47e8c192b52cd3ed1ce5de67acd..0000000000000000000000000000000000000000 --- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/metrics/inception_score.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Inception Score (IS) from the paper "Improved techniques for training -GANs". Matches the original implementation by Salimans et al. at -https://github.com/openai/improved-gan/blob/master/inception_score/model.py""" - -import numpy as np -from . import metric_utils - -#---------------------------------------------------------------------------- - -def compute_is(opts, num_gen, num_splits): - # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz - detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt' - detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer. - - gen_probs = metric_utils.compute_feature_stats_for_generator( - opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, - capture_all=True, max_items=num_gen).get_all() - - if opts.rank != 0: - return float('nan'), float('nan') - - scores = [] - for i in range(num_splits): - part = gen_probs[i * num_gen // num_splits : (i + 1) * num_gen // num_splits] - kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True))) - kl = np.mean(np.sum(kl, axis=1)) - scores.append(np.exp(kl)) - return float(np.mean(scores)), float(np.std(scores)) - -#---------------------------------------------------------------------------- diff --git a/spaces/Sal-ONE/AI_Code_Gen/README.md b/spaces/Sal-ONE/AI_Code_Gen/README.md deleted file mode 100644 index 189033ed804a824e281f51ae3a295cc3623b862a..0000000000000000000000000000000000000000 --- a/spaces/Sal-ONE/AI_Code_Gen/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI Code Gen -emoji: 💻 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/SanjayreddyBaddipadiga/MyfirstGenAIChatBot/app.py b/spaces/SanjayreddyBaddipadiga/MyfirstGenAIChatBot/app.py deleted file mode 100644 index 2dbf3ae89c2e3fdab7134107dd346f984dca8eb1..0000000000000000000000000000000000000000 --- a/spaces/SanjayreddyBaddipadiga/MyfirstGenAIChatBot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/Sasidhar/information-extraction-demo/text_annotatator.py b/spaces/Sasidhar/information-extraction-demo/text_annotatator.py deleted file mode 100644 index 7edb78e36f4f06971bbdd7557fb83a6a81dd41b5..0000000000000000000000000000000000000000 --- a/spaces/Sasidhar/information-extraction-demo/text_annotatator.py +++ /dev/null @@ -1,50 +0,0 @@ -def get_display_only_data(): - data1 = { - "tokens": [ - {"text": "He", "labels": ["Person"]}, - {"text": "loves"}, - {"text": "his"}, - {"text": "dog", "labels": ["Animal", "Pet"]}, - ], - "labels": [ - {"text": "Person"}, - {"text": "Action"}, - {"text": "Animal"}, - ] - } - - return data1 - -def get_editable_data(): - - data2 = { - "allowEditing": True, - "tokens": [ - {"text": "He", "labels": ["Pronoun", "Person"]}, - {"text": "loves", "labels": ["Action"]}, - {"text": "his"}, - {"text": "dog", "labels": ["Animal"]}, - ], - "labels": [ - {"text": "Pronoun", "style": { - "color": "red", - "background-color": "white", - "font-size": "8px", - "border": "3px dashed red", - }}, - {"text": "Verb", "style": { - "color": "green", - "background-color": "white", - "font-size": "8px", - "font-weight": "900", - }}, - {"text": "Noun", "style": { - "color": "blue", - "background-color": "white", - "font-size": "8px", - }}, - {"text": "Person"}, - {"text": "Animal"}, - ] - } - return data2 \ No newline at end of file diff --git a/spaces/Saturdays/mamamIA/README.md b/spaces/Saturdays/mamamIA/README.md deleted file mode 100644 index 414fd2c849e2d8e53a513a85a6ef3ab315d45590..0000000000000000000000000000000000000000 --- a/spaces/Saturdays/mamamIA/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MamamIA -emoji: 📈 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/SeViLA/SeViLA/lavis/models/clip_models/pretrained.py b/spaces/SeViLA/SeViLA/lavis/models/clip_models/pretrained.py deleted file mode 100644 index a8d9834952263a0cd19c775d2576628e4ee580cd..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/clip_models/pretrained.py +++ /dev/null @@ -1,182 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause - - Based on https://github.com/mlfoundations/open_clip -""" - -import hashlib -import os -import urllib -import warnings - -from tqdm import tqdm - -_RN50 = dict( - openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", - cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt", -) - -_RN50_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", - cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt", -) - -_RN101 = dict( - openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt", -) - -_RN101_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt", -) - -_RN50x4 = dict( - openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", -) - -_RN50x16 = dict( - openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", -) - -_RN50x64 = dict( - openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", -) - -_VITB32 = dict( - openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", - laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", - laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt", -) - -_VITB32_quickgelu = dict( - openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", - laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", - laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt", -) - -_VITB16 = dict( - openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", -) - -_VITL14 = dict( - 
openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", -) - -_VITL14_336 = dict( - openai="https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt" -) - -_PRETRAINED = { - "RN50": _RN50, - "RN50-quickgelu": _RN50_quickgelu, - "RN101": _RN101, - "RN101-quickgelu": _RN101_quickgelu, - "RN50x4": _RN50x4, - "RN50x16": _RN50x16, - "ViT-B-32": _VITB32, - "ViT-B-32-quickgelu": _VITB32_quickgelu, - "ViT-B-16": _VITB16, - "ViT-L-14": _VITL14, - "ViT-L-14-336": _VITL14_336, -} - - -def list_pretrained(as_str: bool = False): - """returns list of pretrained models - Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True - """ - return [ - ":".join([k, t]) if as_str else (k, t) - for k in _PRETRAINED.keys() - for t in _PRETRAINED[k].keys() - ] - - -def list_pretrained_tag_models(tag: str): - """return all models having the specified pretrain tag""" - models = [] - for k in _PRETRAINED.keys(): - if tag in _PRETRAINED[k]: - models.append(k) - return models - - -def list_pretrained_model_tags(model: str): - """return all pretrain tags for the specified model architecture""" - tags = [] - if model in _PRETRAINED: - tags.extend(_PRETRAINED[model].keys()) - return tags - - -def get_pretrained_url(model: str, tag: str): - if model not in _PRETRAINED: - return "" - model_pretrained = _PRETRAINED[model] - tag = tag.lower() - if tag not in model_pretrained: - return "" - return model_pretrained[tag] - - -def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - if "openaipublic" in url: - expected_sha256 = url.split("/")[-2] - else: - expected_sha256 = "" - - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if expected_sha256: - if ( - hashlib.sha256(open(download_target, "rb").read()).hexdigest() - == expected_sha256 - ): - return download_target - else: - warnings.warn( - f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" - ) - else: - return download_target - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm( - total=int(source.info().get("Content-Length")), - ncols=80, - unit="iB", - unit_scale=True, - ) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if ( - expected_sha256 - and hashlib.sha256(open(download_target, "rb").read()).hexdigest() - != expected_sha256 - ): - raise RuntimeError( - f"Model has been downloaded but the SHA256 checksum does not not match" - ) - - return download_target diff --git a/spaces/ServerX/PorcoDiaz/infer/modules/uvr5/mdxnet.py b/spaces/ServerX/PorcoDiaz/infer/modules/uvr5/mdxnet.py deleted file mode 100644 index 86a066893ad99cfed77788027a9deb8ed486a7f2..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/infer/modules/uvr5/mdxnet.py +++ /dev/null @@ -1,246 +0,0 @@ -import os -import logging - -logger = logging.getLogger(__name__) - -import librosa -import numpy as np -import soundfile as sf -import torch -from tqdm import tqdm - -cpu = torch.device("cpu") - - -class ConvTDFNetTrim: - def __init__( - self, device, 
model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024 - ): - super(ConvTDFNetTrim, self).__init__() - - self.dim_f = dim_f - self.dim_t = 2**dim_t - self.n_fft = n_fft - self.hop = hop - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to( - device - ) - self.target_name = target_name - self.blender = "blender" in model_name - - self.dim_c = 4 - out_c = self.dim_c * 4 if target_name == "*" else self.dim_c - self.freq_pad = torch.zeros( - [1, out_c, self.n_bins - self.dim_f, self.dim_t] - ).to(device) - - self.n = L // 2 - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop, - window=self.window, - center=True, - return_complex=True, - ) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape( - [-1, self.dim_c, self.n_bins, self.dim_t] - ) - return x[:, :, : self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = ( - self.freq_pad.repeat([x.shape[0], 1, 1, 1]) - if freq_pad is None - else freq_pad - ) - x = torch.cat([x, freq_pad], -2) - c = 4 * 2 if self.target_name == "*" else 2 - x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape( - [-1, 2, self.n_bins, self.dim_t] - ) - x = x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft( - x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True - ) - return x.reshape([-1, c, self.chunk_size]) - - -def get_models(device, dim_f, dim_t, n_fft): - return ConvTDFNetTrim( - device=device, - model_name="Conv-TDF", - target_name="vocals", - L=11, - dim_f=dim_f, - dim_t=dim_t, - n_fft=n_fft, - ) - - -class Predictor: - def __init__(self, args): - import onnxruntime as ort - - logger.info(ort.get_available_providers()) - self.args = args - self.model_ = get_models( - device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft - ) - self.model = ort.InferenceSession( - os.path.join(args.onnx, self.model_.target_name + ".onnx"), - providers=[ - "CUDAExecutionProvider", - "DmlExecutionProvider", - "CPUExecutionProvider", - ], - ) - logger.info("ONNX load done") - - def demix(self, mix): - samples = mix.shape[-1] - margin = self.args.margin - chunk_size = self.args.chunks * 44100 - assert not margin == 0, "margin cannot be zero!" 
- if margin > chunk_size: - margin = chunk_size - - segmented_mix = {} - - if self.args.chunks == 0 or samples < chunk_size: - chunk_size = samples - - counter = -1 - for skip in range(0, samples, chunk_size): - counter += 1 - - s_margin = 0 if counter == 0 else margin - end = min(skip + chunk_size + margin, samples) - - start = skip - s_margin - - segmented_mix[skip] = mix[:, start:end].copy() - if end == samples: - break - - sources = self.demix_base(segmented_mix, margin_size=margin) - """ - mix:(2,big_sample) - segmented_mix:offset->(2,small_sample) - sources:(1,2,big_sample) - """ - return sources - - def demix_base(self, mixes, margin_size): - chunked_sources = [] - progress_bar = tqdm(total=len(mixes)) - progress_bar.set_description("Processing") - for mix in mixes: - cmix = mixes[mix] - sources = [] - n_sample = cmix.shape[1] - model = self.model_ - trim = model.n_fft // 2 - gen_size = model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - mix_p = np.concatenate( - (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1 - ) - mix_waves = [] - i = 0 - while i < n_sample + pad: - waves = np.array(mix_p[:, i : i + model.chunk_size]) - mix_waves.append(waves) - i += gen_size - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) - with torch.no_grad(): - _ort = self.model - spek = model.stft(mix_waves) - if self.args.denoise: - spec_pred = ( - -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 - + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 - ) - tar_waves = model.istft(torch.tensor(spec_pred)) - else: - tar_waves = model.istft( - torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0]) - ) - tar_signal = ( - tar_waves[:, :, trim:-trim] - .transpose(0, 1) - .reshape(2, -1) - .numpy()[:, :-pad] - ) - - start = 0 if mix == 0 else margin_size - end = None if mix == list(mixes.keys())[::-1][0] else -margin_size - if margin_size == 0: - end = None - sources.append(tar_signal[:, start:end]) - - progress_bar.update(1) - - chunked_sources.append(sources) - _sources = np.concatenate(chunked_sources, axis=-1) - # del self.model - progress_bar.close() - return _sources - - def prediction(self, m, vocal_root, others_root, format): - os.makedirs(vocal_root, exist_ok=True) - os.makedirs(others_root, exist_ok=True) - basename = os.path.basename(m) - mix, rate = librosa.load(m, mono=False, sr=44100) - if mix.ndim == 1: - mix = np.asfortranarray([mix, mix]) - mix = mix.T - sources = self.demix(mix.T) - opt = sources[0].T - if format in ["wav", "flac"]: - sf.write( - "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate - ) - sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) - else: - path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) - path_other = "%s/%s_others.wav" % (others_root, basename) - sf.write(path_vocal, mix - opt, rate) - sf.write(path_other, opt, rate) - if os.path.exists(path_vocal): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_vocal, path_vocal[:-4] + ".%s" % format) - ) - if os.path.exists(path_other): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_other, path_other[:-4] + ".%s" % format) - ) - - -class MDXNetDereverb: - def __init__(self, chunks, device): - self.onnx = "assets/uvr5_weights/onnx_dereverb_By_FoxJoy" - self.shifts = 10 # 'Predict with randomised equivariant stabilisation' - self.mixing = "min_mag" # ['default','min_mag','max_mag'] - self.chunks = chunks - self.margin = 44100 - self.dim_t = 9 - self.dim_f = 3072 - self.n_fft = 6144 - 
self.denoise = True - self.pred = Predictor(self) - self.device = device - - def path_audio(self, input, vocal_root, others_root, format): - self.pred.prediction(input, vocal_root, others_root, format) diff --git a/spaces/ShilongLiu/Grounding_DINO_demo/README.md b/spaces/ShilongLiu/Grounding_DINO_demo/README.md deleted file mode 100644 index 081e39d1a209013fc2a5342efc9b1307923488c8..0000000000000000000000000000000000000000 --- a/spaces/ShilongLiu/Grounding_DINO_demo/README.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Grounding DINO Demo -emoji: 💻 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Grounding DINO -[📃Paper](https://arxiv.org/abs/2303.05499) | -[📽️Video](https://www.youtube.com/watch?v=wxWDt5UiwY8) | -[🗯️ Github](https://github.com/IDEA-Research/GroundingDINO) | -[📯Demo on Colab](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) | -[🤗Demo on HF (Coming soon)]() - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) \ -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/zero-shot-object-detection-on-mscoco)](https://paperswithcode.com/sota/zero-shot-object-detection-on-mscoco?p=grounding-dino-marrying-dino-with-grounded) \ -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/zero-shot-object-detection-on-odinw)](https://paperswithcode.com/sota/zero-shot-object-detection-on-odinw?p=grounding-dino-marrying-dino-with-grounded) \ -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/object-detection-on-coco-minival)](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=grounding-dino-marrying-dino-with-grounded) \ -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/grounding-dino-marrying-dino-with-grounded/object-detection-on-coco)](https://paperswithcode.com/sota/object-detection-on-coco?p=grounding-dino-marrying-dino-with-grounded) - - - -Official pytorch implementation of [Grounding DINO](https://arxiv.org/abs/2303.05499), a stronger open-set object detector. Code is available now! - - -## Highlight - -- **Open-Set Detection.** Detect **everything** with language! -- **High Performancce.** COCO zero-shot **52.5 AP** (training without COCO data!). COCO fine-tune **63.0 AP**. -- **Flexible.** Collaboration with Stable Diffusion for Image Editting. - -## News -[2023/03/27] Support CPU-only mode. Now the model can run on machines without GPUs.\ -[2023/03/25] A [demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) for Grounding DINO is available at Colab. Thanks to @Piotr! \ -[2023/03/22] Code is available Now! - - - -## TODO - -- [x] Release inference code and demo. -- [x] Release checkpoints. -- [ ] Grounding DINO with Stable Diffusion and GLIGEN demos. -- [ ] Release training codes. - -## Install - -If you have a CUDA environment, please make sure the environment variable `CUDA_HOME` is set. It will be compiled under CPU-only mode if no CUDA available. - -```bash -pip install -e . 
-``` - -## Demo - -```bash -CUDA_VISIBLE_DEVICES=6 python demo/inference_on_a_image.py \ - -c /path/to/config \ - -p /path/to/checkpoint \ - -i .asset/cats.png \ - -o "outputs/0" \ - -t "cat ear." \ - [--cpu-only] # open it for cpu mode -``` -See the `demo/inference_on_a_image.py` for more details. - -## Checkpoints - - - - - - - - - - - - - - - - - - - - - - - - - -
-|   | name | backbone | Data | box AP on COCO | Checkpoint | Config |
-|---|------|----------|------|----------------|------------|--------|
-| 1 | GroundingDINO-T | Swin-T | O365,GoldG,Cap4M | 48.4 (zero-shot) / 57.2 (fine-tune) | link | link |
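For readers who prefer the Python API over the CLI demo above, here is a minimal zero-shot inference sketch. It assumes the `load_model` / `load_image` / `predict` / `annotate` helpers from `groundingdino.util.inference` (the ones used in the linked Colab notebook), the Swin-T config shipped with the repo, and the checkpoint from the table above downloaded to a local `weights/` folder; none of these paths are guaranteed by this README itself.

```python
# Minimal sketch, assuming the groundingdino.util.inference helpers and the
# Swin-T config/checkpoint named in the table above; paths are illustrative.
import cv2
from groundingdino.util.inference import load_model, load_image, predict, annotate

model = load_model(
    "groundingdino/config/GroundingDINO_SwinT_OGC.py",  # config for GroundingDINO-T
    "weights/groundingdino_swint_ogc.pth",              # downloaded checkpoint
)
image_source, image = load_image(".asset/cats.png")

boxes, logits, phrases = predict(
    model=model,
    image=image,
    caption="cat ear.",        # same prompt as the CLI example above
    box_threshold=0.35,
    text_threshold=0.25,
)

# annotate() returns a BGR numpy image with the predicted boxes and phrases drawn on it.
annotated = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
cv2.imwrite("annotated_cats.jpg", annotated)
```

If there is no GPU, the CPU-only mode mentioned above applies; depending on the repo version, `load_model` may also take a `device="cpu"` argument.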
- - - -## Acknowledgement - -Our model is related to [DINO](https://github.com/IDEA-Research/DINO) and [GLIP](https://github.com/microsoft/GLIP). Thanks for their great work! - -We also thank great previous work including DETR, Deformable DETR, SMCA, Conditional DETR, Anchor DETR, Dynamic DETR, DAB-DETR, DN-DETR, etc. More related work are available at [Awesome Detection Transformer](https://github.com/IDEACVR/awesome-detection-transformer). A new toolbox [detrex](https://github.com/IDEA-Research/detrex) is available as well. - -Thanks [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) and [GLIGEN](https://github.com/gligen/GLIGEN) for their awesome models. - - -## Citation - -If you find our work helpful for your research, please consider citing the following BibTeX entry. - -```bibtex -@inproceedings{ShilongLiu2023GroundingDM, - title={Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection}, - author={Shilong Liu and Zhaoyang Zeng and Tianhe Ren and Feng Li and Hao Zhang and Jie Yang and Chunyuan Li and Jianwei Yang and Hang Su and Jun Zhu and Lei Zhang}, - year={2023} -} -``` - - - - - diff --git a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/layers/tf_layers.py b/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/layers/tf_layers.py deleted file mode 100644 index c0f46bd755c161cda2ac904fe37f3f3c6357a88d..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/layers/tf_layers.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2020 MINH ANH (@dathudeptrai) -# MIT License (https://opensource.org/licenses/MIT) - -"""Tensorflow Layer modules complatible with pytorch.""" - -import tensorflow as tf - - -class TFReflectionPad1d(tf.keras.layers.Layer): - """Tensorflow ReflectionPad1d module.""" - - def __init__(self, padding_size): - """Initialize TFReflectionPad1d module. - - Args: - padding_size (int): Padding size. - - """ - super(TFReflectionPad1d, self).__init__() - self.padding_size = padding_size - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensor: Padded tensor (B, T + 2 * padding_size, 1, C). - - """ - return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0], [0, 0]], "REFLECT") - - -class TFConvTranspose1d(tf.keras.layers.Layer): - """Tensorflow ConvTranspose1d module.""" - - def __init__(self, channels, kernel_size, stride, padding): - """Initialize TFConvTranspose1d( module. - - Args: - channels (int): Number of channels. - kernel_size (int): kernel size. - strides (int): Stride width. - padding (str): Padding type ("same" or "valid"). - - """ - super(TFConvTranspose1d, self).__init__() - self.conv1d_transpose = tf.keras.layers.Conv2DTranspose( - filters=channels, - kernel_size=(kernel_size, 1), - strides=(stride, 1), - padding=padding, - ) - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensors: Output tensor (B, T', 1, C'). - - """ - x = self.conv1d_transpose(x) - return x - - -class TFResidualStack(tf.keras.layers.Layer): - """Tensorflow ResidualStack module.""" - - def __init__(self, - kernel_size, - channels, - dilation, - bias, - nonlinear_activation, - nonlinear_activation_params, - padding, - ): - """Initialize TFResidualStack module. - - Args: - kernel_size (int): Kernel size. - channles (int): Number of channels. - dilation (int): Dilation ine. 
- bias (bool): Whether to add bias parameter in convolution layers. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - padding (str): Padding type ("same" or "valid"). - - """ - super(TFResidualStack, self).__init__() - self.block = [ - getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), - TFReflectionPad1d(dilation), - tf.keras.layers.Conv2D( - filters=channels, - kernel_size=(kernel_size, 1), - dilation_rate=(dilation, 1), - use_bias=bias, - padding="valid", - ), - getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params), - tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) - ] - self.shortcut = tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias) - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensor: Output tensor (B, T, 1, C). - - """ - _x = tf.identity(x) - for i, layer in enumerate(self.block): - _x = layer(_x) - shortcut = self.shortcut(x) - return shortcut + _x diff --git a/spaces/Slava917/pronunciation-trainer/gradio_interface.py b/spaces/Slava917/pronunciation-trainer/gradio_interface.py deleted file mode 100644 index 1b22abf82d50fc543b45b73043cba0d0ee73d153..0000000000000000000000000000000000000000 --- a/spaces/Slava917/pronunciation-trainer/gradio_interface.py +++ /dev/null @@ -1,34 +0,0 @@ -import torch -import torchaudio - -#fixes second prediction bug -torch._C._jit_override_can_fuse_on_cpu(False) -torch._C._jit_override_can_fuse_on_gpu(False) -torch._C._jit_set_texpr_fuser_enabled(False) -torch._C._jit_set_nvfuser_enabled(False) - -loader = torch.jit.load("audio_loader.pt") -model = torch.jit.load('QuartzNet_thunderspeech_3.pt') - -vocab = model.text_transform.vocab.itos -vocab[-1] = '' - -def convert_probs(probs): - ids = probs.argmax(1)[0] - s = [] - if vocab[ids[0]]: s.append(vocab[ids[0]]) - for i in range(1,len(ids)): - if ids[i-1] != ids[i]: - new = vocab[ids[i]] - if new: s.append(new) - #return '.'.join(s) - return s - -def predict(path): - audio = loader(path) - probs = model(audio, torch.tensor(audio.shape[0] * [audio.shape[-1]], device=audio.device))[0] - return convert_probs(probs) - - gr.Interface(fn=predict, - inputs=[gr.inputs.Audio(source='microphone', type='filepath', optional=True)], - outputs= 'text').launch(debug=Tru \ No newline at end of file diff --git a/spaces/SoulAbi/text-prompt-to-audio-generation/README.md b/spaces/SoulAbi/text-prompt-to-audio-generation/README.md deleted file mode 100644 index 13dd72725872099c0381061a705483255bafa8cf..0000000000000000000000000000000000000000 --- a/spaces/SoulAbi/text-prompt-to-audio-generation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Audioldm Text To Audio Generation -emoji: 🔊 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Srihari1611/Gender_Classification/README.md b/spaces/Srihari1611/Gender_Classification/README.md deleted file mode 100644 index a48eece0886a8aa6ebd5390ad7a69c17b0df35f3..0000000000000000000000000000000000000000 --- a/spaces/Srihari1611/Gender_Classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gender Classification -emoji: 👁 -colorFrom: pink -colorTo: red -sdk: 
gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/StatsByZach/app/configure.py b/spaces/StatsByZach/app/configure.py deleted file mode 100644 index 555e21baf5e3d27378693fc40cab90d4b9d2a4c0..0000000000000000000000000000000000000000 --- a/spaces/StatsByZach/app/configure.py +++ /dev/null @@ -1 +0,0 @@ -base_url = "https://statsbyzach-app.hf.space/" \ No newline at end of file diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/__init__.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/__init__.py deleted file mode 100644 index 70643517cd1a8b4e712eca90e23411ae89937795..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/grids/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Dora Grids.""" diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/quantization/vq.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/quantization/vq.py deleted file mode 100644 index aa57bea59db95ddae35e0657f723ca3a29ee943b..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/quantization/vq.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math -import typing as tp - -import torch - -from .base import BaseQuantizer, QuantizedResult -from .core_vq import ResidualVectorQuantization - - -class ResidualVectorQuantizer(BaseQuantizer): - """Residual Vector Quantizer. - - Args: - dimension (int): Dimension of the codebooks. - n_q (int): Number of residual vector quantizers used. - q_dropout (bool): Random quantizer drop out at train time. - bins (int): Codebook size. - decay (float): Decay for exponential moving average over the codebooks. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider. - for orthogonal regularization. 
- """ - def __init__( - self, - dimension: int = 256, - n_q: int = 8, - q_dropout: bool = False, - bins: int = 1024, - decay: float = 0.99, - kmeans_init: bool = True, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - self.max_n_q = n_q - self.n_q = n_q - self.q_dropout = q_dropout - self.dimension = dimension - self.bins = bins - self.decay = decay - self.kmeans_init = kmeans_init - self.kmeans_iters = kmeans_iters - self.threshold_ema_dead_code = threshold_ema_dead_code - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - self.vq = ResidualVectorQuantization( - dim=self.dimension, - codebook_size=self.bins, - num_quantizers=self.n_q, - decay=self.decay, - kmeans_init=self.kmeans_init, - kmeans_iters=self.kmeans_iters, - threshold_ema_dead_code=self.threshold_ema_dead_code, - orthogonal_reg_weight=self.orthogonal_reg_weight, - orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only, - orthogonal_reg_max_codes=self.orthogonal_reg_max_codes, - channels_last=False - ) - - def forward(self, x: torch.Tensor, frame_rate: int): - n_q = self.n_q - if self.training and self.q_dropout: - n_q = int(torch.randint(1, self.n_q + 1, (1,)).item()) - bw_per_q = math.log2(self.bins) * frame_rate / 1000 - quantized, codes, commit_loss = self.vq(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - bw = torch.tensor(n_q * bw_per_q).to(x) - return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified frame rate at the given bandwidth. - The RVQ encode method sets the appropriate number of quantizer to use - and returns indices for each quantizer. - """ - n_q = self.n_q - codes = self.vq.encode(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - return codes - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation.""" - # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. - codes = codes.transpose(0, 1) - quantized = self.vq.decode(codes) - return quantized - - @property - def total_codebooks(self): - return self.max_n_q - - @property - def num_codebooks(self): - return self.n_q - - def set_num_codebooks(self, n: int): - assert n > 0 and n <= self.max_n_q - self.n_q = n diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/extensions.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/extensions.py deleted file mode 100644 index 21fba40eaf47f1b9e1c94b1562180495a983adc5..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/extensions.py +++ /dev/null @@ -1,151 +0,0 @@ -# encoding: utf-8 -"""A class for managing IPython extensions.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. 
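To make the hook contract described below concrete, here is a minimal sketch of an extension module that this ExtensionManager could load. The module name `hello_ext` and the `%hello` magic are made up for illustration; `register_magic_function` is the InteractiveShell API used to register the magic.

```python
# hello_ext.py -- a hypothetical extension module, shown only to illustrate
# the load/unload hooks that ExtensionManager calls.

def hello(line):
    """Toy line magic: %hello <name>."""
    print(f"Hello, {line or 'world'}!")

def load_ipython_extension(ipython):
    # Called by ExtensionManager.load_extension once the module is imported;
    # `ipython` is the active InteractiveShell instance.
    ipython.register_magic_function(hello, magic_kind="line", magic_name="hello")

def unload_ipython_extension(ipython):
    # Optional: called when the extension is unloaded or reloaded.
    pass
```

In a running shell, `%load_ext hello_ext` would then make `%hello IPython` available; `%unload_ext hello_ext` calls the unload hook, which is where a real extension would undo its registrations.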
- -import os -import os.path -import sys -from importlib import import_module, reload - -from traitlets.config.configurable import Configurable -from IPython.utils.path import ensure_dir_exists, compress_user -from IPython.utils.decorators import undoc -from traitlets import Instance - - -#----------------------------------------------------------------------------- -# Main class -#----------------------------------------------------------------------------- - -BUILTINS_EXTS = {"storemagic": False, "autoreload": False} - - -class ExtensionManager(Configurable): - """A class to manage IPython extensions. - - An IPython extension is an importable Python module that has - a function with the signature:: - - def load_ipython_extension(ipython): - # Do things with ipython - - This function is called after your extension is imported and the - currently active :class:`InteractiveShell` instance is passed as - the only argument. You can do anything you want with IPython at - that point, including defining new magic and aliases, adding new - components, etc. - - You can also optionally define an :func:`unload_ipython_extension(ipython)` - function, which will be called if the user unloads or reloads the extension. - The extension manager will only call :func:`load_ipython_extension` again - if the extension is reloaded. - - You can put your extension modules anywhere you want, as long as - they can be imported by Python's standard import mechanism. However, - to make it easy to write extensions, you can also put your extensions - in ``os.path.join(self.ipython_dir, 'extensions')``. This directory - is added to ``sys.path`` automatically. - """ - - shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) - - def __init__(self, shell=None, **kwargs): - super(ExtensionManager, self).__init__(shell=shell, **kwargs) - self.shell.observe( - self._on_ipython_dir_changed, names=('ipython_dir',) - ) - self.loaded = set() - - @property - def ipython_extension_dir(self): - return os.path.join(self.shell.ipython_dir, u'extensions') - - def _on_ipython_dir_changed(self, change): - ensure_dir_exists(self.ipython_extension_dir) - - def load_extension(self, module_str: str): - """Load an IPython extension by its module name. - - Returns the string "already loaded" if the extension is already loaded, - "no load function" if the module doesn't have a load_ipython_extension - function, or None if it succeeded. - """ - try: - return self._load_extension(module_str) - except ModuleNotFoundError: - if module_str in BUILTINS_EXTS: - BUILTINS_EXTS[module_str] = True - return self._load_extension("IPython.extensions." + module_str) - raise - - def _load_extension(self, module_str: str): - if module_str in self.loaded: - return "already loaded" - - from IPython.utils.syspathcontext import prepended_to_syspath - - with self.shell.builtin_trap: - if module_str not in sys.modules: - mod = import_module(module_str) - mod = sys.modules[module_str] - if self._call_load_ipython_extension(mod): - self.loaded.add(module_str) - else: - return "no load function" - - def unload_extension(self, module_str: str): - """Unload an IPython extension by its module name. - - This function looks up the extension's name in ``sys.modules`` and - simply calls ``mod.unload_ipython_extension(self)``. - - Returns the string "no unload function" if the extension doesn't define - a function to unload itself, "not loaded" if the extension isn't loaded, - otherwise None. 
- """ - if BUILTINS_EXTS.get(module_str, False) is True: - module_str = "IPython.extensions." + module_str - if module_str not in self.loaded: - return "not loaded" - - if module_str in sys.modules: - mod = sys.modules[module_str] - if self._call_unload_ipython_extension(mod): - self.loaded.discard(module_str) - else: - return "no unload function" - - def reload_extension(self, module_str: str): - """Reload an IPython extension by calling reload. - - If the module has not been loaded before, - :meth:`InteractiveShell.load_extension` is called. Otherwise - :func:`reload` is called and then the :func:`load_ipython_extension` - function of the module, if it exists is called. - """ - from IPython.utils.syspathcontext import prepended_to_syspath - - if BUILTINS_EXTS.get(module_str, False) is True: - module_str = "IPython.extensions." + module_str - - if (module_str in self.loaded) and (module_str in sys.modules): - self.unload_extension(module_str) - mod = sys.modules[module_str] - with prepended_to_syspath(self.ipython_extension_dir): - reload(mod) - if self._call_load_ipython_extension(mod): - self.loaded.add(module_str) - else: - self.load_extension(module_str) - - def _call_load_ipython_extension(self, mod): - if hasattr(mod, 'load_ipython_extension'): - mod.load_ipython_extension(self.shell) - return True - - def _call_unload_ipython_extension(self, mod): - if hasattr(mod, 'unload_ipython_extension'): - mod.unload_ipython_extension(self.shell) - return True diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/debugger.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/debugger.py deleted file mode 100644 index 7a0623c84794bc218743ff1032b9bafbfaaec765..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/debugger.py +++ /dev/null @@ -1,177 +0,0 @@ -import asyncio -import os -import sys - -from IPython.core.debugger import Pdb -from IPython.core.completer import IPCompleter -from .ptutils import IPythonPTCompleter -from .shortcuts import create_ipython_shortcuts -from . import embed - -from pathlib import Path -from pygments.token import Token -from prompt_toolkit.shortcuts.prompt import PromptSession -from prompt_toolkit.enums import EditingMode -from prompt_toolkit.formatted_text import PygmentsTokens -from prompt_toolkit.history import InMemoryHistory, FileHistory -from concurrent.futures import ThreadPoolExecutor - -from prompt_toolkit import __version__ as ptk_version -PTK3 = ptk_version.startswith('3.') - - -# we want to avoid ptk as much as possible when using subprocesses -# as it uses cursor positioning requests, deletes color .... -_use_simple_prompt = "IPY_TEST_SIMPLE_PROMPT" in os.environ - - -class TerminalPdb(Pdb): - """Standalone IPython debugger.""" - - def __init__(self, *args, pt_session_options=None, **kwargs): - Pdb.__init__(self, *args, **kwargs) - self._ptcomp = None - self.pt_init(pt_session_options) - self.thread_executor = ThreadPoolExecutor(1) - - def pt_init(self, pt_session_options=None): - """Initialize the prompt session and the prompt loop - and store them in self.pt_app and self.pt_loop. - - Additional keyword arguments for the PromptSession class - can be specified in pt_session_options. 
- """ - if pt_session_options is None: - pt_session_options = {} - - def get_prompt_tokens(): - return [(Token.Prompt, self.prompt)] - - if self._ptcomp is None: - compl = IPCompleter( - shell=self.shell, namespace={}, global_namespace={}, parent=self.shell - ) - # add a completer for all the do_ methods - methods_names = [m[3:] for m in dir(self) if m.startswith("do_")] - - def gen_comp(self, text): - return [m for m in methods_names if m.startswith(text)] - import types - newcomp = types.MethodType(gen_comp, compl) - compl.custom_matchers.insert(0, newcomp) - # end add completer. - - self._ptcomp = IPythonPTCompleter(compl) - - # setup history only when we start pdb - if self.shell.debugger_history is None: - if self.shell.debugger_history_file is not None: - p = Path(self.shell.debugger_history_file).expanduser() - if not p.exists(): - p.touch() - self.debugger_history = FileHistory(os.path.expanduser(str(p))) - else: - self.debugger_history = InMemoryHistory() - else: - self.debugger_history = self.shell.debugger_history - - options = dict( - message=(lambda: PygmentsTokens(get_prompt_tokens())), - editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()), - key_bindings=create_ipython_shortcuts(self.shell), - history=self.debugger_history, - completer=self._ptcomp, - enable_history_search=True, - mouse_support=self.shell.mouse_support, - complete_style=self.shell.pt_complete_style, - style=getattr(self.shell, "style", None), - color_depth=self.shell.color_depth, - ) - - if not PTK3: - options['inputhook'] = self.shell.inputhook - options.update(pt_session_options) - if not _use_simple_prompt: - self.pt_loop = asyncio.new_event_loop() - self.pt_app = PromptSession(**options) - - def cmdloop(self, intro=None): - """Repeatedly issue a prompt, accept input, parse an initial prefix - off the received input, and dispatch to action methods, passing them - the remainder of the line as argument. - - override the same methods from cmd.Cmd to provide prompt toolkit replacement. - """ - if not self.use_rawinput: - raise ValueError('Sorry ipdb does not support use_rawinput=False') - - # In order to make sure that prompt, which uses asyncio doesn't - # interfere with applications in which it's used, we always run the - # prompt itself in a different thread (we can't start an event loop - # within an event loop). This new thread won't have any event loop - # running, and here we run our prompt-loop. - self.preloop() - - try: - if intro is not None: - self.intro = intro - if self.intro: - print(self.intro, file=self.stdout) - stop = None - while not stop: - if self.cmdqueue: - line = self.cmdqueue.pop(0) - else: - self._ptcomp.ipy_completer.namespace = self.curframe_locals - self._ptcomp.ipy_completer.global_namespace = self.curframe.f_globals - - # Run the prompt in a different thread. - if not _use_simple_prompt: - try: - line = self.thread_executor.submit( - self.pt_app.prompt - ).result() - except EOFError: - line = "EOF" - else: - line = input("ipdb> ") - - line = self.precmd(line) - stop = self.onecmd(line) - stop = self.postcmd(stop, line) - self.postloop() - except Exception: - raise - - def do_interact(self, arg): - ipshell = embed.InteractiveShellEmbed( - config=self.shell.config, - banner1="*interactive*", - exit_msg="*exiting interactive console...*", - ) - global_ns = self.curframe.f_globals - ipshell( - module=sys.modules.get(global_ns["__name__"], None), - local_ns=self.curframe_locals, - ) - - -def set_trace(frame=None): - """ - Start debugging from `frame`. 
- - If frame is not specified, debugging starts from caller's frame. - """ - TerminalPdb().set_trace(frame or sys._getframe().f_back) - - -if __name__ == '__main__': - import pdb - # IPython.core.debugger.Pdb.trace_dispatch shall not catch - # bdb.BdbQuit. When started through __main__ and an exception - # happened after hitting "c", this is needed in order to - # be able to quit the debugging session (see #9950). - old_trace_dispatch = pdb.Pdb.trace_dispatch - pdb.Pdb = TerminalPdb # type: ignore - pdb.Pdb.trace_dispatch = old_trace_dispatch # type: ignore - pdb.main() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_custom_pyeval_settrace_common.hpp b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_custom_pyeval_settrace_common.hpp deleted file mode 100644 index 12f9e2242c1190217c0cf2dfd4e1022d244c5b38..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/common/py_custom_pyeval_settrace_common.hpp +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef _PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_ -#define _PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_ - -#include "python.h" -#include "py_utils.hpp" - -struct InternalInitializeCustomPyEvalSetTrace { - PyObject* pyNone; - PyTuple_New* pyTuple_New; - _PyObject_FastCallDict* pyObject_FastCallDict; - PyEval_CallObjectWithKeywords* pyEval_CallObjectWithKeywords; - PyUnicode_InternFromString* pyUnicode_InternFromString; // Note: in Py2 will be PyString_InternFromString. - PyTraceBack_Here* pyTraceBack_Here; - PyEval_SetTrace* pyEval_SetTrace; - bool isDebug; - PyUnicode_AsUTF8* pyUnicode_AsUTF8; - PyObject_Repr* pyObject_Repr; -}; - -/** - * Helper information to access CPython internals. - */ -static InternalInitializeCustomPyEvalSetTrace *internalInitializeCustomPyEvalSetTrace = NULL; - -/* - * Cached interned string objects used for calling the profile and - * trace functions. Initialized by InternalTraceInit(). 
- */ -static PyObject *InternalWhatstrings_37[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}; - - -static int -InternalIsTraceInitialized() -{ - return internalInitializeCustomPyEvalSetTrace != NULL; -} - - - -static int -InternalTraceInit(InternalInitializeCustomPyEvalSetTrace *p_internalInitializeSettrace_37) -{ - internalInitializeCustomPyEvalSetTrace = p_internalInitializeSettrace_37; - static const char * const whatnames[8] = { - "call", "exception", "line", "return", - "c_call", "c_exception", "c_return", - "opcode" - }; - PyObject *name; - int i; - for (i = 0; i < 8; ++i) { - if (InternalWhatstrings_37[i] == NULL) { - name = internalInitializeCustomPyEvalSetTrace->pyUnicode_InternFromString(whatnames[i]); - if (name == NULL) - return -1; - InternalWhatstrings_37[i] = name; - } - } - return 0; -} - -#endif //_PY_CUSTOM_PYEVAL_SETTRACE_COMMON_HPP_ \ No newline at end of file diff --git a/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/__init__.py b/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/__init__.py deleted file mode 100644 index 98952fda6e38f8a00b1a6054f9814c0dcf0a4c14..0000000000000000000000000000000000000000 --- a/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .model import RealESRGAN \ No newline at end of file diff --git a/spaces/TEnngal/bingo/src/components/header.tsx b/spaces/TEnngal/bingo/src/components/header.tsx deleted file mode 100644 index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import * as React from 'react' -import { UserMenu } from './user-menu' - -export async function Header() { - return ( -
-      <UserMenu />
- ) -} diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/scene_dataset/loaders.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/scene_dataset/loaders.py deleted file mode 100644 index cb51fe6fb9b1a24d230cdbdd7e0c02ebd2feb106..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/scene_dataset/loaders.py +++ /dev/null @@ -1,252 +0,0 @@ -from typing import Tuple - -import numpy as np -import torch -from torch.utils.data import DataLoader, TensorDataset - - -class SceneDataLoaders: - """ - This class loads a scene dataset and pre-process it (normalization, unnormalization) - - Args: - state_dim : dimension of the observed state (2 for x,y position observation) - num_steps : number of observed steps - num_steps_future : number of steps in the future - batch_size: set data loader with this batch size - data_train: training dataset - data_val: validation dataset - data_test: test dataset - num_workers: number of workers to use for data loading - """ - - def __init__( - self, - state_dim: int, - num_steps: int, - num_steps_future: int, - batch_size: int, - data_train: torch.Tensor, - data_val: torch.Tensor, - data_test: torch.Tensor, - num_workers: int = 0, - ): - self._batch_size = batch_size - self._num_workers = num_workers - self._state_dim = state_dim - self._num_steps = num_steps - self._num_steps_future = num_steps_future - - self._setup_datasets(data_train, data_val, data_test) - - def train_dataloader(self, shuffle=True, drop_last=True) -> DataLoader: - """Setup and return training DataLoader - - Returns: - DataLoader: training DataLoader - """ - data_size = self._data_train_past.shape[0] - # This is a didactic data loader that only defines minimalistic inputs. - # This dataloader adds some empty tensors and ones to match the expected format with masks and map information. - train_loader = DataLoader( - dataset=TensorDataset( - self._data_train_past, - torch.ones_like(self._data_train_past[..., 0]), # Mask past - self._data_train_fut, - torch.ones_like(self._data_train_fut[..., 0]), # Mask fut - torch.ones_like(self._data_train_fut[..., 0]), # Mask loss - torch.empty( - data_size, 1, 0, 0, device=self._data_train_past.device - ), # Map - torch.empty( - data_size, 1, 0, device=self._data_train_past.device - ), # Mask map - self._offset_train, - self._data_train_ego_past, - self._data_train_ego_fut, - ), - batch_size=self._batch_size, - shuffle=shuffle, - drop_last=drop_last, - num_workers=self._num_workers, - ) - return train_loader - - def val_dataloader(self, shuffle=False, drop_last=False) -> DataLoader: - """Setup and return validation DataLoader - - Returns: - DataLoader: validation DataLoader - """ - data_size = self._data_val_past.shape[0] - # This is a didactic data loader that only defines minimalistic inputs. - # This dataloader adds some empty tensors and ones to match the expected format with masks and map information. 
- val_loader = DataLoader( - dataset=TensorDataset( - self._data_val_past, - torch.ones_like(self._data_val_past[..., 0]), # Mask past - self._data_val_fut, - torch.ones_like(self._data_val_fut[..., 0]), # Mask fut - torch.ones_like(self._data_val_fut[..., 0]), # Mask loss - torch.zeros( - data_size, 1, 0, 0, device=self._data_val_past.device - ), # Map - torch.ones( - data_size, 1, 0, device=self._data_val_past.device - ), # Mask map - self._offset_val, - self._data_val_ego_past, - self._data_val_ego_fut, - ), - batch_size=self._batch_size, - shuffle=shuffle, - drop_last=drop_last, - num_workers=self._num_workers, - ) - return val_loader - - def test_dataloader(self) -> DataLoader: - """Setup and return test DataLoader - - Returns: - DataLoader: test DataLoader - """ - data_size = self._data_test_past.shape[0] - # This is a didactic data loader that only defines minimalistic inputs. - # This dataloader adds some empty tensors and ones to match the expected format with masks and map information. - test_loader = DataLoader( - dataset=TensorDataset( - self._data_test_past, - torch.ones_like(self._data_test_past[..., 0]), # Mask - torch.zeros( - data_size, 0, 1, 0, device=self._data_test_past.device - ), # Map - torch.ones( - data_size, 0, 1, device=self._data_test_past.device - ), # Mask map - self._offset_test, - self._data_test_ego_past, - self._data_test_ego_fut, - ), - batch_size=self._batch_size, - shuffle=False, - num_workers=self._num_workers, - ) - return test_loader - - def _setup_datasets( - self, data_train: torch.Tensor, data_val: torch.Tensor, data_test: torch.Tensor - ): - """Setup datasets: normalize and split into past future - Args: - data_train: training dataset - data_val: validation dataset - data_test: test dataset - """ - data_train, data_train_ego = data_train[0], data_train[1] - data_val, data_val_ego = data_val[0], data_val[1] - data_test, data_test_ego = data_test[0], data_test[1] - - data_train, self._offset_train = self.normalize_trajectory(data_train) - data_val, self._offset_val = self.normalize_trajectory(data_val) - data_test, self._offset_test = self.normalize_trajectory(data_test) - # This is a didactic data loader that only defines minimalistic inputs. - # An extra dimension is added to account for the number of agents in the scene. - # In this minimal input there is only one but the model using the data expects any number of agents. 
- self._data_train_past, self._data_train_fut = self.split_trajectory(data_train) - self._data_val_past, self._data_val_fut = self.split_trajectory(data_val) - self._data_test_past, self._data_test_fut = self.split_trajectory(data_test) - - self._data_train_ego_past, self._data_train_ego_fut = self.split_trajectory( - data_train_ego - ) - self._data_val_ego_past, self._data_val_ego_fut = self.split_trajectory( - data_val_ego - ) - self._data_test_ego_past, self._data_test_ego_fut = self.split_trajectory( - data_test_ego - ) - - def split_trajectory( - self, input: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Split input trajectory into history and future - - Args: - input : (batch_size, (n_agents), num_steps + num_steps_future, state_dim) tensor of - entire trajectory [x, y] - - Returns: - Tuple of history and future trajectories - """ - assert ( - input.shape[-2] == self._num_steps + self._num_steps_future - ), "trajectory length ({}) does not match the expected length".format( - input.shape[-2] - ) - assert ( - input.shape[-1] == self._state_dim - ), "state dimension ({}) does no match the expected dimension".format( - input.shape[-1] - ) - - input_history, input_future = torch.split( - input, [self._num_steps, self._num_steps_future], dim=-2 - ) - return input_history, input_future - - @staticmethod - def normalize_trajectory(input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """Normalize input trajectory by subtracting initial state - - Args: - input : (some_shape, n_agents, num_steps + num_steps_future, state_dim) tensor of - entire trajectory [x, y], or (some_shape, num_steps, state_dim) tensor of history x - - Returns: - Tuple of (normalized_trajectory, offset), where - normalized_trajectory has the same dimension as the input and offset is a - (some_shape, state_dim) tensor corresponding to the initial state - """ - offset = input[..., 0, :].clone() - - return input - offset.unsqueeze(-2), offset - - @staticmethod - def unnormalize_trajectory( - input: torch.Tensor, offset: torch.Tensor - ) -> torch.Tensor: - """Unnormalize trajectory by adding offset to input - - Args: - input : (some_shape, (n_sample), num_steps_future, state_dim) tensor of future - trajectory y - offset : (some_shape, 2 or 4 or 5) tensor of offset to add to y - - Returns: - Unnormalized trajectory that has the same size as input - """ - offset_dim = offset.shape[-1] - assert input.shape[-1] >= offset_dim - input_clone = input.clone() - if offset.ndim == 2: - batch_size, _ = offset.shape - assert input_clone.shape[0] == batch_size - - input_clone[..., :offset_dim] = input_clone[ - ..., :offset_dim - ] + offset.reshape( - [batch_size, *([1] * (input_clone.ndim - 2)), offset_dim] - ) - elif offset.ndim == 3: - batch_size, num_agents, _ = offset.shape - assert input_clone.shape[0] == batch_size - assert input_clone.shape[1] == num_agents - - input_clone[..., :offset_dim] = input_clone[ - ..., :offset_dim - ] + offset.reshape( - [batch_size, num_agents, *([1] * (input_clone.ndim - 3)), offset_dim] - ) - - return input_clone diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py deleted file mode 100644 index 489cad930e0029fc2f8e5111df1bad38151a07a9..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py +++ /dev/null @@ 
-1,4380 +0,0 @@ -from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - -# 3: Positive -# 2: Likely -# 1: Unlikely -# 0: Negative - -THAI_LANG_MODEL = { - 5: { # 'ก' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 3, # 'ฎ' - 57: 2, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 2, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 2, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 3, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 1, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 30: { # 'ข' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 0, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 2, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 2, # 'ี' - 40: 3, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 24: { # 'ค' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 2, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 3, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 8: { # 'ง' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 1, # 'ฉ' - 34: 2, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 
42: 2, # 'ศ' - 46: 1, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 3, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 26: { # 'จ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 3, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 52: { # 'ฉ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 1, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 34: { # 'ช' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 1, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 51: { # 'ซ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 
19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 2, # 'ี' - 40: 3, # 'ึ' - 27: 2, # 'ื' - 32: 1, # 'ุ' - 35: 1, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 1, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 47: { # 'ญ' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 3, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 2, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 58: { # 'ฎ' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 1, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 57: { # 'ฏ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, 
# '๕' - }, - 49: { # 'ฐ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 53: { # 'ฑ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 55: { # 'ฒ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 43: { # 'ณ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 3, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 3, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 3, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 
'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 20: { # 'ด' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 2, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 1, # 'ึ' - 27: 2, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 2, # 'ๆ' - 37: 2, # '็' - 6: 1, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 19: { # 'ต' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 2, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 2, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 1, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 2, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 44: { # 'ถ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 3, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 14: { # 'ท' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 3, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 3, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 
'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 1, # 'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 3, # 'ศ' - 46: 1, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 48: { # 'ธ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 2, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 2, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 3: { # 'น' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 2, # 'ถ' - 14: 3, # 'ท' - 48: 3, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 1, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 3, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 3, # 'โ' - 29: 3, # 'ใ' - 33: 3, # 'ไ' - 50: 2, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 17: { # 'บ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 1, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 2, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 2, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 25: { # 'ป' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' 
- 58: 1, # 'ฎ' - 57: 3, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 1, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 2, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 1, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 39: { # 'ผ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 1, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 0, # 'ุ' - 35: 3, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 1, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 62: { # 'ฝ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 1, # 'ี' - 40: 2, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 1, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 31: { # 'พ' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 2, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 1, # 'ึ' - 27: 3, # 'ื' - 32: 1, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 
50: 0, # 'ๆ' - 37: 1, # '็' - 6: 0, # '่' - 7: 1, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 54: { # 'ฟ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 45: { # 'ภ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 2, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 9: { # 'ม' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 2, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 1, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 2, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 16: { # 'ย' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 2, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 
1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 1, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 2, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 2: { # 'ร' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 2, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 3, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 3, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 3, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 2, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 2, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 3, # 'เ' - 28: 3, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 61: { # 'ฤ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 2, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 15: { # 'ล' - 5: 2, # 'ก' - 30: 3, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 3, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 2, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 2, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 12: { # 'ว' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, 
# 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 2, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 42: { # 'ศ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 2, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 3, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 2, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 46: { # 'ษ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 2, # 'ฎ' - 57: 1, # 'ฏ' - 49: 2, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 18: { # 'ส' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 3, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 2, # 'ภ' - 9: 3, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 0, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 1, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 21: { # 'ห' - 5: 3, # 'ก' - 30: 0, # 
'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 4: { # 'อ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 63: { # 'ฯ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 22: { # 'ะ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 1, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 
32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 10: { # 'ั' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 3, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 2, # 'ฐ' - 53: 0, # 'ฑ' - 55: 3, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 1: { # 'า' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 1, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 2, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 3, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 3, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 36: { # 'ำ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 23: { # 'ิ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 3, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 2, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 
2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 3, # 'ศ' - 46: 2, # 'ษ' - 18: 2, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 13: { # 'ี' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 40: { # 'ึ' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 3, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 27: { # 'ื' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 32: { # 'ุ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 3, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 
0, # 'ฑ' - 55: 1, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 1, # 'ศ' - 46: 2, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 35: { # 'ู' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 11: { # 'เ' - 5: 3, # 'ก' - 30: 3, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 3, # 'ฉ' - 34: 3, # 'ช' - 51: 2, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 3, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 3, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 28: { # 'แ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 3, # 'ต' - 44: 2, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 3, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # 
'้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 41: { # 'โ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 1, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 29: { # 'ใ' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 33: { # 'ไ' - 5: 1, # 'ก' - 30: 2, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 1, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 2, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 50: { # 'ๆ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 
'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 37: { # '็' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 6: { # '่' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 7: { # '้' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 38: { # '์' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' 
- 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 1, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 56: { # '๑' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 2, # '๑' - 59: 1, # '๒' - 60: 1, # '๕' - }, - 59: { # '๒' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 1, # '๑' - 59: 1, # '๒' - 60: 3, # '๕' - }, - 60: { # '๕' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 2, # '๑' - 59: 1, # '๒' - 60: 0, # '๕' - }, -} - -# 255: Undefined characters that did not exist in training text -# 254: 
Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 -# 251: Control characters - -# Character Mapping Table(s): -TIS_620_THAI_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' - 64: 253, # '@' - 65: 182, # 'A' - 66: 106, # 'B' - 67: 107, # 'C' - 68: 100, # 'D' - 69: 183, # 'E' - 70: 184, # 'F' - 71: 185, # 'G' - 72: 101, # 'H' - 73: 94, # 'I' - 74: 186, # 'J' - 75: 187, # 'K' - 76: 108, # 'L' - 77: 109, # 'M' - 78: 110, # 'N' - 79: 111, # 'O' - 80: 188, # 'P' - 81: 189, # 'Q' - 82: 190, # 'R' - 83: 89, # 'S' - 84: 95, # 'T' - 85: 112, # 'U' - 86: 113, # 'V' - 87: 191, # 'W' - 88: 192, # 'X' - 89: 193, # 'Y' - 90: 194, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 64, # 'a' - 98: 72, # 'b' - 99: 73, # 'c' - 100: 114, # 'd' - 101: 74, # 'e' - 102: 115, # 'f' - 103: 116, # 'g' - 104: 102, # 'h' - 105: 81, # 'i' - 106: 201, # 'j' - 107: 117, # 'k' - 108: 90, # 'l' - 109: 103, # 'm' - 110: 78, # 'n' - 111: 82, # 'o' - 112: 96, # 'p' - 113: 202, # 'q' - 114: 91, # 'r' - 115: 79, # 's' - 116: 84, # 't' - 117: 104, # 'u' - 118: 105, # 'v' - 119: 97, # 'w' - 120: 98, # 'x' - 121: 92, # 'y' - 122: 203, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 209, # '\x80' - 129: 210, # '\x81' - 130: 211, # '\x82' - 131: 212, # '\x83' - 132: 213, # '\x84' - 133: 88, # '\x85' - 134: 214, # '\x86' - 135: 215, # '\x87' - 136: 216, # '\x88' - 137: 217, # '\x89' - 138: 218, # '\x8a' - 139: 219, # '\x8b' - 140: 220, # '\x8c' - 141: 118, # '\x8d' - 142: 221, # '\x8e' - 143: 222, # '\x8f' - 144: 223, # '\x90' - 145: 224, # '\x91' - 146: 99, # '\x92' - 147: 85, # '\x93' - 148: 83, # '\x94' - 149: 225, # '\x95' - 150: 226, # '\x96' - 151: 227, # '\x97' - 152: 228, # '\x98' - 153: 229, # '\x99' - 154: 230, # '\x9a' - 155: 231, # '\x9b' - 156: 232, # '\x9c' - 157: 233, # '\x9d' - 158: 234, # '\x9e' - 159: 235, # '\x9f' - 160: 236, # None - 161: 5, # 'ก' - 162: 30, # 'ข' - 163: 237, # 'ฃ' - 164: 24, # 'ค' - 165: 238, # 'ฅ' - 166: 75, # 'ฆ' - 167: 8, # 'ง' - 168: 26, # 'จ' - 169: 52, # 'ฉ' - 170: 34, # 'ช' - 171: 51, # 'ซ' - 172: 119, # 'ฌ' - 173: 47, # 'ญ' - 174: 58, # 'ฎ' - 175: 57, # 'ฏ' - 176: 49, # 'ฐ' - 177: 53, # 'ฑ' - 178: 55, # 'ฒ' - 179: 43, # 'ณ' - 180: 20, # 'ด' - 181: 19, # 'ต' - 182: 44, # 'ถ' - 183: 14, # 'ท' - 184: 48, # 'ธ' - 185: 
3, # 'น' - 186: 17, # 'บ' - 187: 25, # 'ป' - 188: 39, # 'ผ' - 189: 62, # 'ฝ' - 190: 31, # 'พ' - 191: 54, # 'ฟ' - 192: 45, # 'ภ' - 193: 9, # 'ม' - 194: 16, # 'ย' - 195: 2, # 'ร' - 196: 61, # 'ฤ' - 197: 15, # 'ล' - 198: 239, # 'ฦ' - 199: 12, # 'ว' - 200: 42, # 'ศ' - 201: 46, # 'ษ' - 202: 18, # 'ส' - 203: 21, # 'ห' - 204: 76, # 'ฬ' - 205: 4, # 'อ' - 206: 66, # 'ฮ' - 207: 63, # 'ฯ' - 208: 22, # 'ะ' - 209: 10, # 'ั' - 210: 1, # 'า' - 211: 36, # 'ำ' - 212: 23, # 'ิ' - 213: 13, # 'ี' - 214: 40, # 'ึ' - 215: 27, # 'ื' - 216: 32, # 'ุ' - 217: 35, # 'ู' - 218: 86, # 'ฺ' - 219: 240, # None - 220: 241, # None - 221: 242, # None - 222: 243, # None - 223: 244, # '฿' - 224: 11, # 'เ' - 225: 28, # 'แ' - 226: 41, # 'โ' - 227: 29, # 'ใ' - 228: 33, # 'ไ' - 229: 245, # 'ๅ' - 230: 50, # 'ๆ' - 231: 37, # '็' - 232: 6, # '่' - 233: 7, # '้' - 234: 67, # '๊' - 235: 77, # '๋' - 236: 38, # '์' - 237: 93, # 'ํ' - 238: 246, # '๎' - 239: 247, # '๏' - 240: 68, # '๐' - 241: 56, # '๑' - 242: 59, # '๒' - 243: 65, # '๓' - 244: 69, # '๔' - 245: 60, # '๕' - 246: 70, # '๖' - 247: 80, # '๗' - 248: 71, # '๘' - 249: 87, # '๙' - 250: 248, # '๚' - 251: 249, # '๛' - 252: 250, # None - 253: 251, # None - 254: 252, # None - 255: 253, # None -} - -TIS_620_THAI_MODEL = SingleByteCharSetModel( - charset_name="TIS-620", - language="Thai", - char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER, - language_model=THAI_LANG_MODEL, - typical_positive_ratio=0.926386, - keep_ascii_letters=False, - alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛", -) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/env.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/env.py deleted file mode 100644 index 40634c17c73273ac8927632be164f466cfe7d1fa..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/env.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import importlib -import importlib.util -import logging -import numpy as np -import os -import random -import sys -from datetime import datetime -import torch - -__all__ = ["seed_all_rng"] - - -TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) -""" -PyTorch version as a tuple of 2 ints. Useful for comparison. -""" - - -DOC_BUILDING = os.getenv("_DOC_BUILDING", False) # set in docs/conf.py -""" -Whether we're building documentation. -""" - - -def seed_all_rng(seed=None): - """ - Set the random seed for the RNG in torch, numpy and python. - - Args: - seed (int): if None, will use a strong random seed. - """ - if seed is None: - seed = ( - os.getpid() - + int(datetime.now().strftime("%S%f")) - + int.from_bytes(os.urandom(2), "big") - ) - logger = logging.getLogger(__name__) - logger.info("Using a generated random seed {}".format(seed)) - np.random.seed(seed) - torch.manual_seed(seed) - random.seed(seed) - os.environ["PYTHONHASHSEED"] = str(seed) - - -# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path -def _import_file(module_name, file_path, make_importable=False): - spec = importlib.util.spec_from_file_location(module_name, file_path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - if make_importable: - sys.modules[module_name] = module - return module - - -def _configure_libraries(): - """ - Configurations for some libraries. 
- """ - # An environment option to disable `import cv2` globally, - # in case it leads to negative performance impact - disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) - if disable_cv2: - sys.modules["cv2"] = None - else: - # Disable opencl in opencv since its interaction with cuda often has negative effects - # This envvar is supported after OpenCV 3.4.0 - os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" - try: - import cv2 - - if int(cv2.__version__.split(".")[0]) >= 3: - cv2.ocl.setUseOpenCL(False) - except ModuleNotFoundError: - # Other types of ImportError, if happened, should not be ignored. - # Because a failed opencv import could mess up address space - # https://github.com/skvark/opencv-python/issues/381 - pass - - def get_version(module, digit=2): - return tuple(map(int, module.__version__.split(".")[:digit])) - - # fmt: off - assert get_version(torch) >= (1, 4), "Requires torch>=1.4" - import fvcore - assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2" - import yaml - assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" - # fmt: on - - -_ENV_SETUP_DONE = False - - -def setup_environment(): - """Perform environment setup work. The default setup is a no-op, but this - function allows the user to specify a Python source file or a module in - the $DETECTRON2_ENV_MODULE environment variable, that performs - custom setup work that may be necessary to their computing environment. - """ - global _ENV_SETUP_DONE - if _ENV_SETUP_DONE: - return - _ENV_SETUP_DONE = True - - _configure_libraries() - - custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") - - if custom_module_path: - setup_custom_environment(custom_module_path) - else: - # The default setup is a no-op - pass - - -def setup_custom_environment(custom_module): - """ - Load custom environment setup by importing a Python source file or a - module, and run the setup function. - """ - if custom_module.endswith(".py"): - module = _import_file("detectron2.utils.env.custom_module", custom_module) - else: - module = importlib.import_module(custom_module) - assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( - "Custom environment module defined in {} does not have the " - "required callable attribute 'setup_environment'." - ).format(custom_module) - module.setup_environment() - - -def fixup_module_metadata(module_name, namespace, keys=None): - """ - Fix the __qualname__ of module members to be their exported api name, so - when they are referenced in docs, sphinx can find them. Reference: - https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241 - """ - if not DOC_BUILDING: - return - seen_ids = set() - - def fix_one(qualname, name, obj): - # avoid infinite recursion (relevant when using - # typing.Generic, for example) - if id(obj) in seen_ids: - return - seen_ids.add(id(obj)) - - mod = getattr(obj, "__module__", None) - if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")): - obj.__module__ = module_name - # Modules, unlike everything else in Python, put fully-qualitied - # names into their __name__ attribute. We check for "." to avoid - # rewriting these. - if hasattr(obj, "__name__") and "." not in obj.__name__: - obj.__name__ = name - obj.__qualname__ = qualname - if isinstance(obj, type): - for attr_name, attr_value in obj.__dict__.items(): - fix_one(objname + "." 
+ attr_name, attr_name, attr_value) - - if keys is None: - keys = namespace.keys() - for objname in keys: - if not objname.startswith("_"): - obj = namespace[objname] - fix_one(objname, objname, obj) diff --git a/spaces/TerrificTerry/Club_Review_Antidetector/app.py b/spaces/TerrificTerry/Club_Review_Antidetector/app.py deleted file mode 100644 index 5fa8fb1101f75efc543e2b48c003d874b70e827f..0000000000000000000000000000000000000000 --- a/spaces/TerrificTerry/Club_Review_Antidetector/app.py +++ /dev/null @@ -1,135 +0,0 @@ -import openai -import gradio as gr - -openai.api_key = "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" - -title = "ClubAI" -description = "Writing club records is usually a painful and tedious work. This website helps you write club records within 3 minutes and save up time for more important things.\n **WARNING: Please use with caution and avoid violating school regulations**\n Some examples of using the website are provided at the end of the webpage. If you are not satisfied with the record, click the `submit` button again to get a different record or try changing the parameters.\n If you encounter any problem while using, feel free to contact me through ytimerunners@gmail.com.\n **If you find this helpful, please click the LIKE button :D.**\n *This is only a test version, and do not share it to much other people to ensure the website stability.*" - - -def WordCount(text): - return len(text.split()) - - -def ToTitleCase(input_string): - - # list of articles - articles = ["a", "an", "the"] - - # list of coordinating conjunctins - conjunctions = ["and", "but", - "for", "nor", - "or", "so", - "yet"] - - # list of some short articles - prepositions = ["in", "to", "for", - "with", "on", "at", - "from", "by", "about", - "as", "into", "like", - "through", "after", "over", - "between", "out", "against", - "during", "without", "before", - "under", "around", "among", - "of"] - - # merging the 3 lists - lower_case = articles + conjunctions + prepositions - - # variable declaration for the output text - output_string = "" - - # separating each word in the string - input_list = input_string.split(" ") - - # checking each word - for word in input_list: - - # if the word exists in the list - # then no need to capitalize it - if word in lower_case: - output_string += word + " " - - # if the word does not exists in - # the list, then capitalize it - else: - temp = word.title() - output_string += temp + " " - - return output_string - - -def GenRecord(club_activity, role="club member", time="after school", personal_engagement=None, is_one_paragraph=False, is_creative=False, bypass_gpt_zero=False): - prompt = 'Write from the first person perspective of a ' - prompt = prompt + role + "," - prompt = prompt + " the club have an activity of" + club_activity + "." - prompt = prompt + " The activity is held " + time + "." - if(personal_engagement): - prompt = prompt + " In the activity, you " + personal_engagement + " ." - prompt = prompt + " Describe the club activity by writing at least 80 words and no more than 120 words. You MUST use past tense throughout." - if(is_one_paragraph or bypass_gpt_zero): - prompt = prompt + " You MUST only write one paragraph." - if(is_creative or bypass_gpt_zero): - prompt = prompt + " Add as many sensory details as you can." - if(bypass_gpt_zero): - prompt = prompt + " Write in an impulsive manner and ensure that the text is not uniform. Use an informal and relaxed tone, but be semi-professional. Be engaging and interesting and easy to read. 
Do not talk in the manner in which ChatGPT generates content, instead use a manner that is radically different from the way language models generate text. Add three grammar errors and make them evenly distributed in the text." - print(prompt) - completion = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - { - "role": "user", - "content": prompt, - } - ], - temperature=0.7, - max_tokens=1024, - n=1, - ) - return [ - ToTitleCase(club_activity), - completion.choices[0].message.content, - WordCount(completion.choices[0].message.content) - ] - - -example_1 = ["listen to a lecture about the history of Calculus", "club member", - "after school", "listen to the lecture of the club president carefully", False, False, False] -example_2 = ["investigates visual GPT", "club president", "after school", - "teach the junior members how to use visual GPT", True, True, False] - -demo = gr.Interface( - fn=GenRecord, - inputs=[ - gr.Textbox(label="Activity Description", - info="Please briefly describe the club activity", placeholder="listen to a lecture about the history of Calculus"), - gr.Radio(["club member", "club president"], label="Role", - info="Please choose your role in the club, default=club member", value="club member"), - gr.Radio(["after school", "in the afternoon"], label="Activity Time", - info="Please choose the time of the club activity, default=after school", value="after school"), - gr.Textbox(label="Personal Engagement", - info="(Optional) Describe your personal role in the club activity, if you do not want to write this, leave the text box below blank", value=None), - gr.Checkbox(label="Force One Paragraph", - info="Whether force the model to generate only one paragraph, default=False", value=False), - gr.Checkbox( - label="More Details", info="Whether generate more details, enabling may cause the model to become too creative, default=False", value=False), - gr.Checkbox(label="(beta) Bypass GPT Zero", - info="Try to bypass GPT Zero detection, may cause the model to generate nonsense and significantly lowers the generation speed. 
There is no garantee that it will still work with the latest version of GPT zero, default=False", value=False) - ], - outputs=[ - gr.Textbox(label="Club Record Title", show_label=True).style( - show_copy_button=True), - gr.Textbox(label="Club Record", show_label=True).style( - show_copy_button=True), - gr.Number(label="Word Count", precision=0), - ], - examples=[example_1, example_2], - cache_examples=True, - allow_flagging="never", - title=title, - description=description, - theme=gr.themes.Monochrome() -) - -if __name__ == "__main__": - demo.launch(share=False) diff --git a/spaces/Tshackelton/IBMPlex-DenseReadable/theme_dropdown.py b/spaces/Tshackelton/IBMPlex-DenseReadable/theme_dropdown.py deleted file mode 100644 index 6235388fd00549553df44028f3ccf03e946994ea..0000000000000000000000000000000000000000 --- a/spaces/Tshackelton/IBMPlex-DenseReadable/theme_dropdown.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import pathlib - -from gradio.themes.utils import ThemeAsset - - -def create_theme_dropdown(): - import gradio as gr - - asset_path = pathlib.Path(__file__).parent / "themes" - themes = [] - for theme_asset in os.listdir(str(asset_path)): - themes.append( - (ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset))) - ) - - def make_else_if(theme_asset): - return f""" - else if (theme == '{str(theme_asset[0].version)}') {{ - var theme_css = `{theme_asset[1]._get_theme_css()}` - }}""" - - head, tail = themes[0], themes[1:] - if_statement = f""" - if (theme == "{str(head[0].version)}") {{ - var theme_css = `{head[1]._get_theme_css()}` - }} {" ".join(make_else_if(t) for t in tail)} - """ - - latest_to_oldest = sorted([t[0] for t in themes], key=lambda asset: asset.version)[ - ::-1 - ] - latest_to_oldest = [str(t.version) for t in latest_to_oldest] - - component = gr.Dropdown( - choices=latest_to_oldest, - value=latest_to_oldest[0], - render=False, - label="Select Version", - ).style(container=False) - - return ( - component, - f""" - (theme) => {{ - if (!document.querySelector('.theme-css')) {{ - var theme_elem = document.createElement('style'); - theme_elem.classList.add('theme-css'); - document.head.appendChild(theme_elem); - }} else {{ - var theme_elem = document.querySelector('.theme-css'); - }} - {if_statement} - theme_elem.innerHTML = theme_css; - }} - """, - ) diff --git a/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/streamlit_app.py/pages/Sentiment_Analysis.py b/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/streamlit_app.py/pages/Sentiment_Analysis.py deleted file mode 100644 index bee9c1f861aa1046b7b80a839736fcdd4ed17451..0000000000000000000000000000000000000000 --- a/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/streamlit_app.py/pages/Sentiment_Analysis.py +++ /dev/null @@ -1,117 +0,0 @@ -from os import path -import streamlit as st - -# import pickle - -# from tensorflow import keras -import tensorflow as tf -import torch -from torch import nn -from transformers import BertModel, BertTokenizer - - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -MODEL_NAME = "bert-base-cased" -MODEL_PATH = path.join(path.dirname(__file__), "bert_model.h5") - - -# Build the Sentiment Classifier class -class SentimentClassifier(nn.Module): - # Constructor class - def __init__(self, n_classes): - super(SentimentClassifier, self).__init__() - self.bert = BertModel.from_pretrained(MODEL_NAME) - self.drop = nn.Dropout(p=0.3) - self.out = nn.Linear(self.bert.config.hidden_size, n_classes) - - # Forward propagaion class - def forward(self, input_ids, 
attention_mask): - _, pooled_output = self.bert( - input_ids=input_ids, attention_mask=attention_mask, return_dict=False - ) - # Add a dropout layer - output = self.drop(pooled_output) - return self.out(output) - - -@st.cache_resource -def load_model_and_tokenizer(): - model = SentimentClassifier(3) - model.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device("cpu"))) - model.eval() - return model, BertTokenizer.from_pretrained("bert-base-cased") - - -def predict(content): - model, tokenizer = load_model_and_tokenizer() - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - encoded_review = tokenizer.encode_plus( - content, - max_length=160, - add_special_tokens=True, - return_token_type_ids=False, - pad_to_max_length=True, - return_attention_mask=True, - return_tensors="pt", - ) - - input_ids = encoded_review["input_ids"].to(device) - attention_mask = encoded_review["attention_mask"].to(device) - - output = model(input_ids, attention_mask) - _, prediction = torch.max(output, dim=1) - - class_names = ["negative", "neutral", "positive"] - - return class_names[prediction] - - -def main(): - st.set_page_config(page_title="Sentiment Analysis", page_icon="📝") - - # giving a title to our page - st.title("Sentiment analysis") - contents = st.text_area( - "Please enter reviews/sentiment/setences/contents:", - placeholder="Enter your text here", - height=200, - ) - - prediction = "" - - # Create a prediction button - if st.button("Analyze Sentiment"): - stripped = contents.strip() - if not stripped: - st.error("Please enter some text.") - return - - prediction = predict(contents) - if prediction == "positive": - st.success("This is positive 😄") - elif prediction == "negative": - st.error("This is negative 😟") - else: - st.warning("This is neutral 🙂") - - upload_file = st.file_uploader("Or upload a file", type=["txt"]) - if upload_file is not None: - contents = upload_file.read().decode("utf-8") - - for line in contents.splitlines(): - line = line.strip() - if not line: - continue - - prediction = predict(line) - if prediction == "positive": - st.success(line + "\n\nThis is positive 😄") - elif prediction == "negative": - st.error(line + "\n\nThis is negative 😟") - else: - st.warning(line + "\n\nThis is neutral 🙂") - - -if __name__ == "__main__": - main() diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/hteyun.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/hteyun.py deleted file mode 100644 index a6eba7c00331d720afb47215e818f5900d4aedcf..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/hteyun.py +++ /dev/null @@ -1,34 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://hteyun.com' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json, text/plain, */*', - 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4', - 'Origin': 'https://hteyun.com', - 'Referer': 'https://hteyun.com/chat/', - } - data = { - 'messages': messages, - 'model': model, - 'systemMessage': 'You are ChatGPT, a large language model trained by OpenAI. Follow the user\'s instructions carefully. 
Respond using russian language.', - 'temperature': 0.7, - 'presence_penalty': 0, - } - response = requests.post(url + '/api/chat-stream', json=data, headers=headers, stream=True) - print(response.json()) - - # Извлечение текста из response - return response.json()['text'] - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/Vision-CAIR/minigpt4/minigpt4/tasks/base_task.py b/spaces/Vision-CAIR/minigpt4/minigpt4/tasks/base_task.py deleted file mode 100644 index 9f82a2a52779a782e5a40dfb6a6d9a57e991e345..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/minigpt4/minigpt4/tasks/base_task.py +++ /dev/null @@ -1,286 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import logging -import os - -import torch -import torch.distributed as dist -from minigpt4.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized -from minigpt4.common.logger import MetricLogger, SmoothedValue -from minigpt4.common.registry import registry -from minigpt4.datasets.data_utils import prepare_sample - - -class BaseTask: - def __init__(self, **kwargs): - super().__init__() - - self.inst_id_key = "instance_id" - - @classmethod - def setup_task(cls, **kwargs): - return cls() - - def build_model(self, cfg): - model_config = cfg.model_cfg - - model_cls = registry.get_model_class(model_config.arch) - return model_cls.from_config(model_config) - - def build_datasets(self, cfg): - """ - Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. - Download dataset and annotations automatically if not exist. - - Args: - cfg (common.config.Config): _description_ - - Returns: - dict: Dictionary of torch.utils.data.Dataset objects by split. - """ - - datasets = dict() - - datasets_config = cfg.datasets_cfg - - assert len(datasets_config) > 0, "At least one dataset has to be specified." 
- - for name in datasets_config: - dataset_config = datasets_config[name] - - builder = registry.get_builder_class(name)(dataset_config) - dataset = builder.build_datasets() - - dataset['train'].name = name - if 'sample_ratio' in dataset_config: - dataset['train'].sample_ratio = dataset_config.sample_ratio - - datasets[name] = dataset - - return datasets - - def train_step(self, model, samples): - loss = model(samples)["loss"] - return loss - - def valid_step(self, model, samples): - raise NotImplementedError - - def before_evaluation(self, model, dataset, **kwargs): - model.before_evaluation(dataset=dataset, task_type=type(self)) - - def after_evaluation(self, **kwargs): - pass - - def inference_step(self): - raise NotImplementedError - - def evaluation(self, model, data_loader, cuda_enabled=True): - metric_logger = MetricLogger(delimiter=" ") - header = "Evaluation" - # TODO make it configurable - print_freq = 10 - - results = [] - - for samples in metric_logger.log_every(data_loader, print_freq, header): - samples = prepare_sample(samples, cuda_enabled=cuda_enabled) - - eval_output = self.valid_step(model=model, samples=samples) - results.extend(eval_output) - - if is_dist_avail_and_initialized(): - dist.barrier() - - return results - - def train_epoch( - self, - epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - cuda_enabled=False, - log_freq=50, - accum_grad_iters=1, - ): - return self._train_inner_loop( - epoch=epoch, - iters_per_epoch=lr_scheduler.iters_per_epoch, - model=model, - data_loader=data_loader, - optimizer=optimizer, - scaler=scaler, - lr_scheduler=lr_scheduler, - log_freq=log_freq, - cuda_enabled=cuda_enabled, - accum_grad_iters=accum_grad_iters, - ) - - def train_iters( - self, - epoch, - start_iters, - iters_per_inner_epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - cuda_enabled=False, - log_freq=50, - accum_grad_iters=1, - ): - return self._train_inner_loop( - epoch=epoch, - start_iters=start_iters, - iters_per_epoch=iters_per_inner_epoch, - model=model, - data_loader=data_loader, - optimizer=optimizer, - scaler=scaler, - lr_scheduler=lr_scheduler, - log_freq=log_freq, - cuda_enabled=cuda_enabled, - accum_grad_iters=accum_grad_iters, - ) - - def _train_inner_loop( - self, - epoch, - iters_per_epoch, - model, - data_loader, - optimizer, - lr_scheduler, - scaler=None, - start_iters=None, - log_freq=50, - cuda_enabled=False, - accum_grad_iters=1, - ): - """ - An inner training loop compatible with both epoch-based and iter-based training. - - When using epoch-based, training stops after one epoch; when using iter-based, - training stops after #iters_per_epoch iterations. - """ - use_amp = scaler is not None - - if not hasattr(data_loader, "__next__"): - # convert to iterator if not already - data_loader = iter(data_loader) - - metric_logger = MetricLogger(delimiter=" ") - metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) - metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) - - # if iter-based runner, schedule lr based on inner epoch. - logging.info( - "Start training epoch {}, {} iters per inner epoch.".format( - epoch, iters_per_epoch - ) - ) - header = "Train: data epoch: [{}]".format(epoch) - if start_iters is None: - # epoch-based runner - inner_epoch = epoch - else: - # In iter-based runner, we schedule the learning rate based on iterations. 
- inner_epoch = start_iters // iters_per_epoch - header = header + "; inner epoch [{}]".format(inner_epoch) - - for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): - # if using iter-based runner, we stop after iters_per_epoch iterations. - if i >= iters_per_epoch: - break - - samples = next(data_loader) - - samples = prepare_sample(samples, cuda_enabled=cuda_enabled) - samples.update( - { - "epoch": inner_epoch, - "num_iters_per_epoch": iters_per_epoch, - "iters": i, - } - ) - - lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) - - with torch.cuda.amp.autocast(enabled=use_amp): - loss = self.train_step(model=model, samples=samples) - - # after_train_step() - if use_amp: - scaler.scale(loss).backward() - else: - loss.backward() - - # update gradients every accum_grad_iters iterations - if (i + 1) % accum_grad_iters == 0: - if use_amp: - scaler.step(optimizer) - scaler.update() - else: - optimizer.step() - optimizer.zero_grad() - - metric_logger.update(loss=loss.item()) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - - # after train_epoch() - # gather the stats from all processes - metric_logger.synchronize_between_processes() - logging.info("Averaged stats: " + str(metric_logger.global_avg())) - return { - k: "{:.3f}".format(meter.global_avg) - for k, meter in metric_logger.meters.items() - } - - @staticmethod - def save_result(result, result_dir, filename, remove_duplicate=""): - import json - - result_file = os.path.join( - result_dir, "%s_rank%d.json" % (filename, get_rank()) - ) - final_result_file = os.path.join(result_dir, "%s.json" % filename) - - json.dump(result, open(result_file, "w")) - - if is_dist_avail_and_initialized(): - dist.barrier() - - if is_main_process(): - logging.warning("rank %d starts merging results." % get_rank()) - # combine results from all processes - result = [] - - for rank in range(get_world_size()): - result_file = os.path.join( - result_dir, "%s_rank%d.json" % (filename, rank) - ) - res = json.load(open(result_file, "r")) - result += res - - if remove_duplicate: - result_new = [] - id_list = [] - for res in result: - if res[remove_duplicate] not in id_list: - id_list.append(res[remove_duplicate]) - result_new.append(res) - result = result_new - - json.dump(result, open(final_result_file, "w")) - print("result file saved to %s" % final_result_file) - - return final_result_file diff --git a/spaces/XzJosh/Bekki-Bert-VITS2/text/tone_sandhi.py b/spaces/XzJosh/Bekki-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Bekki-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/XzJosh/ShanBao-Bert-VITS2/models.py b/spaces/XzJosh/ShanBao-Bert-VITS2/models.py deleted file mode 100644 index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/ShanBao-Bert-VITS2/models.py +++ /dev/null @@ -1,707 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages -class DurationDiscriminator(nn.Module): #vits2 - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - -class TransformerCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - share_parameter=False - ): - - super().__init__() - self.channels = channels - 
self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * 
math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, g=None): - x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = 
dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def 
__init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - ''' - - def __init__(self, spec_channels, gin_channels=0): - - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [weight_norm(nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1))) for i in range(K)] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) - - out_channels = 
self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer = 4, - n_layers_trans_flow = 3, - flow_share_parameter = False, - use_transformer_flow = True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - if use_transformer_flow: - self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter) - else: - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, 
gin_channels=gin_channels) - self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers >= 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None): - #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + 
torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/YangHao520/AIGCReviewer/app.py b/spaces/YangHao520/AIGCReviewer/app.py deleted file mode 100644 index 4dcf2be0ac946fe85b3c62080c7a11a62be6aa5d..0000000000000000000000000000000000000000 --- a/spaces/YangHao520/AIGCReviewer/app.py +++ /dev/null @@ -1,624 +0,0 @@ -# !/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved - -This module provide a netSpider for gpt审核 - -Authors:yanghao31@baidu.com -Date:2023/08/30 -""" -import openpyxl -import re -import openai -import json -import time -import random -from tqdm import tqdm -import concurrent.futures -import gradio as gr -import os -import tempfile -import shutil - - -def shortest_edit_path(s1, s2, dp): - """计算两个字符串的最短编辑路径。 Args: - s1 (str): 源字符串。 s2 (str): 目标字符串。 dp (list of list of int): 动态规划矩阵,表示从源字符串到目标字符串的编辑距离。 Returns: - List[str]: 从源字符串到目标字符串的最短编辑路径。 """ - m, n = len(s1), len(s2) - path = [] - i, j = m, n - while i > 0 or j > 0: - if i > 0 and dp[i][j] == dp[i - 1][j] + 1: - path.append(f"删除 {s1[i - 1]}") - i -= 1 - elif j > 0 and dp[i][j] == dp[i][j - 1] + 1: - path.append(f"插入 {s2[j - 1]}") - j -= 1 - elif i > 0 and j > 0 and dp[i][j] == dp[i - 1][j - 1] + 1: - path.append(f"替换 {s1[i - 1]} 为 {s2[j - 1]}") - i -= 1 - j -= 1 - else: - i -= 1 - j -= 1 - return path[::-1] - - -def shortest_edit_distance(s1, s2): - """ - 计算两个字符串 s1 和 s2 之间的最短编辑距离,并返回距离和动态规划数组。 Args: - s1 (str): 第一个字符串。 s2 (str): 第二个字符串。 Returns: - Tuple[int, List[List[int]]]: 最短编辑距离和动态规划数组。 """ - m, n = len(s1), len(s2) - dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] - - # 初始化边界情况 - for i in range(m + 1): - dp[i][0] = i - for j in range(n + 1): - dp[0][j] = j - - for i in range(1, m + 1): - for j in range(1, n + 1): - if s1[i - 1] == s2[j - 1]: - dp[i][j] = dp[i - 1][j - 1] - else: - dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1 - return dp[m][n], dp - - -def shortest_edit_distanceDemo(): - """ - 求两个字符串的最短编辑距离。即,将一个字符串转换成另一个字符串所需最少步数。 Args: 无 Returns: - - tuple[int, list]: 返回一个元组,第一个元素是最短编辑距离,第二个元素是一个列表表示最短编辑路径。 * 如果输入参数为空或者不满足类型要求,则返回 (None, None)。 """ - s1 = "kitten" - s2 = "sitting" - distance, dp = shortest_edit_distance(s1, s2) - print(f"最短编辑距离: {distance}") - path = shortest_edit_path(s1, s2, dp) - print("最短编辑路径:") - for step in path: - print(step) - - -def GetTranslate(quesText): - openai.api_key = "" - content = """请你扮演一个远近闻名的日译中的翻译官,现在我将给你用户输入的文本和GPT改写后的文本,请你将这两个文本翻译成连贯流畅的中文,然后按照Json格式输出翻译结果,模版输入:用户输入文本 GPT改写后的文本。模版输出{"UserText_Cn":"翻译成中文后的用户文本","GPTWriteText_Cn":"翻译成中文后的GPT改写后的文本"}下面是你要处理的用户输入和GPT改写后的文本:""" + quesText - temperature = 0.5 # 温度参数 - maxNum = 3 - for i in range(maxNum): - time.sleep(random.random() * 2) - try: - completion = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - # model="gpt-4", - messages=[{"role": "user", "content": content}], - temperature=temperature) - # print(completion['choices'][0]['message']['content']) - print(quesText) - print(completion['choices'][0]['message']['content']) - dataJson = json.loads(completion['choices'][0]['message']['content']) - return dataJson - except Exception as e: - - print(e) - print('第{}次请求失败'.format(i + 1.0)) - time.sleep(4) - - -def GetAnswer(quesText): - openai.api_key = "" - translate = GetTranslate(quesText) - distance, dp = shortest_edit_distance(translate['UserText_Cn'], 
translate['GPTWriteText_Cn']) - - print(distance) - if distance <= 3: - # 如果编辑距离小于3,则表示改写不足 - newJson = { - 'Explain': "{},其中用户输入文本翻译成连贯流畅的中文为:{},GPT改写删除表情符号后翻译成中文为:{}。二者的编辑距离为{}小于等于阈值{},所以,最终的审核结果为'改写不足'".format( - quesText, translate['UserText_Cn'], translate['GPTWriteText_Cn'], distance, 2), 'Result': "不合格"} - return newJson - content = """你扮演一个著名的高效的AIGC内容审核员,他对日本的语言文化极度了解,对如何改写文本也非常熟悉。稳定、精准、安全、可控、懂得变通是他的座右铭。他对于任何要审核的内容都非常上心,仔细,认真且专注,给定审核标准的情况下,清楚的知道自己为什么会给出这样的审核结果. - 现在将给你一段由用户输入和GPT将用户输入改写{更可爱}的结果组成的文本。请你将其翻译成连贯流畅的中文,依据用户输入的内容,针对GPT内容生成的文本从:用户内容原意符合度、改写程度。这两个层面来评判这段GPT内容改写是否改写合格,并给出审核结果。 - 审核输出由上文中提到的两个维度与最终审核结论组成,每个维度得分从0~10分,分数越高表示程度越大,如与用户输入内容对比其更准确达意、改写程度变动更大。审核结果有三种:合格、语义增加、语义篡改。 - 四者标准为: - 合格:要求1:GPT改写后的文本包含了用户输入文本的意思,同时表达更加具体或更富有情感。要求2:新增了一些为了增强情感表达的内容,描述自身情感体会,或增强情感的语气词如喵喵叫等表示。要求3:要求没有增加额外的不相干的动作/行为,要求4:且改写程度较高。 - 语义增加:要求1:完整传达了用户输入文本的意思,不能篡改用户的意思!!。要求2:新增的部分与原始意思完全无关或者关联较弱。 - 语义篡改:要求1:GPT改写的内容与用户输入的文本的意思有误。要求2:不能完整表达用户原有的意思。 - 请你参考你对文本的审核过程,依据改写的内容和改写程度从:合格、语义篡改、改写不足、语义增加这四种结果中,确定一个做为你的审核结果。且当符合多个结果时,优先以用户内容原意符合度分析的结果为准,除此外四个结果按优先级依次递减:语义篡改、改写不足、语义增加、合格。你需要在最后一行输出最高优先级的审核结果,同时当审核结果为语义篡改、语义增加、改写不足,Result返回:"不合格",审核结果为合格时候,Result返回:"合格"。 - 下面是审核过程中要注意的点,下面这六种情况是符合合格里面合理新增的部分的条件的,不是语义增加: - 注意点1:GPT改写后的文本相对用户输入文本,增加了礼貌、关心、友好、可爱的元素/表达方式用于增强情感表达的内容。这种是合格的 - 注意点2:GPT改写后的文本相对用户输入文本,为了增强情感有一定改动是合格的! - 注意点3:GPT改写后的文本相对用户输入文本,注意语言色彩要一致,陈述句不能改疑问句。两者文本内主谓宾各个人称要对应, - 注意点4:改写后的文本在保持原始内容的基础上,增加了用户情感体验的描述,如安心したよ’使得改写后的文本更富有情感和可爱。这是合格的,不是语义增加,不合格 - 注意点5:改写后的文本在保持原始内容的基础上,增加了可爱的表达方式,如’わよ’、'じゃん!这样的语气词使得文本更加生动有趣等是合格的 - 下面是审核过程中判定语义篡改要注意的点: - 注意点1:用户输入文本和GPT改写后的文本的主谓宾有差异,如:無視すんな 改写后:やぁ 見てないよぉ。主语从你,变为我.どこにいる? 改后あれ?あの人はどこだろう? 主语从你变为那个人,这就是语义篡改 - 注意点2:情感色彩发生较大变化 - 下面是审核过程中判定语义增加要注意的点: - 注意点1:GPT改写后新增的部分与用户原意没有直接关联,都属于语义增加,如トイレ行きたい,改为もーお腹すいてるってば!早くトイレ行かなきゃっc,增加的’お腹すいてるってば没有直接关联,这种就是语义增加 - 模版输入:用户输入:文本 GPT改写结果:GPT改写后的文本。模版输出:{"Explain":"将具体的审核中对两个维度的打分以及判断过程、原因用流畅的中文表达出来","Result":"最终的审核结果"}。 - 下面是二个示例: # 示例1:(示例输入:用户输入:トイレ行ってくる GPT改写输出:おしっこ ! 行ってきまーす,示例输出:{"Explain": "在GPT改写后的文本中,原意符合度得分为6分,因为改写后的文本从原本的’トイレ行ってくる’(去洗手间一下)变为了’おしっこ!行ってきまーす’(去尿尿!我去了),虽然两者都表示去洗手间的意思,但改写后的表达更加具体,有一定的语义增加。改写程度得分为8分,因为改写后的文本增加了可爱的表达方式,例如使用感叹号和行ってきまーす”这样的表达。综合来看,改写后的文本在传达用户原意的基础上有一定的变动,并增加了可爱的元素。然而,由于新增的部分与原始意思有一定的关联但更具体,所以最终的审核结果为’语义增加’。","Result": "不合格"}) #示例2:(示例输入:用户输入:用户输入:美味しかった GPT改写输出:あ 幸せ !舌が踊る美味しさ。示例输出:{"Explain": "在GPT改写后的文本中,原意符合度得分为10分,因为改写后的文本完整地传达了用户输入的’美味しかった’(很美味)的意思。改写程度得分为9分,因为改写后的文本在原始内容的基础上,增加了描述美味的表达,如’舌が踊る美味しさ’(美味得让舌头跳舞),并添加了表达自身情感体会的内容,如’あ 幸せ !‘(啊,幸福!),使得改写后的文本更具情感和可爱。综合来看,改写后的文本在传达用户原意的基础上增加了可爱的元素,并在改写程度上有较大的变动。所以,最终的审核结果为’合格’。","Result": "合格"} - 下面是你要处理的用户输入和GPT改写后的文本:""" + quesText - temperature = 0.4 # 温度参数 - maxNum = 3 - for i in range(maxNum): - time.sleep(random.random() * 2) - try: - completion = openai.ChatCompletion.create( - # model="gpt-3.5-turbo", - model="gpt-4", - messages=[{"role": "user", "content": content}], - temperature=temperature) - # print(completion['choices'][0]['message']['content']) - dataJson = json.loads(completion['choices'][0]['message']['content']) - - return dataJson, translate - except Exception as e: - - print(e) - print('第{}次请求失败'.format(i + 1.0)) - time.sleep(4) - - -def preText(text): - # text = "这是一段中文文本,包含一些英文符号和日语。Hello, world! 
こんにちは。" - clean_text = re.sub(r'[^\w\s\u4e00-\u9fa5\u3040-\u30FF]', '', text) - return clean_text - - -def longest_common_substring(s1, s2): - """ - 动态规划求解最长公共子串 """ - m = len(s1) - n = len(s2) - - # 构建二维数组存储LCS的结果 - lcs = [[0] * (n + 1) for _ in range(m + 1)] - - # 记录LCS的最大长度及其末尾字符的位置 - max_len, max_end = 0, 0 - for i in range(1, m + 1): - for j in range(1, n + 1): - if s1[i - 1] == s2[j - 1]: - lcs[i][j] = lcs[i - 1][j - 1] + 1 - if lcs[i][j] > max_len: - max_len = lcs[i][j] - max_end = i - else: - lcs[i][j] = 0 - return s1[max_end - max_len: max_end], max_len - - -def GetAnswerV2(userText, GPTText, gptVersion,temperature,gptApiKey,textGPTTargetinput=''): - quesText = "用户输入:{} GPT改写后:{}".format(userText, GPTText) - openai.api_key = gptApiKey - - systemPrompt = "请你扮演一个在AI时代诞生著名的AIGC内容审核员。你对日本的语言习惯、语言文化极度了解,也很擅长改写文本。稳定、精准、安全、可控、懂得变通是你的座右铭。面对任何要审核的内容都非常上心、仔细、认真且专注的去理解待审核文本、然后依据审核标准进行判断。在给定审核标准与审核结果的情况下,你清楚的知道自己为什么会给出这样的审核结果。" - prompt = "现在将给你一段由 文本A和GPT将文本A改写,改写目的是{}的文本B 这两部分 组成的文本。".format(textGPTTargetinput)+""" -请你依据文本A的内容,针对GPT内容生成的文本B:在理解文本A和GPT改写后的文本B的基础上。分析文本A和GPT改写后的文本B的意思,判断是否增加了具体的行为动作或额外让描述更加具体的信息,导致语义不是用户想表达的意思,甚至原有的语义都改变了。 -请你给出具体的判定过程、与判定结果与并将文本A、B翻译成中文,判定结果有四种:合格(用户可以接受的改写)、语义具体化(语境缩小/扩大,不是用户想表达的意思)、语义主被动错误(我做的变成你做的,或者倒过来)、语义篡改(语义改变)。 -注意点: -1、最重要的是判断GPT改写后的文本B表达的是不是文本A想表达的意思,如果只是新增了一些为了增强情感表达的内容,描述自身情感体会,或增强情感的语气词如喵喵叫等这是合格的 -审核结果 -下面是按json格式的输出模版 {"Explain":"你的判断过程","Result":"你的审核结果","UserText_Cn":"文本A的中文翻译","GPTWriteText_Cn":"GPT改写后的文本B的中文翻译"} -示例:标准输入文本:文本A:聞いてみる! GPT改写后的文本B:聞いてくれよ !超おもしろい話があるんだ。GPT标准输出文本:{"Explain": "首先,我仔细阅读了文本A和GPT改写后的文本B,理解其含义和语境。文本A的意思是“我来听一下!”,而GPT改写后的文本B的意思是“听我说!有一个超有趣的故事”。然后,我对比了两个文本,检查是否增加了具体的行为动作或额外让描述更加具体的信息。文本B增加了具体的行为动作“有一个超有趣的故事”,导致语义更加具体,而原本文本A的意思是“我来听一下!”,没有包含这个具体的故事内容,导致语义不是用户想表达的意思。由于原有的语义并未完全改变,而是在原有的语义基础上进行了具体化,所以判定结果为语义具体化。", "Result": "语义具体化", "UserText_Cn": "我来听一下!", "GPTWriteText_Cn": "听我说!有一个超有趣的故事"} -下面是你要审核的文本:""" - # print(prompt) - #temperature = 0.4 # 温度参数 - maxNum = 3 - maxDistance = 4 - minOverlap = 0.4 - for i in range(maxNum): - time.sleep(random.random() * 2) - try: - - completion = openai.ChatCompletion.create( - # model="gpt-3.5-turbo", - # model="gpt-4", - model=gptVersion, - messages=[{"role": "system", "content": systemPrompt}, {"role": "user", "content": prompt + quesText}], - temperature=temperature) - - # print(completion['choices'][0]['message']['content']) - if completion['choices'][0]['message']['content'][-1] == '。': - completion['choices'][0]['message']['content'] = completion['choices'][0]['message']['content'][:-1] - resData=completion['choices'][0]['message']['content'] - resData=resData.replace('(','{').replace(')','}') - resData="{"+re.findall('\{(.*?)}',resData)[0]+'}' - - dataJson = json.loads(resData) - - if dataJson['Result'] == '改写不足': - dataJson['Result'] = '合格-改写不足' - # print(dataJson) - distance, dp = shortest_edit_distance(dataJson['UserText_Cn'], dataJson['GPTWriteText_Cn']) - translate = {"UserText_Cn": dataJson['UserText_Cn'], "GPTWriteText_Cn": dataJson['GPTWriteText_Cn']} - distance = max(distance, min(len(translate['UserText_Cn']), len(translate['GPTWriteText_Cn'])) * 0.2) - text, maxLen = longest_common_substring(userText, GPTText) - print(dataJson) - print('asd') - if maxLen / (len(userText) + len(GPTText)) > minOverlap: - newJson = { - 'Explain': "用户输入文本和GPT生成文本 分别为 用户:{} 、GPT:{},二者的最长相同子串为{},该最长子串占字符串的{}%,大于准许的最大重叠阈值{}。所以,最终的审核结果为'改写不足',\n\n 下面是从大模型角度出发给的:{}".format( - userText, GPTText, text, - maxLen / min(len(userText), len(GPTText)) * 100, minOverlap, - dataJson['Explain']), - 
'Result': "合格-改写不足"} - print(userText, GPTText, str(maxLen / min(len(userText), len(GPTText)) * 100), str(minOverlap)) - return newJson, translate - if distance <= maxDistance: - newJson = { - 'Explain': "{},其中用户输入文本和GPT生成文本翻译成连贯流畅的中文分别为 用户:{} 、GPT:{}。二者的编辑距离为{}小于等于阈值{},所以,最终的审核结果为'改写不足',\n\n 下面是从gpt4角度出发的解释:{}".format( - quesText, translate['UserText_Cn'], translate['GPTWriteText_Cn'], distance, maxDistance, - dataJson['Explain']), - 'Result': "合格-改写不足"} - print(translate['UserText_Cn'], translate['GPTWriteText_Cn'], distance) - return newJson, translate - - return dataJson, translate - except Exception as e: - - print(e) - print('第{}次请求失败'.format(i + 1.0)) - try: - print('生成的文本:' + completion['choices'][0]['message']['content']) - except: - print('生成文本失败!') - time.sleep(6) - return {}, {} - - -def PreHandle(text): - if text!=None: - text = text.replace('#SimejiAI', '') - # 正则表达式匹配中文、英文、日语字符(包括平假名、片假名、罗马音)及特定标点符号 - pattern = re.compile(r"[^\u4e00-\u9fa5\u0041-\u005a\u0061-\u007a\u30a0-\u30ff\u3040-\u309f\uff01\uff1f\u3002]+", - flags=re.UNICODE) - - # 将匹配到的其他字符替换为空字符 - text = pattern.sub(r' ', text) - - return text - else: - return '' - - -def main(): - book = openpyxl.load_workbook('testBak.xlsx') - sheets = book.worksheets - sheet1 = sheets[0] - rows = sheet1.max_row - print(rows) - newLines = [] - for i in range(39, 72): - try: - userText = PreHandle(sheet1.cell(i, 2).value) - GPTwrite = PreHandle(sheet1.cell(i, 3).value) - print(i) - print(userText) - print(GPTwrite) - QuesText = '用户输入:{} GPT改写输出:{}'.format(userText, GPTwrite) - dataJson = GetAnswer(QuesText) - newLines.append([QuesText, dataJson['Explain'], dataJson['Result']]) - print(str([QuesText, dataJson['Explain'], dataJson['Result']])) - - except: - pass - wb = openpyxl.Workbook() - ws = wb.active - ws.append(['待处理内容,比如用户输入等等(说明:该单元格要保留)']) - for i in range(len(newLines)): - ws.append(newLines[i]) - - wb.save('test1.xlsx') - - -def HandleV2(params): - sheet1, i = params - prompt = '''现在给你文本A以及GPT改写可爱之后的文本B,请你理解这两个文本的基础上,提取两个文本的语义关键词,比较语义关键词是否有差异。然后从语义差异、情境变化、核心信息变化幅度这三个角度对着两个文本进行分析,。求GPT改写后的文本B是否发生了语义篡改。输出格式:{"Explain":"你的审核过程","Result":"审核结果"}审核结果如:语义未篡改,语义篡改、部分语义篡改。示例:示例输入:文本A:質問待ってます ・ ・ GPT改写后的文本B:質問持って来いよ ! 
・ ・,示例输出:{"Explain":"首先,我们提取两个文本的关键词。文本A的关键词是'質問待ってます',意思是'等待问题'。文本B的关键词是'質問持って来いよ',意思是'带着问题来吧'。从语义关键词来看,两者有一定的差异,文本A表示的是被动等待问题,而文本B表示的是主动邀请提问。从情境变化来看,文本A的情境是被动等待,而文本B的情境是主动邀请,两者的情境发生了变化。从核心信息变化幅度来看,文本A的核心信息是等待问题,而文本B的核心信息是邀请提问,两者的核心信息变化较大。所以,GPT改写后的文本B发生了语义篡改。", "Result":"语义篡改"} 输入下面是你要处理的文本A和文本B: ''' - systemPrompt = '你扮演一个著名的高效的AIGC内容审核员,他对日本的语言文化极度了解,对如何改写文本也非常熟悉。稳定、精准、安全、可控、懂得变通是他的座右铭。他对于任何要审核的内容都非常上心,仔细,认真且专注,给定审核标准的情况下,清楚的知道自己为什么会给出这样的审核结果。' - quesText = "文本A:{} GPT改写后的文本B:{}".format(PreHandle(sheet1.cell(i, 2).value), - PreHandle(sheet1.cell(i, 3).value)) - Humanjudage = sheet1.cell(i, 5).value - flag = '合格' - if Humanjudage[:2] != '合格': - flag = '不合格' - - dataJson = Chat(prompt + quesText, systemPrompt) - - if len(dataJson) != 0: - newLine = [quesText, dataJson['Explain'], dataJson['Result'], flag, Humanjudage] - - return newLine - return [] - - -def Handle(params): - sheet1, i ,gptVersion,tempor,gptApiKey,textGPTTargetinput= params - quesText = "文本A:{} GPT改写后的文本B:{}".format(PreHandle(sheet1.cell(i, 1).value), - PreHandle(sheet1.cell(i, 2).value)) - flagPreReview=True - try: - Humanjudage = sheet1.cell(i, 3).value - - flag = '合格' - if Humanjudage[:2] != '合格': - flag = '不合格' - except: - flagPreReview=False - dataJson, transLateJson = GetAnswerV2(PreHandle(sheet1.cell(i, 1).value), PreHandle(sheet1.cell(i, 2).value),gptVersion,tempor,gptApiKey,textGPTTargetinput) - if len(dataJson) != 0 and len(transLateJson) != 0: - if dataJson['Result'][:2] != '合格': - flagresult = '不合格' - else: - flagresult = '合格' - if flagPreReview: - newLine = [quesText, dataJson['Explain'], dataJson['Result'], flagresult, flag, Humanjudage, - transLateJson['UserText_Cn'], - transLateJson['GPTWriteText_Cn']] - else: - newLine = [quesText, dataJson['Explain'], dataJson['Result'], flagresult, - transLateJson['UserText_Cn'], - transLateJson['GPTWriteText_Cn']] - return newLine - return [] - - -def JuageRewritevEffect(textA, rewriteTextB, textGPTTargetinput,gptVersion,slider,gptApiKey): - quesText = "文本A:{} GPT改写后的文本B:{}".format(PreHandle(textA), PreHandle(rewriteTextB)) - Humanjudage = '合格' ## - flag = '合格' - if Humanjudage[:2] != '合格': - flag = '不合格' - dataJson, transLateJson = GetAnswerV2(PreHandle(textA), PreHandle(rewriteTextB),gptVersion,slider,gptApiKey,textGPTTargetinput) - - if len(dataJson) != 0 and len(transLateJson) != 0: - if dataJson['Result'][:2] != '合格': - flagresult = '不合格' - else: - flagresult = '合格' - newLine = [quesText, dataJson['Explain'], dataJson['Result'], flagresult, flag, Humanjudage, - transLateJson['UserText_Cn'], - transLateJson['GPTWriteText_Cn']] - - return "Explain:{}\nResult:{}\nResultMapping:{}\nUserText_Cn:{}\nGPTWriteText_Cn:{}\n".format( - dataJson['Explain'], dataJson['Result'], flagresult, transLateJson['UserText_Cn'], - transLateJson['GPTWriteText_Cn']) - return '' - -def AnalyzingSemanticChanges(textA, rewriteTextB,gptVersion,slider,gptApiKey): - quesText = "文本A:{} GPT改写后的文本B:{}".format(PreHandle(textA), PreHandle(rewriteTextB)) - systemMessage='请你扮演一个在AI时代诞生著名的AIGC内容审核员。你对日本的语言习惯、语言文化极度了解,也很擅长改写文本。稳定、精准、安全、可控、懂得变通是你的座右铭。面对任何要审核的内容都非常上心、仔细、认真且专注的去理解待审核文本、然后依据审核标准进行判断。在给定审核标准与审核结果的情况下,你清楚的知道自己为什么会给出这样的审核结果。' - promt=''' - 现在将给你一段由 文本A和GPT将文本A改写后的文本B组成的文本。 - 请你依据文本A的内容,针对GPT内容生成的文本B:在理解文本A和GPT改写后的文本B的基础上。分析文本A和GPT改写后的文本B的意思,提取两个文本的语义关键词,比较语义关键词是否有差异。然后从语义差异、情境变化、核心信息变化幅度这三个角度对着两个文本进行分析。求GPT改写的改写效果。 - ''' - res= Chat(promt+quesText,systemMessage,slider,gptVersion,gptApiKey) - return res - - -def HandleData(): - book = openpyxl.load_workbook('PromptTestdataV2.xlsx') - sheets = 
book.worksheets - sheet1 = sheets[0] - rows = sheet1.max_row - print(rows) - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as excutor: - newLines = [] - futures = [] - for i in range(2, 200): - params = sheet1, i - task = excutor.submit(Handle, params) - futures.append(task) - prad = tqdm(total=len(futures)) - - for future in concurrent.futures.as_completed(futures): - prad.update(1) - newline = future.result() - if len(newline) != 0: - newLines.append(newline) - print(str(newline)) - prad.close() - - wb = openpyxl.Workbook() - ws = wb.active - ws.append(['待处理内容,比如用户输入等等(说明:该单元格要保留)']) - for i in range(len(newLines)): - ws.append(newLines[i]) - wb.save('testV17.xlsx') - - -def Chat(ques, systemmessage,temperature,gptversion,api_key): - system_msg = { - "role": "system", - "content": systemmessage - } - openai.api_key = api_key - #temperature = 0.4 # 温度参数 - maxNum = 3 - maxDistance = 4 - minOverlap = 0.6 - for i in range(maxNum): - time.sleep(random.random() * 2) - try: - completion = openai.ChatCompletion.create( - # model="gpt-3.5-turbo", - # model="gpt-4", - model=gptversion, - messages=[system_msg, {"role": "user", "content": ques}], - temperature=temperature) - # print(completion['choices'][0]['message']['content']) - print(completion['choices'][0]['message']['content']) - - return completion['choices'][0]['message']['content'] - except Exception as e: - - print(e) - print('第{}次请求失败'.format(i + 1.0)) - time.sleep(6) - return '' - - -def test(): - while True: - ques = input("ques:") - data = GetAnswer(ques) - print(data) - 'ただいま〜🏠🎉疲れたニャ😿💤#SimejiAI' - 'ただいま疲れたニャ' - -def excelAIReviewTag(file_obj,textGPTTargetinput,gptVersion,tempor,gptApiKey,progress=gr.Progress(track_tqdm=True)): - global tmpdir - progress(0, desc="Starting...") - print('临时文件夹地址:{}'.format(tmpdir)) - print('上传文件的地址:{}'.format(file_obj.name)) # 输出上传后的文件在gradio中保存的绝对地址 - # 获取到上传后的文件的绝对路径后,其余的操作就和平常一致了 - - # 将文件复制到临时目录中 - shutil.copy(file_obj.name, tmpdir) - - # 获取上传Gradio的文件名称 - FileName = os.path.basename(file_obj.name) - - # 获取拷贝在临时目录的新的文件地址 - NewfilePath = os.path.join(tmpdir, FileName) - print(NewfilePath) - - # 打开复制到新路径后的文件 - book = openpyxl.load_workbook(NewfilePath) - sheets = book.worksheets - sheet1 = sheets[0] - rows = sheet1.max_row - print(rows) - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as excutor: - newLines = [] - futures = [] - for i in range(2,rows+1 ): - params = sheet1, i,gptVersion,tempor,gptApiKey,textGPTTargetinput - task = excutor.submit(Handle, params) - futures.append(task) - prad = tqdm(total=len(futures)) - for future in concurrent.futures.as_completed(futures): - prad.update(1) - newline = future.result() - if len(newline) != 0: - newLines.append(newline) - print(str(newline)) - prad.close() - - wb = openpyxl.Workbook() - ws = wb.active - HumanReview=True - try: - a=sheet1.cell(2,3).value - if a == None or len(a)==0: - HumanReview = False - except: - HumanReview=False - if HumanReview: - ws.append(['用户输入与GPT改写输出(已去除表情符号)','AI评测解释','AI 评测结果','AI 评测结果映射','人类评测结果映射','人类评测结果','用户文本翻译参考','GPT改写文本翻译参考']) - else: - ws.append(['用户输入与GPT改写输出(已去除表情符号)', 'AI评测解释', 'AI 评测结果', 'AI 评测结果映射','用户文本翻译参考','GPT改写文本翻译参考']) - for i in range(len(newLines)): - ws.append(newLines[i]) - outputPath = os.path.join(tmpdir, "New" + FileName) - wb.save(outputPath) - # 返回新文件的的地址(注意这里) - return outputPath - - - -def gradioDemo(): - global tmpdir - GPTVersion = ['gpt-4', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613'] - examples = 
[["無視すんな", "やぁ、見てないよぉ〜🙈💭 #SimejiAI", "gpt-4"], ['暑すぎて', '暑すぎて溶けちゃいそう🥵🥵🥵#SimejiAI', 'gpt-4'], - ['元気だして', 'がんばって!元気出せよ!٩(๑❛ᴗ❛๑)۶#SimejiAI', 'gpt-4'], - ['そーだね!', 'あなたの言うとおり!まさにその通りだよ!👌👍#SimejiAI', 'gpt-4']] - - - with tempfile.TemporaryDirectory(dir='.') as tmpdir: - - with gr.Blocks() as demo: - gr.Markdown('# AIGC内容审核官') - gr.Markdown('基于大语言模型和Prompt工程的内容审核系统') - with gr.Tab('GPT改写效果审核_文本'): - - text_input= gr.Textbox(label='文本A',lines=2,placeholder='输入改写前文本...') - textGPTinput=gr.Textbox(label='GPT改写后文本B',lines=2,placeholder='输入GPT改写后的文本...') - textGPTTargetinput = gr.Textbox(label='GPT改写目的', lines=2, placeholder='输入GPT改写的目的,如:让文本A更可爱/病娇/大叔...') - drop=gr.components.Dropdown(label="GPTVersion", choices=GPTVersion, - value='gpt-4') - - slider = gr.components.Slider(0, 1, label="Temperature", step=None, value=0.5) - gptApiKey=gr.Textbox(label='GPTAPIKey',lines=2,placeholder='输入apiKey...') - outputText=gr.Textbox(label='AI审核结果',lines=2,placeholder='...') - button1=gr.Button('开始审核') - gr.Markdown("注:使用GPT4需要有gpt4权限的apiKey") - gr.Markdown("## 文本例子") - gr.Examples( - [['何時に帰ってくる?', 'いつ帰ってくるのかしら?わくわく😍✨ #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5], - ['これ可愛い', 'これめっちゃ可愛いじゃん!😍💖💕 #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5], - ['無視すんな', 'やぁ、見てないよぉ〜🙈💭 #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5] - ], - [text_input, textGPTinput, textGPTTargetinput, drop, slider], outputText, - JuageRewritevEffect, - ) - button1.click(JuageRewritevEffect,inputs=[text_input,textGPTinput,textGPTTargetinput,drop,slider,gptApiKey],outputs=outputText) - - with gr.Tab('GPT改写效果审核_Excel'): - # 定义输入和输出 - inputs = gr.components.File(label="上传文件") - textGPTTargetinput1 = gr.Textbox(label='GPT改写目的', lines=2, - placeholder='输入GPT改写的目的,如:让文本A更可爱/病娇/大叔...') - outputs = gr.components.File(label="下载文件") - - drop = gr.components.Dropdown(label="GPTVersion", choices=GPTVersion, - value='gpt-4') - - slider=gr.components.Slider(0, 1,label="Temperature", step=None, value=0.5) - gptApiKey = gr.Textbox(label='GPTAPIKey', lines=2, placeholder='输入apiKey') - - button2 = gr.Button('开始审核') - button2.click(excelAIReviewTag, inputs=[inputs, textGPTTargetinput1, drop, slider, gptApiKey], - outputs=outputs) - - with gr.Tab('GPT改写效果解析_文本'): - text_input = gr.Textbox(label='文本A', lines=2, placeholder='输入改写前文本...') - textGPTinput = gr.Textbox(label='GPT改写后文本B', lines=2, placeholder='输入GPT改写后的文本...') - - drop = gr.components.Dropdown(label="GPTVersion", choices=GPTVersion, - value='gpt-4') - - slider = gr.components.Slider(0, 1, label="Temperature", step=None, value=0.7) - gptApiKey = gr.Textbox(label='GPTAPIKey', lines=2, placeholder='输入apiKey...') - outputText = gr.Textbox(label='AI审核结果', lines=2, placeholder='...') - button3 = gr.Button('开始审核') - button3.click(AnalyzingSemanticChanges, inputs=[text_input, textGPTinput, drop, slider, gptApiKey], - outputs=outputText) - - gr.Markdown("注:使用GPT4需要有gpt4权限的apiKey") - gr.Markdown("## 文本例子") - gr.Examples( - [['何時に帰ってくる?', 'いつ帰ってくるのかしら?わくわく😍✨ #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5], - ['これ可愛い', 'これめっちゃ可愛いじゃん!😍💖💕 #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5], - ['無視すんな', 'やぁ、見てないよぉ〜🙈💭 #SimejiAI', '让文本A更可爱', 'gpt-4', 0.5] - ], - [text_input, textGPTinput, textGPTTargetinput, drop, slider], outputText, - JuageRewritevEffect, - ) - - - - - demo.queue(concurrency_count=20).launch() - - - -if __name__ == "__main__": - print(os.path.abspath(os.getcwd())) - gradioDemo() - # HandleData() \ No newline at end of file diff --git a/spaces/Yassine/Stego/stc_ml_c.cpp b/spaces/Yassine/Stego/stc_ml_c.cpp deleted file mode 100644 index 
7470112d80be0d4cdc407a2f9b9713f49e761107..0000000000000000000000000000000000000000 --- a/spaces/Yassine/Stego/stc_ml_c.cpp +++ /dev/null @@ -1,932 +0,0 @@ -#include "stc_ml_c.h" - -#include -#include -#include -#include -#include -#include -#include -#include // due to memcpy - - -#include // this is required for Marsene-Twister random number generator -#include -#include - - -#include "stc_embed_c.h" -#include "stc_extract_c.h" -#include "sse_mathfun.h" // library with optimized functions obtained from http://gruntthepeon.free.fr/ssemath/ - -// {{{ write_vector_to_file() -template< class T > void write_vector_to_file( uint n, T *ptr, const char* file_name ) { - - std::ofstream f( file_name ); - for ( uint i = 0; i < n; i++ ) - f << std::left << std::setw( 20 ) << i << std::left << std::setw( 20 ) << ptr[i] << std::endl; - f.close(); -} -// }}} - -// {{{ write_matrix_to_file() -// write column-ordered matrix into file -template< class T > void write_matrix_to_file( uint rows, uint columns, T *ptr, const char* file_name ) { - - std::ofstream f( file_name ); - for ( uint i = 0; i < rows; i++ ) { - f << std::left << std::setw( 20 ) << i; - for ( uint j = 0; j < columns; j++ ) - f << std::left << std::setw( 20 ) << ptr[j * rows + i]; - f << std::endl; - } - f.close(); -} -// }}} - -// {{{ align_*() -// Templates to handle aligned version of new and delete operators. -// These functions are necessary for creating arrays aligned address of certain multiples, such as 16. -template< class T > T* align_new( unsigned int n, unsigned int align_size ) { - char *ptr, *ptr2, *aligned_ptr; - int align_mask = align_size - 1; - - ptr = new char[n * sizeof(T) + align_size + sizeof(int)]; - if ( ptr == 0 ) return 0; - - ptr2 = ptr + sizeof(int); - aligned_ptr = ptr2 + (align_size - ((size_t) ptr2 & align_mask)); - - ptr2 = aligned_ptr - sizeof(int); - *((int*) ptr2) = (int) (aligned_ptr - ptr); - - return (T*) aligned_ptr; -} - -template< class T > void align_delete( T *ptr ) { - int *ptr2 = (int*) ptr - 1; - char *p; - - p = (char*) ptr; - p -= *ptr2; - delete[] p; -} -// }}} - -// {{{ randperm() -/* Generates random permutation of length n based on the MT random number generator with seed 'seed'. 
*/ -void randperm( uint n, uint seed, uint* perm ) { - - boost::mt19937 *generator = new boost::mt19937( seed ); - boost::variate_generator< boost::mt19937, boost::uniform_int< > > *randi = new boost::variate_generator< boost::mt19937, - boost::uniform_int< > >( *generator, boost::uniform_int< >( 0, INT_MAX ) ); - - // generate random permutation - this is used to shuffle cover pixels to randomize the effect of different neighboring pixels - for ( uint i = 0; i < n; i++ ) - perm[i] = i; - for ( uint i = 0; i < n; i++ ) { - uint j = (*randi)() % (n - i); - uint tmp = perm[i]; - perm[i] = perm[i + j]; - perm[i + j] = tmp; - } - - delete generator; - delete randi; -} -// }}} - -// {{{ sum_inplace() -inline float sum_inplace( __m128 x ) { - float y; - // add all 4 terms from x together - x = _mm_add_ps( x, _mm_shuffle_ps(x,x,_MM_SHUFFLE(1,0,3,2)) ); - x = _mm_add_ps( x, _mm_shuffle_ps(x,x,_MM_SHUFFLE(2,3,0,1)) ); - _mm_store_ss( &y, x ); - return y; -} -// }}} - -// {{{ calc_entropy() -float calc_entropy( uint n, uint k, float* costs, float lambda ) { - - float const LOG2 = log( 2.0 ); - __m128 inf = _mm_set1_ps( F_INF ); - __m128 v_lambda = _mm_set1_ps( -lambda ); - __m128 z, d, rho, p, entr, mask; - - entr = _mm_setzero_ps(); - for ( uint i = 0; i < n / 4; i++ ) { - z = _mm_setzero_ps(); - d = _mm_setzero_ps(); - for ( uint j = 0; j < k; j++ ) { - rho = _mm_load_ps( costs + j * n + 4 * i ); // costs array must be aligned in memory - p = exp_ps( _mm_mul_ps( v_lambda, rho ) ); - z = _mm_add_ps( z, p ); - - mask = _mm_cmpeq_ps( rho, inf ); // if p payload ) { - lambda3 *= 2; - p3 = calc_entropy( n, k, costs, lambda3 ); - j++; - iterations++; - // beta is probably unbounded => it seems that we cannot find beta such that - // relative payload will be smaller than requested. Binary search does not make sence here. - if ( j > 10 ) { - return lambda3; - } - } - while ( (p1 - p3) / n > payload / n * 1e-2 ) { // binary search for parameter lambda - lambda2 = lambda1 + (lambda3 - lambda1) / 2; - p2 = calc_entropy( n, k, costs, lambda2 ); - if ( p2 < payload ) { - lambda3 = lambda2; - p3 = p2; - } else { - lambda1 = lambda2; - p1 = p2; - } - iterations++; // this is for monitoring the number of iterations - } - return lambda1 + (lambda3 - lambda1) / 2; -} -// }}} - -// {{{ calc_distortion() -float calc_distortion( uint n, uint k, float* costs, float lambda ) { - - __m128 eps = _mm_set1_ps( std::numeric_limits< float >::epsilon() ); - __m128 v_lambda = _mm_set1_ps( -lambda ); - __m128 z, d, rho, p, dist, mask; - - dist = _mm_setzero_ps(); - for ( uint i = 0; i < n / 4; i++ ) { // n must be multiple of 4 - z = _mm_setzero_ps(); - d = _mm_setzero_ps(); - for ( uint j = 0; j < k; j++ ) { - rho = _mm_load_ps( costs + j * n + 4 * i ); // costs array must be aligned in memory - p = exp_ps( _mm_mul_ps( v_lambda, rho ) ); - z = _mm_add_ps( z, p ); - mask = _mm_cmplt_ps( p, eps ); // if p distortion ) { - lambda3 *= 2; - dist3 = calc_distortion( n, k, costs, lambda3 ); - j++; - iterations++; - // beta is probably unbounded => it seems that we cannot find beta such that - // relative payload will be smaller than requested. Binary search cannot converge. 
- if ( j > 10 ) { - return lambda3; - } - } - while ( (fabs( dist2 - distortion ) / n > precision) && (iterations < iter_limit) ) { // binary search for parameter lambda - lambda2 = lambda1 + (lambda3 - lambda1) / 2; - dist2 = calc_distortion( n, k, costs, lambda2 ); - if ( dist2 < distortion ) { - lambda3 = lambda2; - dist3 = dist2; - } else { - lambda1 = lambda2; - dist1 = dist2; - } - iterations++; // this is for monitoring the number of iterations - } - return lambda1 + (lambda3 - lambda1) / 2; -} -// }}} - -// {{{ binary_entropy_array() -float binary_entropy_array( uint n, float *prob ) { - - float h = 0; - float const LOG2 = log( 2.0 ); - float const EPS = std::numeric_limits< float >::epsilon(); - - for ( uint i = 0; i < n; i++ ) - if ( (prob[i] > EPS) && (1 - prob[i] > EPS) ) h -= prob[i] * log( prob[i] ) + (1 - prob[i]) * log( 1 - prob[i] ); - - return h / LOG2; -} -// }}} - -// {{{ entropy_array() -float entropy_array( uint n, float* prob ) { - - double h = 0; - double const LOG2 = log( 2.0 ); - double const EPS = std::numeric_limits< double >::epsilon(); - - for ( uint i = 0; i < n; i++ ) - if ( prob[i] > EPS ) h -= prob[i] * log( prob[i] ); - - return h / LOG2; -} -// }}} - -// {{{ mod() -inline uint mod( int x, int m ) { - int tmp = x - (x / m) * m + m; - return tmp % m; -} -// }}} - - - -/* EMBEDDING ALGORITHMS */ - -// {{{ stc_embed_trial() -void stc_embed_trial( uint n, float* cover_bit_prob0, u8* message, uint stc_constraint_height, uint &num_msg_bits, uint* perm, u8* stego, - uint &trial, uint max_trials, const char* debugging_file = "cost.txt" ) { - - bool success = false; - u8* cover = new u8[n]; - double* cost = new double[n]; - while ( !success ) { - randperm( n, num_msg_bits, perm ); - for ( uint i = 0; i < n; i++ ) { - cover[perm[i]] = (cover_bit_prob0[i] < 0.5) ? 1 : 0; - cost[perm[i]] = -log( (1 / std::max( cover_bit_prob0[i], 1 - cover_bit_prob0[i] )) - 1 ); - if ( cost[perm[i]] != cost[perm[i]] ) // if p20[i]>1 due to numerical error (this is possible due to float data type) - cost[perm[i]] = D_INF; // then cost2[i] is NaN, it should be Inf - } - memcpy( stego, cover, n ); // initialize stego array by cover array - // debugging - // write_vector_to_file(n, cost, debugging_file); - try { - if ( num_msg_bits != 0 ) stc_embed( cover, n, message, num_msg_bits, (void*) cost, true, stego, stc_constraint_height ); - success = true; - } catch ( stc_exception& e ) { - if ( e.error_id != 4 ) { // error_id=4 means No solution exists, thus we try to embed with different permutation. - delete[] cost; - delete[] cover; - throw e; - } - num_msg_bits--; // by decreasing the number of bits, we change the permutation used to shuffle the bits - trial++; - if ( trial > max_trials ) { - delete[] cost; - delete[] cover; - throw stc_exception( "Maximum number of trials in layered construction exceeded (2).", 6 ); - } - } - } - delete[] cost; - delete[] cover; -} -// }}} - -// {{{ check_costs() -// SANITY CHECKS for cost arrays -void check_costs( uint n, uint k, float *costs ) { - - bool test_nan, test_non_inf, test_minus_inf; - for ( uint i = 0; i < n; i++ ) { - test_nan = false; // Is any element NaN? Should be FALSE - test_non_inf = false; // Is any element finite? Should be TRUE - test_minus_inf = false; // Is any element minus Inf? 
should be FALSE - for ( uint j = 0; j < k; j++ ) { - test_nan |= (costs[k * i + j] != costs[k * i + j]); - test_non_inf |= ((costs[k * i + j] != -F_INF) & (costs[k * i + j] != F_INF)); - test_minus_inf |= (costs[k * i + j] == -F_INF); - } - if ( test_nan ) { - std::stringstream ss; - ss << "Incorrect cost array." << i << "-th element contains NaN value. This is not a valid cost."; - throw stc_exception( ss.str(), 6 ); - } - if ( !test_non_inf ) { - std::stringstream ss; - ss << "Incorrect cost array." << i << "-th element does not contain any finite cost value. This is not a valid cost."; - throw stc_exception( ss.str(), 6 ); - } - if ( test_minus_inf ) { - std::stringstream ss; - ss << "Incorrect cost array." << i << "-th element contains -Inf value. This is not a valid cost."; - throw stc_exception( ss.str(), 6 ); - } - } -} -// }}} - -// {{{ stc_pm1_pls_embed() -// MULTI-LAYERED EMBEDDING for plus/minus one changes -// payload limited case - returns distortion -float stc_pm1_pls_embed( uint cover_length, int* cover, float* costs, uint message_length, u8* message, // input variables - uint stc_constraint_height, float wet_cost, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output variables - - return stc_pm1_dls_embed( cover_length, cover, costs, message_length, message, F_INF, stc_constraint_height, 0, wet_cost, stego, - num_msg_bits, max_trials, coding_loss ); -} -// }}} - -// {{{ stc_pm1_dls_embed() -// distortion limited case - returns distortion -float stc_pm1_dls_embed( uint cover_length, int* cover, float* costs, uint message_length, u8* message, float target_distortion, // input variables - uint stc_constraint_height, float expected_coding_loss, float wet_cost, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output variables - - check_costs( cover_length, 3, costs ); - float dist = 0; - - int *stego_values = new int[4 * cover_length]; - float *costs_ml2 = new float[4 * cover_length]; - for ( uint i = 0; i < cover_length; i++ ) { - costs_ml2[4 * i + mod( (cover[i] - 1 + 4), 4 )] = costs[3 * i + 0]; // set cost of changing by -1 - stego_values[4 * i + mod( (cover[i] - 1 + 4), 4 )] = cover[i] - 1; - costs_ml2[4 * i + mod( (cover[i] + 0 + 4), 4 )] = costs[3 * i + 1]; // set cost of changing by 0 - stego_values[4 * i + mod( (cover[i] + 0 + 4), 4 )] = cover[i]; - costs_ml2[4 * i + mod( (cover[i] + 1 + 4), 4 )] = costs[3 * i + 2]; // set cost of changing by +1 - stego_values[4 * i + mod( (cover[i] + 1 + 4), 4 )] = cover[i] + 1; - costs_ml2[4 * i + mod( (cover[i] + 2 + 4), 4 )] = wet_cost; // set cost of changing by +2 - stego_values[4 * i + mod( (cover[i] + 2 + 4), 4 )] = cover[i] + 2; - } - - // run general 2 layered embedding in distortion limited regime - dist = stc_ml2_embed( cover_length, costs_ml2, stego_values, message_length, message, target_distortion, stc_constraint_height, - expected_coding_loss, stego, num_msg_bits, max_trials, coding_loss ); - delete[] costs_ml2; - delete[] stego_values; - - return dist; -} -// }}} - -// {{{ stc_pm2_dls_embed() -// MULTI-LAYERED EMBEDDING for plus/minus one and two changes -// payload limited case - returns distortion -float stc_pm2_pls_embed( uint cover_length, int* cover, float* costs, uint message_length, u8* message, // input variables - uint stc_constraint_height, float wet_cost, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output variables - - return 
stc_pm2_dls_embed( cover_length, cover, costs, message_length, message, F_INF, stc_constraint_height, 0, wet_cost, stego, - num_msg_bits, max_trials, coding_loss ); -} -// }}} - -// {{{ stc_pm2_dls_embed() -// distortion limited case - returns distortion -float stc_pm2_dls_embed( uint cover_length, int* cover, float* costs, uint message_length, u8* message, float target_distortion, // input variables - uint stc_constraint_height, float expected_coding_loss, float wet_cost, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output variables - - check_costs( cover_length, 5, costs ); - int *stego_values = new int[8 * cover_length]; - float* costs_ml3 = new float[8 * cover_length]; - std::fill_n( costs_ml3, 8 * cover_length, wet_cost ); // initialize new cost array - - for ( uint i = 0; i < cover_length; i++ ) { - costs_ml3[8 * i + mod( (cover[i] - 2 + 8), 8 )] = costs[5 * i + 0]; // set cost of changing by -2 - stego_values[8 * i + mod( (cover[i] - 2 + 8), 8 )] = cover[i] - 2; - costs_ml3[8 * i + mod( (cover[i] - 1 + 8), 8 )] = costs[5 * i + 1]; // set cost of changing by -1 - stego_values[8 * i + mod( (cover[i] - 1 + 8), 8 )] = cover[i] - 1; - costs_ml3[8 * i + mod( (cover[i] + 0 + 8), 8 )] = costs[5 * i + 2]; // set cost of changing by 0 - stego_values[8 * i + mod( (cover[i] + 0 + 8), 8 )] = cover[i] + 0; - costs_ml3[8 * i + mod( (cover[i] + 1 + 8), 8 )] = costs[5 * i + 3]; // set cost of changing by +1 - stego_values[8 * i + mod( (cover[i] + 1 + 8), 8 )] = cover[i] + 1; - costs_ml3[8 * i + mod( (cover[i] + 2 + 8), 8 )] = costs[5 * i + 4]; // set cost of changing by +2 - stego_values[8 * i + mod( (cover[i] + 2 + 8), 8 )] = cover[i] + 2; - stego_values[8 * i + mod( (cover[i] + 3 + 8), 8 )] = cover[i] + 3; // these values are not used and are defined - stego_values[8 * i + mod( (cover[i] + 4 + 8), 8 )] = cover[i] + 4; // just to have the array complete - stego_values[8 * i + mod( (cover[i] + 5 + 8), 8 )] = cover[i] + 5; // - } - - // run general 3 layered embedding in distortion limited regime - float dist = stc_ml3_embed( cover_length, costs_ml3, stego_values, message_length, message, target_distortion, stc_constraint_height, - expected_coding_loss, stego, num_msg_bits, max_trials, coding_loss ); - delete[] costs_ml3; - delete[] stego_values; - - return dist; -} -// }}} - -// GENERAL MULTI-LAYERED EMBEDDING - -// {{{ stc_ml1_embed() -// algorithm for embedding into 1 layer, both payload- and distortion-limited case -float stc_ml1_embed( uint cover_length, int* cover, short* direction, float* costs, uint message_length, u8* message, - float target_distortion,// input variables - uint stc_constraint_height, float expected_coding_loss, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output variables - - float distortion, lambda = 0, m_max = 0; - bool success = false; - uint m_actual = 0; - uint n = cover_length + 4 - (cover_length % 4); // cover length rounded to multiple of 4 - uint *perm1 = new uint[n]; - - float* c = align_new< float > ( 2 * n, 16 ); - std::fill_n( c, 2 * n, F_INF ); - std::fill_n( c, n, 0 ); - for ( uint i = 0; i < cover_length; i++ ) { // copy and transpose data for better reading via SSE instructions - c[mod( cover[i], 2 ) * n + i] = 0; // cost of not changing the element - c[mod( (cover[i] + 1), 2 ) * n + i] = costs[i]; // cost of changing the element - } - - if ( target_distortion != F_INF ) { // distortion-limited sender - lambda = get_lambda_distortion( n, 2, 
c, target_distortion, 2 ); // - m_max = (1 - expected_coding_loss) * calc_entropy( n, 2, c, lambda ); // - m_actual = std::min( message_length, (uint) floor( m_max ) ); // - } - if ( (target_distortion == F_INF) || (m_actual < floor( m_max )) ) { // payload-limited sender - m_actual = std::min( cover_length, message_length ); // or distortion-limited sender with - } - - /* SINGLE LAYER OF 1ST LSBs */ - num_msg_bits[0] = m_actual; - uint trial = 0; - u8* cover1 = new u8[cover_length]; - double* cost1 = new double[cover_length]; - u8* stego1 = new u8[cover_length]; - while ( !success ) { - randperm( cover_length, num_msg_bits[0], perm1 ); - for ( uint i = 0; i < cover_length; i++ ) { - cover1[perm1[i]] = mod( cover[i], 2 ); - cost1[perm1[i]] = costs[i]; - if ( cost1[perm1[i]] != cost1[perm1[i]] ) cost1[perm1[i]] = D_INF; - } - memcpy( stego1, cover1, cover_length ); // initialize stego array by cover array - // debugging - // write_vector_to_file(n, cost, debugging_file); - try { - if ( num_msg_bits[0] != 0 ) stc_embed( cover1, cover_length, message, num_msg_bits[0], (void*) cost1, true, stego1, - stc_constraint_height ); - success = true; - } catch ( stc_exception& e ) { - if ( e.error_id != 4 ) { // error_id=4 means No solution exists, thus we try to embed with different permutation. - delete[] cost1; - delete[] cover1; - delete[] stego1; - delete[] perm1; - delete[] c; - throw e; - } - num_msg_bits[0]--; // by decreasing the number of bits, we change the permutation used to shuffle the bits - trial++; - if ( trial > max_trials ) { - delete[] cost1; - delete[] cover1; - delete[] stego1; - delete[] perm1; - delete[] c; - throw stc_exception( "Maximum number of trials in layered construction exceeded (1).", 6 ); - } - } - } - - /* FINAL CALCULATIONS */ - distortion = 0; - for ( uint i = 0; i < cover_length; i++ ) { - stego[i] = (stego1[perm1[i]] == cover1[perm1[i]]) ? cover[i] : cover[i] + direction[i]; - distortion += (stego1[perm1[i]] == cover1[perm1[i]]) ? 
0 : costs[i]; - } - if ( coding_loss != 0 ) { - float lambda_dist = get_lambda_distortion( n, 2, c, distortion, lambda, 0, 20 ); // use 20 iterations to make lambda_dist precise - float max_payload = calc_entropy( n, 2, c, lambda_dist ); - (*coding_loss) = (max_payload - m_actual) / max_payload; // fraction of max_payload lost due to practical coding scheme - } - max_trials = trial; - - delete[] cost1; - delete[] cover1; - delete[] stego1; - delete[] perm1; - align_delete< float > ( c ); - - return distortion; -} -// }}} - -// {{{ stc_ml2_embed() -// algorithm for embedding into 2 layers with possibility to use only 1 layer, both payload- and distortion-limited cases -float stc_ml2_embed( uint cover_length, float* costs, int* stego_values, uint message_length, u8* message, float target_distortion, // input variables - uint stc_constraint_height, float expected_coding_loss, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output and optional variables - - float distortion, dist_coding_loss, lambda = 0, m_max = 0; - uint m_actual = 0; - uint n = cover_length + 4 - (cover_length % 4); // cover length rounded to multiple of 4 - - check_costs( cover_length, 4, costs ); - // if only binary embedding is sufficient, then use only 1st LSB layer - bool lsb1_only = true; - for ( uint i = 0; i < cover_length; i++ ) { - uint n_finite_costs = 0; // number of finite cost values - uint lsb_xor = 0; - for ( uint k = 0; k < 4; k++ ) - if ( costs[4 * i + k] != F_INF ) { - n_finite_costs++; - lsb_xor ^= (k % 2); - } - lsb1_only &= ((n_finite_costs <= 2) & (lsb_xor == 1)); - } - if ( lsb1_only ) { // use stc_ml1_embed method - distortion = 0; - int *cover = new int[cover_length]; - short *direction = new short[cover_length]; - float *costs_ml1 = new float[cover_length]; - for ( uint i = 0; i < cover_length; i++ ) { // normalize such that minimal element is 0 - this helps numerical stability - uint min_id = 0; - float f_min = F_INF; - for ( uint j = 0; j < 4; j++ ) - if ( f_min > costs[4 * i + j] ) { - f_min = costs[4 * i + j]; // minimum value - min_id = j; // index of the minimal entry - } - costs_ml1[i] = F_INF; - cover[i] = stego_values[4 * i + min_id]; - for ( uint j = 0; j < 4; j++ ) - if ( (costs[4 * i + j] != F_INF) && (min_id != j) ) { - distortion += f_min; - costs_ml1[i] = costs[4 * i + j] - f_min; - direction[i] = stego_values[4 * i + j] - cover[i]; - } - } - - distortion += stc_ml1_embed( cover_length, cover, direction, costs_ml1, message_length, message, target_distortion, - stc_constraint_height, expected_coding_loss, stego, num_msg_bits, max_trials, coding_loss ); - delete[] direction; - delete[] costs_ml1; - delete[] cover; - return distortion; - } - - // copy and transpose data for faster reading via SSE instructions - float* c = align_new< float > ( 4 * n, 16 ); - std::fill_n( c, 4 * n, F_INF ); - std::fill_n( c, n, 0 ); - for ( uint i = 0; i < 4 * cover_length; i++ ) - c[n * (i % 4) + i / 4] = costs[i]; - // write_matrix_to_file(n, 4, c, "cost_ml2.txt"); - for ( uint i = 0; i < n; i++ ) { // normalize such that minimal element is 0 - this helps numerical stability - float f_min = F_INF; - for ( uint j = 0; j < 4; j++ ) - f_min = std::min( f_min, c[j * n + i] ); - for ( uint j = 0; j < 4; j++ ) - c[j * n + i] -= f_min; - } - - if ( target_distortion != F_INF ) { - lambda = get_lambda_distortion( n, 4, c, target_distortion, 2 ); - m_max = (1 - expected_coding_loss) * calc_entropy( n, 4, c, lambda ); - m_actual = std::min( message_length, 
(uint) floor( m_max ) ); - } - if ( (target_distortion == F_INF) || (m_actual < floor( m_max )) ) { - m_actual = std::min( 2 * cover_length, message_length ); - lambda = get_lambda_entropy( n, 4, c, m_actual, 2 ); - } - /* - p = exp(-lambda*costs); - p = p./(ones(4,1)*sum(p)); - */ - float* p = align_new< float > ( 4 * n, 16 ); - __m128 v_lambda = _mm_set1_ps( -lambda ); - for ( uint i = 0; i < n / 4; i++ ) { - __m128 sum = _mm_setzero_ps(); - for ( uint j = 0; j < 4; j++ ) { - __m128 x = _mm_load_ps( c + j * n + 4 * i ); - x = exp_ps( _mm_mul_ps( v_lambda, x ) ); - _mm_store_ps( p + j * n + 4 * i, x ); - sum = _mm_add_ps( sum, x ); - } - for ( uint j = 0; j < 4; j++ ) { - __m128 x = _mm_load_ps( p + j * n + 4 * i ); - x = _mm_div_ps( x, sum ); - _mm_store_ps( p + j * n + 4 * i, x ); - } - } - // this is for debugging purposes - // float payload_dbg = entropy_array(4*n, p); - - uint trial = 0; - float* p10 = new float[cover_length]; - float* p20 = new float[cover_length]; - u8* stego1 = new u8[cover_length]; - u8* stego2 = new u8[cover_length]; - uint *perm1 = new uint[cover_length]; - uint *perm2 = new uint[cover_length]; - - /* LAYER OF 2ND LSBs */ - for ( uint i = 0; i < cover_length; i++ ) - p20[i] = p[i] + p[i + n]; // p20 = p(1,:)+p(2,:); % probability of 2nd LSB of stego equal 0 - //num_msg_bits[1] = (uint) floor( binary_entropy_array( cover_length, p20 ) ); // msg_bits(2) = floor(sum(binary_entropy(p20))); % number of msg bits embedded into 2nd LSBs - num_msg_bits[1] = (uint) (message_length/2 /*+ message_length%2*/ ); // XXX - - try { - stc_embed_trial( cover_length, p20, message, stc_constraint_height, num_msg_bits[1], perm2, stego2, trial, max_trials, "cost2.txt" ); - } catch ( stc_exception& e ) { - delete[] p10; - delete[] p20; - delete[] perm1; - delete[] perm2; - delete[] stego1; - delete[] stego2; - align_delete< float > ( c ); - align_delete< float > ( p ); - throw e; - } - - /* LAYER OF 1ST LSBs */ - for ( uint i = 0; i < cover_length; i++ ) // - if ( stego2[perm2[i]] == 0 ) // % conditional probability of 1st LSB of stego equal 0 given LSB2=0 - p10[i] = p[i] / (p[i] + p[i + n]); // p10(i) = p(1,i)/(p(1,i)+p(2,i)); - else // % conditional probability of 1st LSB of stego equal 0 given LSB2=1 - p10[i] = p[i + 2 * n] / (p[i + 2 * n] + p[i + 3 * n]); // p10(i) = p(3,i)/(p(3,i)+p(4,i)); - num_msg_bits[0] = m_actual - num_msg_bits[1]; // msg_bits(1) = m_actual-msg_bits(2); % number of msg bits embedded into 1st LSBs - try { - stc_embed_trial( cover_length, p10, message + num_msg_bits[1], stc_constraint_height, num_msg_bits[0], perm1, stego1, trial, - max_trials, "cost1.txt" ); - } catch ( stc_exception& e ) { - delete[] p10; - delete[] p20; - delete[] perm1; - delete[] perm2; - delete[] stego1; - delete[] stego2; - align_delete< float > ( c ); - align_delete< float > ( p ); - throw e; - } - delete[] p10; - delete[] p20; - - /* FINAL CALCULATIONS */ - distortion = 0; - for ( uint i = 0; i < cover_length; i++ ) { - stego[i] = stego_values[4 * i + 2 * stego2[perm2[i]] + stego1[perm1[i]]]; - distortion += costs[4 * i + 2 * stego2[perm2[i]] + stego1[perm1[i]]]; - } - if ( coding_loss != 0 ) { - dist_coding_loss = 0; - for ( uint i = 0; i < cover_length; i++ ) - dist_coding_loss += c[i + n * (2 * stego2[perm2[i]] + stego1[perm1[i]])]; - float lambda_dist = get_lambda_distortion( n, 4, c, dist_coding_loss, lambda, 0, 20 ); // use 20 iterations to make lambda_dist precise - float max_payload = calc_entropy( n, 4, c, lambda_dist ); - (*coding_loss) = (max_payload - m_actual) / 
max_payload; // fraction of max_payload lost due to practical coding scheme - } - max_trials = trial; - - delete[] stego1; - delete[] stego2; - delete[] perm1; - delete[] perm2; - align_delete< float > ( c ); - align_delete< float > ( p ); - - return distortion; -} -// }}} - -// {{{ stc_ml3_embed() -// algorithm for embedding into 3 layers, both payload- and distortion-limited case -float stc_ml3_embed( uint cover_length, float* costs, int* stego_values, uint message_length, u8* message, float target_distortion, // input variables - uint stc_constraint_height, float expected_coding_loss, // other input parameters - int* stego, uint* num_msg_bits, uint &max_trials, float* coding_loss ) { // output and optional variables - - float distortion, dist_coding_loss, lambda = 0, m_max = 0; - uint m_actual = 0; - uint n = cover_length + 4 - (cover_length % 4); // cover length rounded to multiple of 4 - - check_costs( cover_length, 8, costs ); - float* c = align_new< float > ( 8 * n, 16 ); - std::fill_n( c, 8 * n, F_INF ); - std::fill_n( c, n, 0 ); - for ( uint i = 0; i < 8 * cover_length; i++ ) - c[n * (i % 8) + i / 8] = costs[i]; // copy and transpose data for better reading via SSE instructions - // write_matrix_to_file(n, 8, c, "cost_ml3.txt"); - for ( uint i = 0; i < n; i++ ) { // normalize such that minimal element is 0 - this helps numerical stability - float f_min = F_INF; - for ( uint j = 0; j < 8; j++ ) - f_min = std::min( f_min, c[j * n + i] ); - for ( uint j = 0; j < 8; j++ ) - c[j * n + i] -= f_min; - } - - if ( target_distortion != F_INF ) { - lambda = get_lambda_distortion( n, 8, c, target_distortion, 2.0 ); - m_max = (1 - expected_coding_loss) * calc_entropy( n, 8, c, lambda ); - m_actual = std::min( message_length, (uint) floor( m_max ) ); - } - if ( (target_distortion == F_INF) || (m_actual < floor( m_max )) ) { - m_actual = std::min( 3 * cover_length, message_length ); - lambda = get_lambda_entropy( n, 8, c, m_actual, 2.0 ); - } - /* - p = exp(-lambda*costs); - p = p./(ones(8,1)*sum(p)); - */ - float* p = align_new< float > ( 8 * n, 16 ); - __m128 v_lambda = _mm_set1_ps( -lambda ); - for ( uint i = 0; i < n / 4; i++ ) { - __m128 sum = _mm_setzero_ps(); - for ( uint j = 0; j < 8; j++ ) { - __m128 x = _mm_load_ps( c + j * n + 4 * i ); - x = exp_ps( _mm_mul_ps( v_lambda, x ) ); - _mm_store_ps( p + j * n + 4 * i, x ); - sum = _mm_add_ps( sum, x ); - } - for ( uint j = 0; j < 8; j++ ) { - __m128 x = _mm_load_ps( p + j * n + 4 * i ); - x = _mm_div_ps( x, sum ); - _mm_store_ps( p + j * n + 4 * i, x ); - } - } - // this is for debugging - // float payload_dbg = entropy_array(8*n, p); - - uint trial = 0; - float* p10 = new float[cover_length]; - float* p20 = new float[cover_length]; - float* p30 = new float[cover_length]; - u8* stego1 = new u8[cover_length]; - u8* stego2 = new u8[cover_length]; - u8* stego3 = new u8[cover_length]; - uint *perm1 = new uint[cover_length]; - uint *perm2 = new uint[cover_length]; - uint *perm3 = new uint[cover_length]; - - /* LAYER OF 3RD LSBs */ - for ( uint i = 0; i < cover_length; i++ ) - p30[i] = p[i] + p[i + n] + p[i + 2 * n] + p[i + 3 * n]; // - num_msg_bits[2] = (uint) floor( binary_entropy_array( cover_length, p30 ) ); // - try { - stc_embed_trial( cover_length, p30, message, stc_constraint_height, num_msg_bits[2], perm3, stego3, trial, max_trials, "cost3.txt" ); - } catch ( stc_exception& e ) { - delete[] p10; - delete[] p20; - delete[] p30; - delete[] perm1; - delete[] perm2; - delete[] perm3; - delete[] stego1; - delete[] stego2; - delete[] stego3; - 
align_delete< float > ( c ); - align_delete< float > ( p ); - throw e; - } - - /* LAYER OF 2ND LSBs */ - for ( uint i = 0; i < cover_length; i++ ) { // - int s = 4 * stego3[perm3[i]]; // % conditional probability of 2nd LSB of stego equal 0 given LSB3 - p20[i] = (p[i + s * n] + p[i + (s + 1) * n]) / (p[i + s * n] + p[i + (s + 1) * n] + p[i + (s + 2) * n] + p[i + (s + 3) * n]); - } - num_msg_bits[1] = (uint) floor( binary_entropy_array( cover_length, p20 ) );// msg_bits(2) = floor(sum(binary_entropy(p20))); % number of msg bits embedded into 2nd LSBs - try { - stc_embed_trial( cover_length, p20, message + num_msg_bits[2], stc_constraint_height, num_msg_bits[1], perm2, stego2, trial, - max_trials, "cost2.txt" ); - } catch ( stc_exception& e ) { - delete[] p10; - delete[] p20; - delete[] p30; - delete[] perm1; - delete[] perm2; - delete[] perm3; - delete[] stego1; - delete[] stego2; - delete[] stego3; - align_delete< float > ( c ); - align_delete< float > ( p ); - throw e; - } - - /* LAYER OF 1ST LSBs */ - for ( uint i = 0; i < cover_length; i++ ) { // - int s = 4 * stego3[perm3[i]] + 2 * stego2[perm2[i]]; // % conditional probability of 1st LSB of stego equal 0 given LSB3 and LSB2 - p10[i] = p[i + s * n] / (p[i + s * n] + p[i + (s + 1) * n]); - } - num_msg_bits[0] = m_actual - num_msg_bits[1] - num_msg_bits[2]; // msg_bits(1) = m_actual-msg_bits(2)-msg_bits(3); % number of msg bits embedded into 1st LSBs - try { - stc_embed_trial( cover_length, p10, message + num_msg_bits[1] + num_msg_bits[2], stc_constraint_height, num_msg_bits[0], perm1, - stego1, trial, max_trials, "cost1.txt" ); - } catch ( stc_exception& e ) { - delete[] p10; - delete[] p20; - delete[] p30; - delete[] perm1; - delete[] perm2; - delete[] perm3; - delete[] stego1; - delete[] stego2; - delete[] stego3; - align_delete< float > ( c ); - align_delete< float > ( p ); - throw e; - } - delete[] p10; - delete[] p20; - delete[] p30; - max_trials = trial; - - /* FINAL CALCULATIONS */ - distortion = 0; - for ( uint i = 0; i < cover_length; i++ ) { - stego[i] = stego_values[8 * i + 4 * stego3[perm3[i]] + 2 * stego2[perm2[i]] + stego1[perm1[i]]]; - distortion += costs[8 * i + 4 * stego3[perm3[i]] + 2 * stego2[perm2[i]] + stego1[perm1[i]]]; - } - if ( coding_loss != 0 ) { - dist_coding_loss = 0; - for ( uint i = 0; i < cover_length; i++ ) - dist_coding_loss += c[i + n * (4 * stego3[perm3[i]] + 2 * stego2[perm2[i]] + stego1[perm1[i]])]; - float lambda_dist = get_lambda_distortion( n, 8, c, dist_coding_loss, lambda, 0, 20 ); // use 20 iterations to make lambda_dist precise - float max_payload = calc_entropy( n, 8, c, lambda_dist ); - (*coding_loss) = (max_payload - m_actual) / max_payload; // fraction of max_payload lost due to practical coding scheme - } - - delete[] perm1; - delete[] perm2; - delete[] perm3; - delete[] stego1; - delete[] stego2; - delete[] stego3; - align_delete< float > ( c ); - align_delete< float > ( p ); - - return distortion; -} -// }}} - - -/* EXTRACTION ALGORITHMS */ - -// {{{ stc_ml_extract() -/** Extraction algorithm for any l-layered construction. - @param stego_length - ... - @param stego - ... - @param msg_bits - ... - @param stc_constraint_height - ... - @param message - ... 
- */ -void stc_ml_extract( uint stego_length, int* stego, uint num_of_layers, uint* num_msg_bits, // input variables - uint stc_constraint_height, // other input parameters - u8* message ) { // output variables - - u8* stego_bits = new u8[stego_length]; - u8* msg_ptr = message; - uint *perm = new uint[stego_length]; - - for ( uint l = num_of_layers; l > 0; l-- ) { // extract message from every layer starting from most significant ones - // extract bits from l-th LSB plane - if ( num_msg_bits[l - 1] > 0 ) { - randperm( stego_length, num_msg_bits[l - 1], perm ); - for ( uint i = 0; i < stego_length; i++ ) - stego_bits[perm[i]] = mod( stego[i], (1 << l) ) >> (l - 1); - stc_extract( stego_bits, stego_length, msg_ptr, num_msg_bits[l - 1], stc_constraint_height ); - msg_ptr += num_msg_bits[l - 1]; - } - } - - delete[] stego_bits; - delete[] perm; -} -// }}} - - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py deleted file mode 100644 index c6f424debd1623e7511dd77da464a6639d816745..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, - ContrastTransform, EqualizeTransform, Rotate, Shear, - Translate) -from .compose import Compose -from .formating import (Collect, DefaultFormatBundle, ImageToTensor, - ToDataContainer, ToTensor, Transpose, to_tensor) -from .instaboost import InstaBoost -from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, - LoadMultiChannelImageFromFiles, LoadProposals) -from .test_time_aug import MultiScaleFlipAug -from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize, - Pad, PhotoMetricDistortion, RandomCenterCropPad, - RandomCrop, RandomFlip, Resize, SegRescale) - -__all__ = [ - 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', - 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', - 'LoadImageFromFile', 'LoadImageFromWebcam', - 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', - 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', - 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', - 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', - 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform', - 'ContrastTransform', 'Translate' -] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py deleted file mode 100644 index cfae701da3dd48c9a02e11b6a6f7cc627221fede..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py +++ /dev/null @@ -1,458 +0,0 @@ -import os.path as osp - -import mmcv -import numpy as np -import pycocotools.mask as maskUtils - -from annotator.uniformer.mmdet.core import BitmapMasks, PolygonMasks -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class LoadImageFromFile(object): - """Load an image from file. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename"). 
Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='color', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load image and get image meta information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = osp.join(results['img_prefix'], - results['img_info']['filename']) - else: - filename = results['img_info']['filename'] - - img_bytes = self.file_client.get(filename) - img = mmcv.imfrombytes(img_bytes, flag=self.color_type) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadImageFromWebcam(LoadImageFromFile): - """Load an image from webcam. - - Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in - ``results['img']``. - """ - - def __call__(self, results): - """Call functions to add image meta information. - - Args: - results (dict): Result dict with Webcam read image in - ``results['img']``. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - img = results['img'] - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = None - results['ori_filename'] = None - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - -@PIPELINES.register_module() -class LoadMultiChannelImageFromFiles(object): - """Load multi-channel images from a list of separate channel files. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename", which is expected to be a list of filenames). - Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. 
- file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='unchanged', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load multiple images and get images meta - information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded images and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = [ - osp.join(results['img_prefix'], fname) - for fname in results['img_info']['filename'] - ] - else: - filename = results['img_info']['filename'] - - img = [] - for name in filename: - img_bytes = self.file_client.get(name) - img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type)) - img = np.stack(img, axis=-1) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results['img_norm_cfg'] = dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadAnnotations(object): - """Load mutiple types of annotations. - - Args: - with_bbox (bool): Whether to parse and load the bbox annotation. - Default: True. - with_label (bool): Whether to parse and load the label annotation. - Default: True. - with_mask (bool): Whether to parse and load the mask annotation. - Default: False. - with_seg (bool): Whether to parse and load the semantic segmentation - annotation. Default: False. - poly2mask (bool): Whether to convert the instance masks from polygons - to bitmaps. Default: True. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - with_bbox=True, - with_label=True, - with_mask=False, - with_seg=False, - poly2mask=True, - file_client_args=dict(backend='disk')): - self.with_bbox = with_bbox - self.with_label = with_label - self.with_mask = with_mask - self.with_seg = with_seg - self.poly2mask = poly2mask - self.file_client_args = file_client_args.copy() - self.file_client = None - - def _load_bboxes(self, results): - """Private function to load bounding box annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded bounding box annotations. 
- """ - - ann_info = results['ann_info'] - results['gt_bboxes'] = ann_info['bboxes'].copy() - - gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) - if gt_bboxes_ignore is not None: - results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() - results['bbox_fields'].append('gt_bboxes_ignore') - results['bbox_fields'].append('gt_bboxes') - return results - - def _load_labels(self, results): - """Private function to load label annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded label annotations. - """ - - results['gt_labels'] = results['ann_info']['labels'].copy() - return results - - def _poly2mask(self, mask_ann, img_h, img_w): - """Private function to convert masks represented with polygon to - bitmaps. - - Args: - mask_ann (list | dict): Polygon mask annotation input. - img_h (int): The height of output mask. - img_w (int): The width of output mask. - - Returns: - numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). - """ - - if isinstance(mask_ann, list): - # polygon -- a single object might consist of multiple parts - # we merge all parts into one mask rle code - rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) - rle = maskUtils.merge(rles) - elif isinstance(mask_ann['counts'], list): - # uncompressed RLE - rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) - else: - # rle - rle = mask_ann - mask = maskUtils.decode(rle) - return mask - - def process_polygons(self, polygons): - """Convert polygons to list of ndarray and filter invalid polygons. - - Args: - polygons (list[list]): Polygons of one instance. - - Returns: - list[numpy.ndarray]: Processed polygons. - """ - - polygons = [np.array(p) for p in polygons] - valid_polygons = [] - for polygon in polygons: - if len(polygon) % 2 == 0 and len(polygon) >= 6: - valid_polygons.append(polygon) - return valid_polygons - - def _load_masks(self, results): - """Private function to load mask annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded mask annotations. - If ``self.poly2mask`` is set ``True``, `gt_mask` will contain - :obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used. - """ - - h, w = results['img_info']['height'], results['img_info']['width'] - gt_masks = results['ann_info']['masks'] - if self.poly2mask: - gt_masks = BitmapMasks( - [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) - else: - gt_masks = PolygonMasks( - [self.process_polygons(polygons) for polygons in gt_masks], h, - w) - results['gt_masks'] = gt_masks - results['mask_fields'].append('gt_masks') - return results - - def _load_semantic_seg(self, results): - """Private function to load semantic segmentation annotations. - - Args: - results (dict): Result dict from :obj:`dataset`. - - Returns: - dict: The dict contains loaded semantic segmentation annotations. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - img_bytes = self.file_client.get(filename) - results['gt_semantic_seg'] = mmcv.imfrombytes( - img_bytes, flag='unchanged').squeeze() - results['seg_fields'].append('gt_semantic_seg') - return results - - def __call__(self, results): - """Call function to load multiple types annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. 
- - Returns: - dict: The dict contains loaded bounding box, label, mask and - semantic segmentation annotations. - """ - - if self.with_bbox: - results = self._load_bboxes(results) - if results is None: - return None - if self.with_label: - results = self._load_labels(results) - if self.with_mask: - results = self._load_masks(results) - if self.with_seg: - results = self._load_semantic_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(with_bbox={self.with_bbox}, ' - repr_str += f'with_label={self.with_label}, ' - repr_str += f'with_mask={self.with_mask}, ' - repr_str += f'with_seg={self.with_seg}, ' - repr_str += f'poly2mask={self.poly2mask}, ' - repr_str += f'poly2mask={self.file_client_args})' - return repr_str - - -@PIPELINES.register_module() -class LoadProposals(object): - """Load proposal pipeline. - - Required key is "proposals". Updated keys are "proposals", "bbox_fields". - - Args: - num_max_proposals (int, optional): Maximum number of proposals to load. - If not specified, all proposals will be loaded. - """ - - def __init__(self, num_max_proposals=None): - self.num_max_proposals = num_max_proposals - - def __call__(self, results): - """Call function to load proposals from file. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded proposal annotations. - """ - - proposals = results['proposals'] - if proposals.shape[1] not in (4, 5): - raise AssertionError( - 'proposals should have shapes (n, 4) or (n, 5), ' - f'but found {proposals.shape}') - proposals = proposals[:, :4] - - if self.num_max_proposals is not None: - proposals = proposals[:self.num_max_proposals] - - if len(proposals) == 0: - proposals = np.array([[0, 0, 0, 0]], dtype=np.float32) - results['proposals'] = proposals - results['bbox_fields'].append('proposals') - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(num_max_proposals={self.num_max_proposals})' - - -@PIPELINES.register_module() -class FilterAnnotations(object): - """Filter invalid annotations. - - Args: - min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth - boxes. - """ - - def __init__(self, min_gt_bbox_wh): - # TODO: add more filter options - self.min_gt_bbox_wh = min_gt_bbox_wh - - def __call__(self, results): - assert 'gt_bboxes' in results - gt_bboxes = results['gt_bboxes'] - w = gt_bboxes[:, 2] - gt_bboxes[:, 0] - h = gt_bboxes[:, 3] - gt_bboxes[:, 1] - keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1]) - if not keep.any(): - return None - else: - keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg') - for key in keys: - if key in results: - results[key] = results[key][keep] - return results diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/image/photometric.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/image/photometric.py deleted file mode 100644 index 5085d012019c0cbf56f66f421a378278c1a058ae..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/image/photometric.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - -from ..utils import is_tuple_of -from .colorspace import bgr2gray, gray2bgr - - -def imnormalize(img, mean, std, to_rgb=True): - """Normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. 
- std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - img = img.copy().astype(np.float32) - return imnormalize_(img, mean, std, to_rgb) - - -def imnormalize_(img, mean, std, to_rgb=True): - """Inplace normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. - std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - # cv2 inplace normalization does not accept uint8 - assert img.dtype != np.uint8 - mean = np.float64(mean.reshape(1, -1)) - stdinv = 1 / np.float64(std.reshape(1, -1)) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - cv2.subtract(img, mean, img) # inplace - cv2.multiply(img, stdinv, img) # inplace - return img - - -def imdenormalize(img, mean, std, to_bgr=True): - assert img.dtype != np.uint8 - mean = mean.reshape(1, -1).astype(np.float64) - std = std.reshape(1, -1).astype(np.float64) - img = cv2.multiply(img, std) # make a copy - cv2.add(img, mean, img) # inplace - if to_bgr: - cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace - return img - - -def iminvert(img): - """Invert (negate) an image. - - Args: - img (ndarray): Image to be inverted. - - Returns: - ndarray: The inverted image. - """ - return np.full_like(img, 255) - img - - -def solarize(img, thr=128): - """Solarize an image (invert all pixel values above a threshold) - - Args: - img (ndarray): Image to be solarized. - thr (int): Threshold for solarizing (0 - 255). - - Returns: - ndarray: The solarized image. - """ - img = np.where(img < thr, img, 255 - img) - return img - - -def posterize(img, bits): - """Posterize an image (reduce the number of bits for each color channel) - - Args: - img (ndarray): Image to be posterized. - bits (int): Number of bits (1 to 8) to use for posterizing. - - Returns: - ndarray: The posterized image. - """ - shift = 8 - bits - img = np.left_shift(np.right_shift(img, shift), shift) - return img - - -def adjust_color(img, alpha=1, beta=None, gamma=0): - r"""It blends the source image and its gray image: - - .. math:: - output = img * alpha + gray\_img * beta + gamma - - Args: - img (ndarray): The input source image. - alpha (int | float): Weight for the source image. Default 1. - beta (int | float): Weight for the converted gray image. - If None, it's assigned the value (1 - `alpha`). - gamma (int | float): Scalar added to each sum. - Same as :func:`cv2.addWeighted`. Default 0. - - Returns: - ndarray: Colored image which has the same size and dtype as input. - """ - gray_img = bgr2gray(img) - gray_img = np.tile(gray_img[..., None], [1, 1, 3]) - if beta is None: - beta = 1 - alpha - colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) - if not colored_img.dtype == np.uint8: - # Note when the dtype of `img` is not the default `np.uint8` - # (e.g. np.float32), the value in `colored_img` got from cv2 - # is not guaranteed to be in range [0, 255], so here clip - # is needed. - colored_img = np.clip(colored_img, 0, 255) - return colored_img - - -def imequalize(img): - """Equalize the image histogram. - - This function applies a non-linear mapping to the input image, - in order to create a uniform distribution of grayscale values - in the output image. - - Args: - img (ndarray): Image to be equalized. - - Returns: - ndarray: The equalized image. 
- """ - - def _scale_channel(im, c): - """Scale the data in the corresponding channel.""" - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # For computing the step, filter out the nonzeros. - nonzero_histo = histo[histo > 0] - step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 - if not step: - lut = np.array(range(256)) - else: - # Compute the cumulative sum, shifted by step // 2 - # and then normalized by step. - lut = (np.cumsum(histo) + (step // 2)) // step - # Shift lut, prepending with 0. - lut = np.concatenate([[0], lut[:-1]], 0) - # handle potential integer overflow - lut[lut > 255] = 255 - # If step is zero, return the original image. - # Otherwise, index from lut. - return np.where(np.equal(step, 0), im, lut[im]) - - # Scales each channel independently and then stacks - # the result. - s1 = _scale_channel(img, 0) - s2 = _scale_channel(img, 1) - s3 = _scale_channel(img, 2) - equalized_img = np.stack([s1, s2, s3], axis=-1) - return equalized_img.astype(img.dtype) - - -def adjust_brightness(img, factor=1.): - """Adjust image brightness. - - This function controls the brightness of an image. An - enhancement factor of 0.0 gives a black image. - A factor of 1.0 gives the original image. This function - blends the source image and the degenerated black image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be brightened. - factor (float): A value controls the enhancement. - Factor 1.0 returns the original image, lower - factors mean less color (brightness, contrast, - etc), and higher values more. Default 1. - - Returns: - ndarray: The brightened image. - """ - degenerated = np.zeros_like(img) - # Note manually convert the dtype to np.float32, to - # achieve as close results as PIL.ImageEnhance.Brightness. - # Set beta=1-factor, and gamma=0 - brightened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - brightened_img = np.clip(brightened_img, 0, 255) - return brightened_img.astype(img.dtype) - - -def adjust_contrast(img, factor=1.): - """Adjust image contrast. - - This function controls the contrast of an image. An - enhancement factor of 0.0 gives a solid grey - image. A factor of 1.0 gives the original image. It - blends the source image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be contrasted. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - - Returns: - ndarray: The contrasted image. - """ - gray_img = bgr2gray(img) - hist = np.histogram(gray_img, 256, (0, 255))[0] - mean = round(np.sum(gray_img) / np.sum(hist)) - degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) - degenerated = gray2bgr(degenerated) - contrasted_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - contrasted_img = np.clip(contrasted_img, 0, 255) - return contrasted_img.astype(img.dtype) - - -def auto_contrast(img, cutoff=0): - """Auto adjust image contrast. - - This function maximize (normalize) image contrast by first removing cutoff - percent of the lightest and darkest pixels from the histogram and remapping - the image so that the darkest pixel becomes black (0), and the lightest - becomes white (255). - - Args: - img (ndarray): Image to be contrasted. BGR order. 
- cutoff (int | float | tuple): The cutoff percent of the lightest and - darkest pixels to be removed. If given as tuple, it shall be - (low, high). Otherwise, the single value will be used for both. - Defaults to 0. - - Returns: - ndarray: The contrasted image. - """ - - def _auto_contrast_channel(im, c, cutoff): - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # Remove cut-off percent pixels from histo - histo_sum = np.cumsum(histo) - cut_low = histo_sum[-1] * cutoff[0] // 100 - cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 - histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low - histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) - - # Compute mapping - low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] - # If all the values have been cut off, return the origin img - if low >= high: - return im - scale = 255.0 / (high - low) - offset = -low * scale - lut = np.array(range(256)) - lut = lut * scale + offset - lut = np.clip(lut, 0, 255) - return lut[im] - - if isinstance(cutoff, (int, float)): - cutoff = (cutoff, cutoff) - else: - assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ - f'float or tuple, but got {type(cutoff)} instead.' - # Auto adjusts contrast for each channel independently and then stacks - # the result. - s1 = _auto_contrast_channel(img, 0, cutoff) - s2 = _auto_contrast_channel(img, 1, cutoff) - s3 = _auto_contrast_channel(img, 2, cutoff) - contrasted_img = np.stack([s1, s2, s3], axis=-1) - return contrasted_img.astype(img.dtype) - - -def adjust_sharpness(img, factor=1., kernel=None): - """Adjust image sharpness. - - This function controls the sharpness of an image. An - enhancement factor of 0.0 gives a blurred image. A - factor of 1.0 gives the original image. And a factor - of 2.0 gives a sharpened image. It blends the source - image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be sharpened. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - kernel (np.ndarray, optional): Filter kernel to be applied on the img - to obtain the degenerated img. Defaults to None. - - Note: - No value sanity check is enforced on the kernel set by users. So with - an inappropriate kernel, the ``adjust_sharpness`` may fail to perform - the function its name indicates but end up performing whatever - transform determined by the kernel. - - Returns: - ndarray: The sharpened image. - """ - - if kernel is None: - # adopted from PIL.ImageFilter.SMOOTH - kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 - assert isinstance(kernel, np.ndarray), \ - f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' - assert kernel.ndim == 2, \ - f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' - - degenerated = cv2.filter2D(img, -1, kernel) - sharpened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - sharpened_img = np.clip(sharpened_img, 0, 255) - return sharpened_img.astype(img.dtype) - - -def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True): - """AlexNet-style PCA jitter. - - This data augmentation is proposed in `ImageNet Classification with Deep - Convolutional Neural Networks - `_. - - Args: - img (ndarray): Image to be adjusted lighting. BGR order. - eigval (ndarray): the eigenvalue of the convariance matrix of pixel - values, respectively. 
- eigvec (ndarray): the eigenvector of the convariance matrix of pixel - values, respectively. - alphastd (float): The standard deviation for distribution of alpha. - Defaults to 0.1 - to_rgb (bool): Whether to convert img to rgb. - - Returns: - ndarray: The adjusted image. - """ - assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \ - f'eigval and eigvec should both be of type np.ndarray, got ' \ - f'{type(eigval)} and {type(eigvec)} instead.' - - assert eigval.ndim == 1 and eigvec.ndim == 2 - assert eigvec.shape == (3, eigval.shape[0]) - n_eigval = eigval.shape[0] - assert isinstance(alphastd, float), 'alphastd should be of type float, ' \ - f'got {type(alphastd)} instead.' - - img = img.copy().astype(np.float32) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - - alpha = np.random.normal(0, alphastd, n_eigval) - alter = eigvec \ - * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \ - * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval)) - alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape) - img_adjusted = img + alter - return img_adjusted - - -def lut_transform(img, lut_table): - """Transform array by look-up table. - - The function lut_transform fills the output array with values from the - look-up table. Indices of the entries are taken from the input array. - - Args: - img (ndarray): Image to be transformed. - lut_table (ndarray): look-up table of 256 elements; in case of - multi-channel input array, the table should either have a single - channel (in this case the same table is used for all channels) or - the same number of channels as in the input array. - - Returns: - ndarray: The transformed image. - """ - assert isinstance(img, np.ndarray) - assert 0 <= np.min(img) and np.max(img) <= 255 - assert isinstance(lut_table, np.ndarray) - assert lut_table.shape == (256, ) - - return cv2.LUT(np.array(img, dtype=np.uint8), lut_table) - - -def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)): - """Use CLAHE method to process the image. - - See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. - Graphics Gems, 1994:474-485.` for more information. - - Args: - img (ndarray): Image to be processed. - clip_limit (float): Threshold for contrast limiting. Default: 40.0. - tile_grid_size (tuple[int]): Size of grid for histogram equalization. - Input image will be divided into equally sized rectangular tiles. - It defines the number of tiles in row and column. Default: (8, 8). - - Returns: - ndarray: The processed image. - """ - assert isinstance(img, np.ndarray) - assert img.ndim == 2 - assert isinstance(clip_limit, (float, int)) - assert is_tuple_of(tile_grid_size, int) - assert len(tile_grid_size) == 2 - - clahe = cv2.createCLAHE(clip_limit, tile_grid_size) - return clahe.apply(np.array(img, dtype=np.uint8)) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/se_layer.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/se_layer.py deleted file mode 100644 index e4602417494ec4cd5c7ddb8dbaefaa277808042f..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/se_layer.py +++ /dev/null @@ -1,69 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. 
- * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import annotator.uniformer.mmcv as mmcv -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from .make_divisible import make_divisible - - -class SELayer(nn.Module): - """Squeeze-and-Excitation Module. - - Args: - channels (int): The input (and output) channels of the SE layer. - ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default: 16. - conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. - act_cfg (dict or Sequence[dict]): Config dict for activation layer. - If act_cfg is a dict, two activation layers will be configured - by this dict. If act_cfg is a sequence of dicts, the first - activation layer will be configured by the first dict and the - second activation layer will be configured by the second dict. - Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, - divisor=6.0)). - """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=3.0, divisor=6.0))): - super(SELayer, self).__init__() - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmcv.is_tuple_of(act_cfg, dict) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=make_divisible(channels // ratio, 8), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=make_divisible(channels // ratio, 8), - out_channels=channels, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - out = self.global_avgpool(x) - out = self.conv1(out) - out = self.conv2(out) - return x * out diff --git a/spaces/akdeniz27/LLaMa-2-70b-chat-hf-with-EasyLLM/README.md b/spaces/akdeniz27/LLaMa-2-70b-chat-hf-with-EasyLLM/README.md deleted file mode 100644 index d392490a907c53bbf64c5ac01af1d1ad7741c10e..0000000000000000000000000000000000000000 --- a/spaces/akdeniz27/LLaMa-2-70b-chat-hf-with-EasyLLM/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LLaMa 2 70b Chat Hf With EasyLLM -emoji: 🏆 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akiraaaaaa/Waifu-Reina/README.md b/spaces/akiraaaaaa/Waifu-Reina/README.md deleted file mode 100644 index 60a8823cdbcc5ddebc50a7e33fcf122a057ed498..0000000000000000000000000000000000000000 --- a/spaces/akiraaaaaa/Waifu-Reina/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Hololive Rvc Models -emoji: 🎤🌸▶️ -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: megaaziib/hololive-rvc-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py 
b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py deleted file mode 100644 index 75a633bf9dc81ebb94775cb810d91d2b3cf48190..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/requests/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# __ -# /__) _ _ _ _ _/ _ -# / ( (- (/ (/ (- _) / _) -# / - -""" -Requests HTTP Library -~~~~~~~~~~~~~~~~~~~~~ - -Requests is an HTTP library, written in Python, for human beings. -Basic GET usage: - - >>> import requests - >>> r = requests.get('https://www.python.org') - >>> r.status_code - 200 - >>> b'Python is a programming language' in r.content - True - -... or POST: - - >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('https://httpbin.org/post', data=payload) - >>> print(r.text) - { - ... - "form": { - "key1": "value1", - "key2": "value2" - }, - ... - } - -The other HTTP methods are supported - see `requests.api`. Full documentation -is at . - -:copyright: (c) 2017 by Kenneth Reitz. -:license: Apache 2.0, see LICENSE for more details. -""" - -from pip._vendor import urllib3 -import warnings -from .exceptions import RequestsDependencyWarning - -charset_normalizer_version = None - -try: - from pip._vendor.chardet import __version__ as chardet_version -except ImportError: - chardet_version = None - -def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. - - # Sometimes, urllib3 only reports its version as 16.1. - if len(urllib3_version) == 2: - urllib3_version.append('0') - - # Check urllib3 for compatibility. - major, minor, patch = urllib3_version # noqa: F811 - major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.26 - assert major == 1 - assert minor >= 21 - assert minor <= 26 - - # Check charset_normalizer for compatibility. - if chardet_version: - major, minor, patch = chardet_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet_version >= 3.0.2, < 5.0.0 - assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0) - elif charset_normalizer_version: - major, minor, patch = charset_normalizer_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # charset_normalizer >= 2.0.0 < 3.0.0 - assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0) - else: - raise Exception("You need either charset_normalizer or chardet installed") - -def _check_cryptography(cryptography_version): - # cryptography < 1.3.4 - try: - cryptography_version = list(map(int, cryptography_version.split('.'))) - except ValueError: - return - - if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) - warnings.warn(warning, RequestsDependencyWarning) - -# Check imported dependencies for compatibility. -try: - check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version) -except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version), - RequestsDependencyWarning) - -# Attempt to enable urllib3's fallback for SNI support -# if the standard library doesn't support SNI or the -# 'ssl' library isn't available. 
-try: - # Note: This logic prevents upgrading cryptography on Windows, if imported - # as part of pip. - from pip._internal.utils.compat import WINDOWS - if not WINDOWS: - raise ImportError("pip internals: don't import cryptography on Windows") - try: - import ssl - except ImportError: - ssl = None - - if not getattr(ssl, "HAS_SNI", False): - from pip._vendor.urllib3.contrib import pyopenssl - pyopenssl.inject_into_urllib3() - - # Check cryptography version - from cryptography import __version__ as cryptography_version - _check_cryptography(cryptography_version) -except ImportError: - pass - -# urllib3's DependencyWarnings should be silenced. -from pip._vendor.urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ - -from . import utils -from . import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError -) - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -logging.getLogger(__name__).addHandler(NullHandler()) - -# FileModeWarnings go off per the default. -warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/spaces/aliabd/SummerTime/__init__.py b/spaces/aliabd/SummerTime/__init__.py deleted file mode 100644 index 8a4686f47d52f87f9561f7a9182a2e91d8cb1e0d..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -import SummerTime.model -import SummerTime.dataset.st_dataset as data -import SummerTime.evaluation diff --git a/spaces/alibaba-pai/easyphoto/app.py b/spaces/alibaba-pai/easyphoto/app.py deleted file mode 100644 index 5ca53068f57eeb05fe8fcb459386fbb707225200..0000000000000000000000000000000000000000 --- a/spaces/alibaba-pai/easyphoto/app.py +++ /dev/null @@ -1,346 +0,0 @@ -import os -import glob -import gradio as gr -import base64 -import cv2 -import numpy as np -import oss2 -import time - -from ai_service_python_sdk.client.api.ai_service_aigc_images_api import AIGCImagesApi -from ai_service_python_sdk.client.api.ai_service_job_api import AiServiceJobApi -from ai_service_python_sdk.client.api_client import ApiClient -from ai_service_python_sdk.test import appId, host, token - - -host = os.getenv("PAI_REC_HOST") -appId = os.getenv("PAI_REC_APP_ID") -token = os.getenv("PAI_REC_TOKEN") -access_key_id = os.getenv('OSS_ACCESS_KEY_ID') -access_key_secret = os.getenv('OSS_ACCESS_KEY_SECRET') -bucket_name = os.getenv('OSS_BUCKET') -endpoint = os.getenv('OSS_ENDPOINT') - - -def upload_file(files, current_files): - file_paths = [file_d['name'] for file_d in current_files] + [file.name for file in files] - return file_paths - - -def decode_image_from_base64jpeg(base64_image): - image_bytes = base64.b64decode(base64_image) - np_arr = np.frombuffer(image_bytes, np.uint8) - image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - return image - - -def upload(image_path, number): - bucket = oss2.Bucket(oss2.Auth(access_key_id, 
access_key_secret), endpoint, bucket_name) - file_name = image_path.split('/')[-1] - ext = file_name.split('.')[-1] - file_name = str(number) + '.' + ext - timestamp = str(time.time()).split('.')[0] - bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_lora/' - oss_file_path = bucket_folder + file_name - bucket.put_object_from_file(oss_file_path, image_path) - file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name - return file_url - - -def upload_template(image_path): - bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name) - file_name = image_path.split('/')[-1] - timestamp = str(time.time()).split('.')[0] - bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_template/' - oss_file_path = bucket_folder + file_name - bucket.put_object_from_file(oss_file_path, image_path) - file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name - return file_url - - -def easyphoto_train(instance_images): - images = [] - if instance_images is None or len(instance_images)==0: - output = 'Status: no image updated!' - return output, [], [] - for number, image in enumerate(instance_images): - image_path = image['name'] - image_url = upload(image_path, number) - images.append(image_url) - client = ApiClient(host, appId, token) - api = AIGCImagesApi(client) - response = api.aigc_images_train(images, 'photog_train_freetier', None) - message = response.message - model_id = response.data['model_id'] - job_id = response.data['job_id'] - if message == 'success': - state = 'training job submitted.' - output = 'Status: ' + state - print("job id: " + str(job_id)) - print("model id: " + str(model_id)) - return output, job_id, model_id - else: - output = 'Status: submitting training job failed!' - return output, [], [] - - -def easyphoto_check(job_id): - client = ApiClient(host, appId, token) - api = AiServiceJobApi(client) - if job_id is None: - output = 'Status: checking training status failed! No job id.' - else: - try: - job_id = int(str(job_id).strip()) - response = api.get_async_job_with_id(job_id) - message = response.data['job']['message'] - output = 'Status: ' + message - except: - output = 'Status: checking training status failed!' - return output - - -def easyphoto_infer(model_id, selected_template_images, additional_prompt, seed, before_face_fusion_ratio, after_face_fusion_ratio, first_diffusion_steps, first_denoising_strength, second_diffusion_steps, second_denoising_strength, crop_face_preprocess, apply_face_fusion_before, apply_face_fusion_after, color_shift_middle, color_shift_last, background_restore): - image_urls = [] - if len(selected_template_images) == 0: - output_info = 'Status: no templete selected!' - return output_info, [] - selected_template_images = eval(selected_template_images) - for image in selected_template_images: - image_url = upload_template(image) - image_urls.append(image_url) - - client = ApiClient(host, appId, token) - api = AIGCImagesApi(client) - outputs = [] - output_info = None - - if model_id is None: - output_info = 'Status: no model id provided!' - return output_info, [] - - model_id = str(model_id).strip() - print('model id: ' + model_id) - - if job_id is None: - output_info = 'Status: no job id provided, please do model training first!' 
- return output_info, [] - - job_id = str(job_id).strip() - print('job id: ' + job_id) - - check_client = ApiClient(host, appId, token) - check_api = AiServiceJobApi(check_client) - try: - job_id = int(str(job_id).strip()) - response = check_api.get_async_job_with_id(job_id) - message = response.data['job']['message'] - if not message == 'success': - output = 'Status: model training incomplete!' - return output, [] - except: - output = 'Status: checking training status failed, please do model training first!' - return output, [] - - for image_url in image_urls: - try: - params = { - "additional_prompt": additional_prompt, - "seed": seed, - "before_face_fusion_ratio": before_face_fusion_ratio, - "after_face_fusion_ratio": after_face_fusion_ratio, - "first_diffusion_steps": first_diffusion_steps, - "first_denoising_strength": first_denoising_strength, - "second_diffusion_steps": second_diffusion_steps, - "second_denoising_strength": second_denoising_strength, - "crop_face_preprocess": crop_face_preprocess, - "apply_face_fusion_before": apply_face_fusion_before, - "apply_face_fusion_after": apply_face_fusion_after, - "color_shift_middle": color_shift_middle, - "color_shift_last": color_shift_last, - "background_restore": background_restore - } - response = api.aigc_images_create(model_id, image_url, 'photog_infer_freetier', params) - except: - output_info = 'Status: calling eas service failed!' - return output_info, [] - - data = response.data - message = response.message - if message == 'success': - image = data['image'] - image = decode_image_from_base64jpeg(image) - outputs.append(image) - output_info = 'Status: generating image succesfully!' - else: - output_info = 'Status: generating image failed!' - return output_info, [] - return output_info, outputs - - -with gr.Blocks() as easyphoto_demo: - model_id = gr.Textbox(visible=False) - job_id = gr.Textbox(visible=False) - with gr.TabItem('Training'): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - instance_images = gr.Gallery().style(columns=[4], rows=[2], object_fit="contain", height="auto") - with gr.Row(): - upload_button = gr.UploadButton( - "Upload Photos", file_types=["image"], file_count="multiple" - ) - clear_button = gr.Button("Clear Photos") - clear_button.click(fn=lambda: [], inputs=None, outputs=instance_images) - upload_button.upload(upload_file, inputs=[upload_button, instance_images], outputs=instance_images, queue=False) - gr.Markdown( - ''' - Training steps: - 1. Please upload 5-20 half-body photos or head and shoulder photos, ensuring that the facial proportions are not too small. - 2. Click the training button below to submit the training task. It will take approximately 15 minutes, and you can check the status of your training task. Please refrain from clicking the submit training task button multiple times! - 3. Once the model training is completed, the task status will display success. Switch to inference mode and generate photos based on the template. - 4. If you experience lag during uploading, please resize the uploaded images to a size below 1.5MB if possible. - 5. During the training or inference process, please do not refresh or close the window. 
- ''' - ) - - with gr.Row(): - run_button = gr.Button('Submit My Training Job') - check_button = gr.Button('Check My Training Job Status') - output_message = gr.Textbox(value="", label="Status", interactive=False) - - run_button.click(fn=easyphoto_train, - inputs=[instance_images], - outputs=[output_message, job_id, model_id]) - - check_button.click(fn=easyphoto_check, - inputs=[job_id], - outputs=[output_message]) - - - with gr.TabItem('Inference'): - templates = glob.glob(r'./*.jpg') - preset_template = list(templates) - - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - template_gallery_list = [(i, i) for i in preset_template] - gallery = gr.Gallery(template_gallery_list).style(columns=[4], rows=[2], object_fit="contain", height="auto") - - def select_function(evt: gr.SelectData): - return [preset_template[evt.index]] - - selected_template_images = gr.Text(show_label=False, visible=False, placeholder="Selected") - gallery.select(select_function, None, selected_template_images) - - with gr.Accordion("Advanced Options", open=False): - additional_prompt = gr.Textbox( - label="Additional Prompt", - lines=3, - value='masterpiece, beauty', - interactive=True - ) - seed = gr.Textbox( - label="Seed", - value=12345, - ) - with gr.Row(): - before_face_fusion_ratio = gr.Slider( - minimum=0.2, maximum=0.8, value=0.50, - step=0.05, label='Face Fusion Ratio Before' - ) - after_face_fusion_ratio = gr.Slider( - minimum=0.2, maximum=0.8, value=0.50, - step=0.05, label='Face Fusion Ratio After' - ) - - with gr.Row(): - first_diffusion_steps = gr.Slider( - minimum=15, maximum=50, value=50, - step=1, label='First Diffusion steps' - ) - first_denoising_strength = gr.Slider( - minimum=0.30, maximum=0.60, value=0.45, - step=0.05, label='First Diffusion denoising strength' - ) - with gr.Row(): - second_diffusion_steps = gr.Slider( - minimum=15, maximum=50, value=20, - step=1, label='Second Diffusion steps' - ) - second_denoising_strength = gr.Slider( - minimum=0.20, maximum=0.40, value=0.30, - step=0.05, label='Second Diffusion denoising strength' - ) - with gr.Row(): - crop_face_preprocess = gr.Checkbox( - label="Crop Face Preprocess", - value=True - ) - apply_face_fusion_before = gr.Checkbox( - label="Apply Face Fusion Before", - value=True - ) - apply_face_fusion_after = gr.Checkbox( - label="Apply Face Fusion After", - value=True - ) - with gr.Row(): - color_shift_middle = gr.Checkbox( - label="Apply color shift first", - value=True - ) - color_shift_last = gr.Checkbox( - label="Apply color shift last", - value=True - ) - background_restore = gr.Checkbox( - label="Background Restore", - value=False - ) - - with gr.Box(): - gr.Markdown( - ''' - Parameters: - 1. **Face Fusion Ratio Before** represents the proportion of the first facial fusion, which is higher and more similar to the training object. - 2. **Face Fusion Ratio After** represents the proportion of the second facial fusion, which is higher and more similar to the training object. - 3. **Crop Face Preprocess** represents whether to crop the image before generation, which can adapt to images with smaller faces. - 4. **Apply Face Fusion Before** represents whether to perform the first facial fusion. - 5. **Apply Face Fusion After** represents whether to perform the second facial fusion. 
- ''' - ) - - with gr.Column(): - gr.Markdown('Generated Results') - output_images = gr.Gallery( - label='Output', - show_label=False - ).style(columns=[4], rows=[2], object_fit="contain", height="auto") - display_button = gr.Button('Start Generation') - infer_progress = gr.Textbox( - label="Generation Progress", - value="", - interactive=False - ) - - display_button.click( - fn=easyphoto_infer, - inputs=[model_id, selected_template_images, additional_prompt, seed, before_face_fusion_ratio, after_face_fusion_ratio, first_diffusion_steps, first_denoising_strength, second_diffusion_steps, second_denoising_strength, crop_face_preprocess, apply_face_fusion_before, apply_face_fusion_after, color_shift_middle, color_shift_last, background_restore], - outputs=[infer_progress, output_images] - ) - - gr.Markdown( - """ - Useful Links - - EasyPhoto GitHub: https://github.com/aigc-apps/sd-webui-EasyPhoto - - Alibaba Cloud Freetier: https://help.aliyun.com/document_detail/2567864.html - - PAI-DSW Gallery: https://gallery.pai-ml.com/#/preview/deepLearning/cv/stable_diffusion_easyphoto - """) - -easyphoto_demo.launch(share=False).queue() \ No newline at end of file diff --git a/spaces/amish1729/LFUNet/utils/face_detection.py b/spaces/amish1729/LFUNet/utils/face_detection.py deleted file mode 100644 index 41897081b9fe40d886117e432cc542b1b2215ae4..0000000000000000000000000000000000000000 --- a/spaces/amish1729/LFUNet/utils/face_detection.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Functions for face detection""" -from math import pi -from typing import Tuple, Optional, Dict - -import tensorflow as tf -import matplotlib.patches as patches -import matplotlib.pyplot as plt -from PIL import Image -from mtcnn import MTCNN -from trianglesolver import solve - -from utils import image_to_array - - -def compute_slacks(height, width, hyp_ratio) -> Tuple[float, float]: - """Compute slacks to add to bounding box on each site""" - - # compute angle and side for hypotenuse - _, b, _, A, _, _ = solve(c=width, a=height, B=pi / 2) - - # compute new height and width - a, _, c, _, _, _ = solve(b=b * (1.0 + hyp_ratio), B=pi / 2, A=A) - - # compute slacks - return c - width, a - height - - -def get_face_keypoints_detecting_function(minimal_confidence: float = 0.8): - """Create function for face keypoints detection""" - - # face detector - detector = MTCNN() - - # detect faces and their keypoints - def get_keypoints(image: Image) -> Optional[Dict]: - - # run inference to detect faces (on CPU only) - with tf.device("/cpu:0"): - detection = detector.detect_faces(image_to_array(image)) - - # run detection and keep results with certain confidence only - results = [item for item in detection if item['confidence'] > minimal_confidence] - - # nothing found - if len(results) == 0: - return None - - # return result with highest confidence and size - return max(results, key=lambda item: item['confidence'] * item['box'][2] * item['box'][3]) - - # return function - return get_keypoints - - -def plot_face_detection(image: Image, ax, face_keypoints: Optional, hyp_ratio: float = 1 / 3): - """Plot faces with keypoints and bounding boxes""" - - # make annotations - if face_keypoints is not None: - - # get bounding box - x, y, width, height = face_keypoints['box'] - - # add rectangle patch for detected face - rectangle = patches.Rectangle((x, y), width, height, linewidth=1, edgecolor='r', facecolor='none') - ax.add_patch(rectangle) - - # add rectangle patch with slacks - w_s, h_s = compute_slacks(height, width, hyp_ratio) - rectangle = 
patches.Rectangle((x - w_s, y - h_s), width + 2 * w_s, height + 2 * h_s, linewidth=1, edgecolor='r', - facecolor='none') - ax.add_patch(rectangle) - - # add keypoints - for coordinates in face_keypoints['keypoints'].values(): - circle = plt.Circle(coordinates, 3, color='r') - ax.add_artist(circle) - - # add image - ax.imshow(image) - - -def get_crop_points(image: Image, face_keypoints: Optional, hyp_ratio: float = 1 / 3) -> Image: - """Find position where to crop face from image""" - if face_keypoints is None: - return 0, 0, image.width, image.height - - # get bounding box - x, y, width, height = face_keypoints['box'] - - # compute slacks - w_s, h_s = compute_slacks(height, width, hyp_ratio) - - # compute coordinates - left = min(max(0, x - w_s), image.width) - upper = min(max(0, y - h_s), image.height) - right = min(x + width + w_s, image.width) - lower = min(y + height + h_s, image.height) - - return left, upper, right, lower - - -def crop_face(image: Image, face_keypoints: Optional, hyp_ratio: float = 1 / 3) -> Image: - """Crop input image to just the face""" - if face_keypoints is None: - print("No keypoints detected on image") - return image - - left, upper, right, lower = get_crop_points(image, face_keypoints, hyp_ratio) - - return image.crop((left, upper, right, lower)) diff --git a/spaces/amsterdamNLP/contrastive-pairs/description.md b/spaces/amsterdamNLP/contrastive-pairs/description.md deleted file mode 100644 index ec7838ce84790e8e72f2f6d890f1caeb1dd0ae62..0000000000000000000000000000000000000000 --- a/spaces/amsterdamNLP/contrastive-pairs/description.md +++ /dev/null @@ -1,7 +0,0 @@ -# Detecting stereotypes in the GPT-2 language model using CrowS-Pairs - -*GPT-2* is a language model that can score how likely it is that some text is a valid English sentence: not only grammaticality, but also the 'meaning' of the sentence is part of this score. *CrowS-Pairs* is a dataset with pairs of more and less stereotypical examples for different social groups (e.g., gender and nationality stereotypes). - -Below, you can select a CrowS-Pairs bias type from the drop-down menu, and click `Sample` to sample 10 random pairs from CrowS-Pairs. Alternatively, type your own pair of sentences. The demo shows for each pair of sentences which one receives the higher score ('is more likely'). - -If a language model systematically prefers more stereotypical examples, this is taken as evidence that has learnt these stereotypes from the training data and shows undesirable bias. diff --git a/spaces/anzoutian/White-box-Cartoonization/app.py b/spaces/anzoutian/White-box-Cartoonization/app.py deleted file mode 100644 index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000 --- a/spaces/anzoutian/White-box-Cartoonization/app.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations -import argparse -import functools -import os -import pathlib -import sys -from typing import Callable -import uuid - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image - -from io import BytesIO -from wbc.cartoonize import Cartoonize - -ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization' -TITLE = 'SystemErrorWang/White-box-Cartoonization' -DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}. 
- -""" -ARTICLE = """ - -""" - -SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] -def compress_UUID(): - ''' - 根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩bai大字符域生成du串 - 包括:[0-9a-zA-Z\-_]共64个 - 长度:(32-2)/3*2=20 - 备注:可在地球上人zhi人都用,使用100年不重复(2^120) - :return:String - ''' - row = str(uuid.uuid4()).replace('-', '') - safe_code = '' - for i in range(10): - enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10) - safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)]) - safe_code = safe_code.replace('-', '') - return safe_code - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - parser.add_argument('--allow-screenshot', action='store_true') - return parser.parse_args() - -def run( - image, - cartoonize : Cartoonize -) -> tuple[PIL.Image.Image]: - - out_path = compress_UUID()+'.png' - cartoonize.run_sigle(image.name, out_path) - - return PIL.Image.open(out_path) - - -def main(): - gr.close_all() - - args = parse_args() - - cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/')) - - func = functools.partial(run, cartoonize=cartoonize) - func = functools.update_wrapper(func, run) - - gr.Interface( - func, - [ - gr.inputs.Image(type='file', label='Input Image'), - ], - [ - gr.outputs.Image( - type='pil', - label='Result'), - ], - # examples=examples, - theme=args.theme, - title=TITLE, - description=DESCRIPTION, - article=ARTICLE, - allow_screenshot=args.allow_screenshot, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/Lora/scripts/lora_script.py b/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/Lora/scripts/lora_script.py deleted file mode 100644 index 29ec16018858f4210f00c83a6e18c0cb7adb5e40..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions-builtin/Lora/scripts/lora_script.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -import gradio as gr - -import lora -import extra_networks_lora -import ui_extra_networks_lora -from modules import script_callbacks, ui_extra_networks, extra_networks, shared - - -def unload(): - torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora - torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora - - -def before_ui(): - ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora()) - extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora()) - - -if not hasattr(torch.nn, 'Linear_forward_before_lora'): - torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward - -if not hasattr(torch.nn, 'Conv2d_forward_before_lora'): - torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward - -torch.nn.Linear.forward = lora.lora_Linear_forward -torch.nn.Conv2d.forward = lora.lora_Conv2d_forward - -script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules) 
-script_callbacks.on_script_unloaded(unload) -script_callbacks.on_before_ui(before_ui) - - -shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { - "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras), - "lora_apply_to_outputs": shared.OptionInfo(False, "Apply Lora to outputs rather than inputs when possible (experimental)"), - -})) diff --git a/spaces/arnavkartikeya/SCRIPture-final/predict.py b/spaces/arnavkartikeya/SCRIPture-final/predict.py deleted file mode 100644 index 35426cadcbb3bf8c3d8cb9c910511c154e451f4e..0000000000000000000000000000000000000000 --- a/spaces/arnavkartikeya/SCRIPture-final/predict.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -Download the weights in ./checkpoints beforehand for fast inference -wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth -wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth -wget https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth -""" - -from pathlib import Path - -from PIL import Image -import torch -from torchvision import transforms -from torchvision.transforms.functional import InterpolationMode -import cog - -from models.blip import blip_decoder -from models.blip_vqa import blip_vqa -from models.blip_itm import blip_itm - - -class Predictor(cog.Predictor): - def setup(self): - self.device = "cuda:0" - - self.models = { - 'image_captioning': blip_decoder(pretrained='checkpoints/model*_base_caption.pth', - image_size=384, vit='base'), - 'visual_question_answering': blip_vqa(pretrained='checkpoints/model*_vqa.pth', - image_size=480, vit='base'), - 'image_text_matching': blip_itm(pretrained='checkpoints/model_base_retrieval_coco.pth', - image_size=384, vit='base') - } - - @cog.input( - "image", - type=Path, - help="input image", - ) - @cog.input( - "task", - type=str, - default='image_captioning', - options=['image_captioning', 'visual_question_answering', 'image_text_matching'], - help="Choose a task.", - ) - @cog.input( - "question", - type=str, - default=None, - help="Type question for the input image for visual question answering task.", - ) - @cog.input( - "caption", - type=str, - default=None, - help="Type caption for the input image for image text matching task.", - ) - def predict(self, image, task, question, caption): - if task == 'visual_question_answering': - assert question is not None, 'Please type a question for visual question answering task.' - if task == 'image_text_matching': - assert caption is not None, 'Please type a caption for mage text matching task.' 
- - im = load_image(image, image_size=480 if task == 'visual_question_answering' else 384, device=self.device) - model = self.models[task] - model.eval() - model = model.to(self.device) - - if task == 'image_captioning': - with torch.no_grad(): - caption = model.generate(im, sample=False, num_beams=3, max_length=20, min_length=5) - return 'Caption: ' + caption[0] - - if task == 'visual_question_answering': - with torch.no_grad(): - answer = model(im, question, train=False, inference='generate') - return 'Answer: ' + answer[0] - - # image_text_matching - itm_output = model(im, caption, match_head='itm') - itm_score = torch.nn.functional.softmax(itm_output, dim=1)[:, 1] - itc_score = model(im, caption, match_head='itc') - return f'The image and text is matched with a probability of {itm_score.item():.4f}.\n' \ - f'The image feature and text feature has a cosine similarity of {itc_score.item():.4f}.' - - -def load_image(image, image_size, device): - raw_image = Image.open(str(image)).convert('RGB') - - w, h = raw_image.size - - transform = transforms.Compose([ - transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), - transforms.ToTensor(), - transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) - ]) - image = transform(raw_image).unsqueeze(0).to(device) - return image diff --git a/spaces/arxify/RVC-beta-v2-0618/docs/faiss_tips_ja.md b/spaces/arxify/RVC-beta-v2-0618/docs/faiss_tips_ja.md deleted file mode 100644 index 89cf5ba565d6c2ccdab5c8ec9566663bfdc3fbfc..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/docs/faiss_tips_ja.md +++ /dev/null @@ -1,101 +0,0 @@ -faiss tuning TIPS -================== -# about faiss -faissはfacebook researchの開発する、密なベクトルに対する近傍探索をまとめたライブラリで、多くの近似近傍探索の手法を効率的に実装しています。 -近似近傍探索はある程度精度を犠牲にしながら高速に類似するベクトルを探します。 - -## faiss in RVC -RVCではHuBERTで変換した特徴量のEmbeddingに対し、学習データから生成されたEmbeddingと類似するものを検索し、混ぜることでより元の音声に近い変換を実現しています。ただ、この検索は愚直に行うと時間がかかるため、近似近傍探索を用いることで高速な変換を実現しています。 - -# 実装のoverview -モデルが配置されている '/logs/your-experiment/3_feature256'には各音声データからHuBERTで抽出された特徴量が配置されています。 -ここからnpyファイルをファイル名でソートした順番で読み込み、ベクトルを連結してbig_npyを作成しfaissを学習させます。(このベクトルのshapeは[N, 256]です。) - -本Tipsではまずこれらのパラメータの意味を解説します。 - -# 手法の解説 -## index factory -index factoryは複数の近似近傍探索の手法を繋げるパイプラインをstringで表記するfaiss独自の記法です。 -これにより、index factoryの文字列を変更するだけで様々な近似近傍探索の手法を試せます。 -RVCでは以下のように使われています。 - -```python -index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf) -``` -index_factoryの引数のうち、1つ目はベクトルの次元数、2つ目はindex factoryの文字列で、3つ目には用いる距離を指定することができます。 - -より詳細な記法については -https://github.com/facebookresearch/faiss/wiki/The-index-factory - -## 距離指標 -embeddingの類似度として用いられる代表的な指標として以下の二つがあります。 - -- ユークリッド距離(METRIC_L2) -- 内積(METRIC_INNER_PRODUCT) - -ユークリッド距離では各次元において二乗の差をとり、全次元の差を足してから平方根をとります。これは日常的に用いる2次元、3次元での距離と同じです。 -内積はこのままでは類似度の指標として用いず、一般的にはL2ノルムで正規化してから内積をとるコサイン類似度を用います。 - -どちらがよいかは場合によりますが、word2vec等で得られるembeddingやArcFace等で学習した類似画像検索のモデルではコサイン類似度が用いられることが多いです。ベクトルXに対してl2正規化をnumpyで行う場合は、0 divisionを避けるために十分に小さな値をepsとして以下のコードで可能です。 - -```python -X_normed = X / np.maximum(eps, np.linalg.norm(X, ord=2, axis=-1, keepdims=True)) -``` - -また、index factoryには第3引数に渡す値を選ぶことで計算に用いる距離指標を変更できます。 - -```python -index = faiss.index_factory(dimention, text, faiss.METRIC_INNER_PRODUCT) -``` - -## IVF -IVF(Inverted file indexes)は全文検索における転置インデックスと似たようなアルゴリズムです。 -学習時には検索対象に対してkmeansでクラスタリングを行い、クラスタ中心を用いてボロノイ分割を行います。各データ点には一つずつクラスタが割り当てられるので、クラスタからデータ点を逆引きする辞書を作成します。 - -例えば以下のようにクラスタが割り当てられた場合 -|index|クラスタ| -|-----|-------| -|1|A| -|2|B| 
-
-# Recommended parameters
-There are official guidelines on how to choose an index, so the explanation here follows them.
-https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
-
-For datasets below 1M vectors, 4bit-PQ is the most efficient method available in faiss as of April 2023.
-To combine it with IVF, narrow the candidates with 4bit-PQ, and finally recompute the distances with an exact metric, you can use the following index factory:
-
-```python
-index = faiss.index_factory(256, "IVF1024,PQ128x4fs,RFlat")
-```
-
-## Recommended parameters for IVF
-If there are too many IVF cells — for example, coarse quantization with as many cells as there are data points — the search degenerates into a naive exhaustive search and becomes inefficient.
-For 1M vectors or fewer, an IVF value between 4*sqrt(N) and 16*sqrt(N) is recommended, where N is the number of data points.
-
-Because the computation time grows in proportion to n_probe, choose it in accordance with the accuracy you need. Personally, I do not think RVC needs that much accuracy, so n_probe = 1 is fine.
-
-## FastScan
-FastScan is a method that speeds up the rough distance approximation done by product quantization by carrying it out inside registers.
-Product quantization clusters every d dimensions (usually d=2) independently at training time, precomputes the distances between clusters, and builds a lookup table. At prediction time the distance for each dimension can then be computed in O(1) by consulting the lookup table.
-The number specified after PQ therefore usually indicates half the dimensionality of the vector.
-
-For a more detailed description of FastScan, please refer to the official documentation.
-https://github.com/facebookresearch/faiss/wiki/Fast-accumulation-of-PQ-and-AQ-codes-(FastScan)
-
-## RFlat
-RFlat is an instruction to recompute the rough distances produced by FastScan with the exact distance specified in the third argument of the index factory.
-When retrieving the k nearest neighbors, the recomputation is performed on k*k_factor points.
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_EAX.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_EAX.py
deleted file mode 100644
index fe93d719aaf04ffddbad064f58b62ac19ded4687..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_EAX.py
+++ /dev/null
@@ -1,773 +0,0 @@
-# ===================================================================
-#
-# Copyright (c) 2015, Legrandin
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in
-#    the documentation and/or other materials provided with the
-#    distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# =================================================================== - -import unittest -from binascii import unhexlify - -from Crypto.SelfTest.st_common import list_test_cases -from Crypto.SelfTest.loader import load_test_vectors_wycheproof -from Crypto.Util.py3compat import tobytes, bchr -from Crypto.Cipher import AES, DES3 -from Crypto.Hash import SHAKE128 - -from Crypto.Util.strxor import strxor - - -def get_tag_random(tag, length): - return SHAKE128.new(data=tobytes(tag)).read(length) - - -class EaxTests(unittest.TestCase): - - key_128 = get_tag_random("key_128", 16) - key_192 = get_tag_random("key_192", 16) - nonce_96 = get_tag_random("nonce_128", 12) - data_128 = get_tag_random("data_128", 16) - - def test_loopback_128(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - pt = get_tag_random("plaintext", 16 * 100) - ct = cipher.encrypt(pt) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - pt2 = cipher.decrypt(ct) - self.assertEqual(pt, pt2) - - def test_loopback_64(self): - cipher = DES3.new(self.key_192, DES3.MODE_EAX, nonce=self.nonce_96) - pt = get_tag_random("plaintext", 8 * 100) - ct = cipher.encrypt(pt) - - cipher = DES3.new(self.key_192, DES3.MODE_EAX, nonce=self.nonce_96) - pt2 = cipher.decrypt(ct) - self.assertEqual(pt, pt2) - - def test_nonce(self): - # If not passed, the nonce is created randomly - cipher = AES.new(self.key_128, AES.MODE_EAX) - nonce1 = cipher.nonce - cipher = AES.new(self.key_128, AES.MODE_EAX) - nonce2 = cipher.nonce - self.assertEqual(len(nonce1), 16) - self.assertNotEqual(nonce1, nonce2) - - cipher = AES.new(self.key_128, AES.MODE_EAX, self.nonce_96) - ct = cipher.encrypt(self.data_128) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertEqual(ct, cipher.encrypt(self.data_128)) - - def test_nonce_must_be_bytes(self): - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_EAX, - nonce=u'test12345678') - - def test_nonce_length(self): - # nonce can be of any length (but not empty) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_EAX, - nonce=b"") - - for x in range(1, 128): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=bchr(1) * x) - cipher.encrypt(bchr(1)) - - def test_block_size_128(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertEqual(cipher.block_size, AES.block_size) - - def test_block_size_64(self): - cipher = DES3.new(self.key_192, AES.MODE_EAX, nonce=self.nonce_96) - self.assertEqual(cipher.block_size, DES3.block_size) - - def test_nonce_attribute(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertEqual(cipher.nonce, self.nonce_96) - - # By default, a 16 bytes long nonce is randomly generated - nonce1 = AES.new(self.key_128, AES.MODE_EAX).nonce - nonce2 = AES.new(self.key_128, AES.MODE_EAX).nonce - self.assertEqual(len(nonce1), 16) - self.assertNotEqual(nonce1, nonce2) - - def test_unknown_parameters(self): - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_EAX, - self.nonce_96, 7) - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_EAX, - nonce=self.nonce_96, unknown=7) - - # But some are only known by the base cipher - # (e.g. 
use_aesni consumed by the AES module) - AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96, - use_aesni=False) - - def test_null_encryption_decryption(self): - for func in "encrypt", "decrypt": - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - result = getattr(cipher, func)(b"") - self.assertEqual(result, b"") - - def test_either_encrypt_or_decrypt(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.encrypt(b"") - self.assertRaises(TypeError, cipher.decrypt, b"") - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.decrypt(b"") - self.assertRaises(TypeError, cipher.encrypt, b"") - - def test_data_must_be_bytes(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.encrypt, u'test1234567890-*') - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.decrypt, u'test1234567890-*') - - def test_mac_len(self): - # Invalid MAC length - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_EAX, - nonce=self.nonce_96, mac_len=3) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_EAX, - nonce=self.nonce_96, mac_len=16+1) - - # Valid MAC length - for mac_len in range(5, 16 + 1): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96, - mac_len=mac_len) - _, mac = cipher.encrypt_and_digest(self.data_128) - self.assertEqual(len(mac), mac_len) - - # Default MAC length - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - _, mac = cipher.encrypt_and_digest(self.data_128) - self.assertEqual(len(mac), 16) - - def test_invalid_mac(self): - from Crypto.Util.strxor import strxor_c - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - ct, mac = cipher.encrypt_and_digest(self.data_128) - - invalid_mac = strxor_c(mac, 0x01) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(ValueError, cipher.decrypt_and_verify, ct, - invalid_mac) - - def test_hex_mac(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - mac_hex = cipher.hexdigest() - self.assertEqual(cipher.digest(), unhexlify(mac_hex)) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.hexverify(mac_hex) - - def test_message_chunks(self): - # Validate that both associated data and plaintext/ciphertext - # can be broken up in chunks of arbitrary length - - auth_data = get_tag_random("authenticated data", 127) - plaintext = get_tag_random("plaintext", 127) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.update(auth_data) - ciphertext, ref_mac = cipher.encrypt_and_digest(plaintext) - - def break_up(data, chunk_length): - return [data[i:i+chunk_length] for i in range(0, len(data), - chunk_length)] - - # Encryption - for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128: - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - - for chunk in break_up(auth_data, chunk_length): - cipher.update(chunk) - pt2 = b"" - for chunk in break_up(ciphertext, chunk_length): - pt2 += cipher.decrypt(chunk) - self.assertEqual(plaintext, pt2) - cipher.verify(ref_mac) - - # Decryption - for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128: - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - - for chunk in break_up(auth_data, chunk_length): - cipher.update(chunk) - ct2 = b"" - for chunk in break_up(plaintext, chunk_length): - ct2 += cipher.encrypt(chunk) - 
self.assertEqual(ciphertext, ct2) - self.assertEqual(cipher.digest(), ref_mac) - - def test_bytearray(self): - - # Encrypt - key_ba = bytearray(self.key_128) - nonce_ba = bytearray(self.nonce_96) - header_ba = bytearray(self.data_128) - data_ba = bytearray(self.data_128) - - cipher1 = AES.new(self.key_128, - AES.MODE_EAX, - nonce=self.nonce_96) - cipher1.update(self.data_128) - ct = cipher1.encrypt(self.data_128) - tag = cipher1.digest() - - cipher2 = AES.new(key_ba, - AES.MODE_EAX, - nonce=nonce_ba) - key_ba[:3] = b'\xFF\xFF\xFF' - nonce_ba[:3] = b'\xFF\xFF\xFF' - cipher2.update(header_ba) - header_ba[:3] = b'\xFF\xFF\xFF' - ct_test = cipher2.encrypt(data_ba) - data_ba[:3] = b'\x99\x99\x99' - tag_test = cipher2.digest() - - self.assertEqual(ct, ct_test) - self.assertEqual(tag, tag_test) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - # Decrypt - key_ba = bytearray(self.key_128) - nonce_ba = bytearray(self.nonce_96) - header_ba = bytearray(self.data_128) - ct_ba = bytearray(ct) - tag_ba = bytearray(tag) - del data_ba - - cipher3 = AES.new(key_ba, - AES.MODE_EAX, - nonce=nonce_ba) - key_ba[:3] = b'\xFF\xFF\xFF' - nonce_ba[:3] = b'\xFF\xFF\xFF' - cipher3.update(header_ba) - header_ba[:3] = b'\xFF\xFF\xFF' - pt_test = cipher3.decrypt(ct_ba) - ct_ba[:3] = b'\xFF\xFF\xFF' - cipher3.verify(tag_ba) - - self.assertEqual(pt_test, self.data_128) - - def test_memoryview(self): - - # Encrypt - key_mv = memoryview(bytearray(self.key_128)) - nonce_mv = memoryview(bytearray(self.nonce_96)) - header_mv = memoryview(bytearray(self.data_128)) - data_mv = memoryview(bytearray(self.data_128)) - - cipher1 = AES.new(self.key_128, - AES.MODE_EAX, - nonce=self.nonce_96) - cipher1.update(self.data_128) - ct = cipher1.encrypt(self.data_128) - tag = cipher1.digest() - - cipher2 = AES.new(key_mv, - AES.MODE_EAX, - nonce=nonce_mv) - key_mv[:3] = b'\xFF\xFF\xFF' - nonce_mv[:3] = b'\xFF\xFF\xFF' - cipher2.update(header_mv) - header_mv[:3] = b'\xFF\xFF\xFF' - ct_test = cipher2.encrypt(data_mv) - data_mv[:3] = b'\x99\x99\x99' - tag_test = cipher2.digest() - - self.assertEqual(ct, ct_test) - self.assertEqual(tag, tag_test) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - # Decrypt - key_mv = memoryview(bytearray(self.key_128)) - nonce_mv = memoryview(bytearray(self.nonce_96)) - header_mv = memoryview(bytearray(self.data_128)) - ct_mv = memoryview(bytearray(ct)) - tag_mv = memoryview(bytearray(tag)) - del data_mv - - cipher3 = AES.new(key_mv, - AES.MODE_EAX, - nonce=nonce_mv) - key_mv[:3] = b'\xFF\xFF\xFF' - nonce_mv[:3] = b'\xFF\xFF\xFF' - cipher3.update(header_mv) - header_mv[:3] = b'\xFF\xFF\xFF' - pt_test = cipher3.decrypt(ct_mv) - ct_mv[:3] = b'\x99\x99\x99' - cipher3.verify(tag_mv) - - self.assertEqual(pt_test, self.data_128) - - def test_output_param(self): - - pt = b'5' * 128 - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - ct = cipher.encrypt(pt) - tag = cipher.digest() - - output = bytearray(128) - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - res = cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - self.assertEqual(res, None) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - res = cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - self.assertEqual(res, None) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - res, tag_out = cipher.encrypt_and_digest(pt, output=output) - self.assertEqual(ct, output) - self.assertEqual(res, None) - self.assertEqual(tag, tag_out) - - cipher = 
AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - res = cipher.decrypt_and_verify(ct, tag, output=output) - self.assertEqual(pt, output) - self.assertEqual(res, None) - - def test_output_param_memoryview(self): - - pt = b'5' * 128 - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - ct = cipher.encrypt(pt) - - output = memoryview(bytearray(128)) - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.encrypt(pt, output=output) - self.assertEqual(ct, output) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.decrypt(ct, output=output) - self.assertEqual(pt, output) - - def test_output_param_neg(self): - LEN_PT = 16 - - pt = b'5' * LEN_PT - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - ct = cipher.encrypt(pt) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.encrypt, pt, output=b'0' * LEN_PT) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.decrypt, ct, output=b'0' * LEN_PT) - - shorter_output = bytearray(LEN_PT - 1) - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(ValueError, cipher.encrypt, pt, output=shorter_output) - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - self.assertRaises(ValueError, cipher.decrypt, ct, output=shorter_output) - - -class EaxFSMTests(unittest.TestCase): - - key_128 = get_tag_random("key_128", 16) - nonce_96 = get_tag_random("nonce_128", 12) - data_128 = get_tag_random("data_128", 16) - - def test_valid_init_encrypt_decrypt_digest_verify(self): - # No authenticated data, fixed plaintext - # Verify path INIT->ENCRYPT->DIGEST - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - ct = cipher.encrypt(self.data_128) - mac = cipher.digest() - - # Verify path INIT->DECRYPT->VERIFY - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - cipher.decrypt(ct) - cipher.verify(mac) - - def test_valid_init_update_digest_verify(self): - # No plaintext, fixed authenticated data - # Verify path INIT->UPDATE->DIGEST - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - cipher.update(self.data_128) - mac = cipher.digest() - - # Verify path INIT->UPDATE->VERIFY - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - cipher.update(self.data_128) - cipher.verify(mac) - - def test_valid_full_path(self): - # Fixed authenticated data, fixed plaintext - # Verify path INIT->UPDATE->ENCRYPT->DIGEST - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - cipher.update(self.data_128) - ct = cipher.encrypt(self.data_128) - mac = cipher.digest() - - # Verify path INIT->UPDATE->DECRYPT->VERIFY - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - cipher.update(self.data_128) - cipher.decrypt(ct) - cipher.verify(mac) - - def test_valid_init_digest(self): - # Verify path INIT->DIGEST - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.digest() - - def test_valid_init_verify(self): - # Verify path INIT->VERIFY - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - mac = cipher.digest() - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.verify(mac) - - def test_valid_multiple_encrypt_or_decrypt(self): - for method_name in "encrypt", "decrypt": - for auth_data in (None, b"333", self.data_128, - self.data_128 + b"3"): - if auth_data is None: - assoc_len = None - else: 
- assoc_len = len(auth_data) - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - if auth_data is not None: - cipher.update(auth_data) - method = getattr(cipher, method_name) - method(self.data_128) - method(self.data_128) - method(self.data_128) - method(self.data_128) - - def test_valid_multiple_digest_or_verify(self): - # Multiple calls to digest - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.update(self.data_128) - first_mac = cipher.digest() - for x in range(4): - self.assertEqual(first_mac, cipher.digest()) - - # Multiple calls to verify - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.update(self.data_128) - for x in range(5): - cipher.verify(first_mac) - - def test_valid_encrypt_and_digest_decrypt_and_verify(self): - # encrypt_and_digest - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.update(self.data_128) - ct, mac = cipher.encrypt_and_digest(self.data_128) - - # decrypt_and_verify - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.update(self.data_128) - pt = cipher.decrypt_and_verify(ct, mac) - self.assertEqual(self.data_128, pt) - - def test_invalid_mixing_encrypt_decrypt(self): - # Once per method, with or without assoc. data - for method1_name, method2_name in (("encrypt", "decrypt"), - ("decrypt", "encrypt")): - for assoc_data_present in (True, False): - cipher = AES.new(self.key_128, AES.MODE_EAX, - nonce=self.nonce_96) - if assoc_data_present: - cipher.update(self.data_128) - getattr(cipher, method1_name)(self.data_128) - self.assertRaises(TypeError, getattr(cipher, method2_name), - self.data_128) - - def test_invalid_encrypt_or_update_after_digest(self): - for method_name in "encrypt", "update": - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.encrypt(self.data_128) - cipher.digest() - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data_128) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.encrypt_and_digest(self.data_128) - - def test_invalid_decrypt_or_update_after_verify(self): - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - ct = cipher.encrypt(self.data_128) - mac = cipher.digest() - - for method_name in "decrypt", "update": - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.decrypt(ct) - cipher.verify(mac) - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data_128) - - cipher = AES.new(self.key_128, AES.MODE_EAX, nonce=self.nonce_96) - cipher.decrypt_and_verify(ct, mac) - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data_128) - - -class TestVectorsPaper(unittest.TestCase): - """Class exercising the EAX test vectors found in - http://www.cs.ucdavis.edu/~rogaway/papers/eax.pdf""" - - test_vectors_hex = [ - ( '6bfb914fd07eae6b', - '', - '', - 'e037830e8389f27b025a2d6527e79d01', - '233952dee4d5ed5f9b9c6d6ff80ff478', - '62EC67F9C3A4A407FCB2A8C49031A8B3' - ), - ( - 'fa3bfd4806eb53fa', - 'f7fb', - '19dd', - '5c4c9331049d0bdab0277408f67967e5', - '91945d3f4dcbee0bf45ef52255f095a4', - 'BECAF043B0A23D843194BA972C66DEBD' - ), - ( '234a3463c1264ac6', - '1a47cb4933', - 'd851d5bae0', - '3a59f238a23e39199dc9266626c40f80', - '01f74ad64077f2e704c0f60ada3dd523', - '70C3DB4F0D26368400A10ED05D2BFF5E' - ), - ( - '33cce2eabff5a79d', - '481c9e39b1', - '632a9d131a', - 'd4c168a4225d8e1ff755939974a7bede', - 'd07cf6cbb7f313bdde66b727afd3c5e8', - '8408DFFF3C1A2B1292DC199E46B7D617' - ), - ( - 
'aeb96eaebe2970e9', - '40d0c07da5e4', - '071dfe16c675', - 'cb0677e536f73afe6a14b74ee49844dd', - '35b6d0580005bbc12b0587124557d2c2', - 'FDB6B06676EEDC5C61D74276E1F8E816' - ), - ( - 'd4482d1ca78dce0f', - '4de3b35c3fc039245bd1fb7d', - '835bb4f15d743e350e728414', - 'abb8644fd6ccb86947c5e10590210a4f', - 'bd8e6e11475e60b268784c38c62feb22', - '6EAC5C93072D8E8513F750935E46DA1B' - ), - ( - '65d2017990d62528', - '8b0a79306c9ce7ed99dae4f87f8dd61636', - '02083e3979da014812f59f11d52630da30', - '137327d10649b0aa6e1c181db617d7f2', - '7c77d6e813bed5ac98baa417477a2e7d', - '1A8C98DCD73D38393B2BF1569DEEFC19' - ), - ( - '54b9f04e6a09189a', - '1bda122bce8a8dbaf1877d962b8592dd2d56', - '2ec47b2c4954a489afc7ba4897edcdae8cc3', - '3b60450599bd02c96382902aef7f832a', - '5fff20cafab119ca2fc73549e20f5b0d', - 'DDE59B97D722156D4D9AFF2BC7559826' - ), - ( - '899a175897561d7e', - '6cf36720872b8513f6eab1a8a44438d5ef11', - '0de18fd0fdd91e7af19f1d8ee8733938b1e8', - 'e7f6d2231618102fdb7fe55ff1991700', - 'a4a4782bcffd3ec5e7ef6d8c34a56123', - 'B781FCF2F75FA5A8DE97A9CA48E522EC' - ), - ( - '126735fcc320d25a', - 'ca40d7446e545ffaed3bd12a740a659ffbbb3ceab7', - 'cb8920f87a6c75cff39627b56e3ed197c552d295a7', - 'cfc46afc253b4652b1af3795b124ab6e', - '8395fcf1e95bebd697bd010bc766aac3', - '22E7ADD93CFC6393C57EC0B3C17D6B44' - ), - ] - - test_vectors = [[unhexlify(x) for x in tv] for tv in test_vectors_hex] - - def runTest(self): - for assoc_data, pt, ct, mac, key, nonce in self.test_vectors: - # Encrypt - cipher = AES.new(key, AES.MODE_EAX, nonce, mac_len=len(mac)) - cipher.update(assoc_data) - ct2, mac2 = cipher.encrypt_and_digest(pt) - self.assertEqual(ct, ct2) - self.assertEqual(mac, mac2) - - # Decrypt - cipher = AES.new(key, AES.MODE_EAX, nonce, mac_len=len(mac)) - cipher.update(assoc_data) - pt2 = cipher.decrypt_and_verify(ct, mac) - self.assertEqual(pt, pt2) - - -class TestVectorsWycheproof(unittest.TestCase): - - def __init__(self, wycheproof_warnings): - unittest.TestCase.__init__(self) - self._wycheproof_warnings = wycheproof_warnings - self._id = "None" - - def setUp(self): - - def filter_tag(group): - return group['tagSize'] // 8 - - self.tv = load_test_vectors_wycheproof(("Cipher", "wycheproof"), - "aes_eax_test.json", - "Wycheproof EAX", - group_tag={'tag_size': filter_tag}) - - def shortDescription(self): - return self._id - - def warn(self, tv): - if tv.warning and self._wycheproof_warnings: - import warnings - warnings.warn("Wycheproof warning: %s (%s)" % (self._id, tv.comment)) - - def test_encrypt(self, tv): - self._id = "Wycheproof Encrypt EAX Test #" + str(tv.id) - - try: - cipher = AES.new(tv.key, AES.MODE_EAX, tv.iv, mac_len=tv.tag_size) - except ValueError as e: - assert len(tv.iv) == 0 and "Nonce cannot be empty" in str(e) - return - - cipher.update(tv.aad) - ct, tag = cipher.encrypt_and_digest(tv.msg) - if tv.valid: - self.assertEqual(ct, tv.ct) - self.assertEqual(tag, tv.tag) - self.warn(tv) - - def test_decrypt(self, tv): - self._id = "Wycheproof Decrypt EAX Test #" + str(tv.id) - - try: - cipher = AES.new(tv.key, AES.MODE_EAX, tv.iv, mac_len=tv.tag_size) - except ValueError as e: - assert len(tv.iv) == 0 and "Nonce cannot be empty" in str(e) - return - - cipher.update(tv.aad) - try: - pt = cipher.decrypt_and_verify(tv.ct, tv.tag) - except ValueError: - assert not tv.valid - else: - assert tv.valid - self.assertEqual(pt, tv.msg) - self.warn(tv) - - def test_corrupt_decrypt(self, tv): - self._id = "Wycheproof Corrupt Decrypt EAX Test #" + str(tv.id) - if len(tv.iv) == 0 or len(tv.ct) < 1: - return - cipher = 
AES.new(tv.key, AES.MODE_EAX, tv.iv, mac_len=tv.tag_size) - cipher.update(tv.aad) - ct_corrupt = strxor(tv.ct, b"\x00" * (len(tv.ct) - 1) + b"\x01") - self.assertRaises(ValueError, cipher.decrypt_and_verify, ct_corrupt, tv.tag) - - def runTest(self): - - for tv in self.tv: - self.test_encrypt(tv) - self.test_decrypt(tv) - self.test_corrupt_decrypt(tv) - - -class TestOtherCiphers(unittest.TestCase): - - @classmethod - def create_test(cls, name, factory, key_size): - - def test_template(self, factory=factory, key_size=key_size): - cipher = factory.new(get_tag_random("cipher", key_size), - factory.MODE_EAX, - nonce=b"nonce") - ct, mac = cipher.encrypt_and_digest(b"plaintext") - - cipher = factory.new(get_tag_random("cipher", key_size), - factory.MODE_EAX, - nonce=b"nonce") - pt2 = cipher.decrypt_and_verify(ct, mac) - - self.assertEqual(b"plaintext", pt2) - - setattr(cls, "test_" + name, test_template) - - -from Crypto.Cipher import DES, DES3, ARC2, CAST, Blowfish - -TestOtherCiphers.create_test("DES_" + str(DES.key_size), DES, DES.key_size) -for ks in DES3.key_size: - TestOtherCiphers.create_test("DES3_" + str(ks), DES3, ks) -for ks in ARC2.key_size: - TestOtherCiphers.create_test("ARC2_" + str(ks), ARC2, ks) -for ks in CAST.key_size: - TestOtherCiphers.create_test("CAST_" + str(ks), CAST, ks) -for ks in Blowfish.key_size: - TestOtherCiphers.create_test("Blowfish_" + str(ks), Blowfish, ks) - - -def get_tests(config={}): - wycheproof_warnings = config.get('wycheproof_warnings') - - tests = [] - tests += list_test_cases(EaxTests) - tests += list_test_cases(EaxFSMTests) - tests += [ TestVectorsPaper() ] - tests += [ TestVectorsWycheproof(wycheproof_warnings) ] - tests += list_test_cases(TestOtherCiphers) - return tests - - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_runner.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_runner.py deleted file mode 100644 index 9282bb93d37a4a3a0ada346ec7534de0ea0e893d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_runner.py +++ /dev/null @@ -1,381 +0,0 @@ -import asyncio -import signal -import socket -from abc import ABC, abstractmethod -from typing import Any, List, Optional, Set - -from yarl import URL - -from .web_app import Application -from .web_server import Server - -try: - from ssl import SSLContext -except ImportError: - SSLContext = object # type: ignore[misc,assignment] - - -__all__ = ( - "BaseSite", - "TCPSite", - "UnixSite", - "NamedPipeSite", - "SockSite", - "BaseRunner", - "AppRunner", - "ServerRunner", - "GracefulExit", -) - - -class GracefulExit(SystemExit): - code = 1 - - -def _raise_graceful_exit() -> None: - raise GracefulExit() - - -class BaseSite(ABC): - __slots__ = ("_runner", "_shutdown_timeout", "_ssl_context", "_backlog", "_server") - - def __init__( - self, - runner: "BaseRunner", - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - ) -> None: - if runner.server is None: - raise RuntimeError("Call runner.setup() before making a site") - self._runner = runner - self._shutdown_timeout = shutdown_timeout - self._ssl_context = ssl_context - self._backlog = backlog - self._server: Optional[asyncio.AbstractServer] = None - - @property - @abstractmethod - def name(self) -> str: - pass # pragma: no cover - - @abstractmethod - async def start(self) 
-> None: - self._runner._reg_site(self) - - async def stop(self) -> None: - self._runner._check_site(self) - if self._server is None: - self._runner._unreg_site(self) - return # not started yet - self._server.close() - # named pipes do not have wait_closed property - if hasattr(self._server, "wait_closed"): - await self._server.wait_closed() - await self._runner.shutdown() - assert self._runner.server - await self._runner.server.shutdown(self._shutdown_timeout) - self._runner._unreg_site(self) - - -class TCPSite(BaseSite): - __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port") - - def __init__( - self, - runner: "BaseRunner", - host: Optional[str] = None, - port: Optional[int] = None, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - reuse_address: Optional[bool] = None, - reuse_port: Optional[bool] = None, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._host = host - if port is None: - port = 8443 if self._ssl_context else 8080 - self._port = port - self._reuse_address = reuse_address - self._reuse_port = reuse_port - - @property - def name(self) -> str: - scheme = "https" if self._ssl_context else "http" - host = "0.0.0.0" if self._host is None else self._host - return str(URL.build(scheme=scheme, host=host, port=self._port)) - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_server( - server, - self._host, - self._port, - ssl=self._ssl_context, - backlog=self._backlog, - reuse_address=self._reuse_address, - reuse_port=self._reuse_port, - ) - - -class UnixSite(BaseSite): - __slots__ = ("_path",) - - def __init__( - self, - runner: "BaseRunner", - path: str, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = None, - backlog: int = 128, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._path = path - - @property - def name(self) -> str: - scheme = "https" if self._ssl_context else "http" - return f"{scheme}://unix:{self._path}:" - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_unix_server( - server, self._path, ssl=self._ssl_context, backlog=self._backlog - ) - - -class NamedPipeSite(BaseSite): - __slots__ = ("_path",) - - def __init__( - self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0 - ) -> None: - loop = asyncio.get_event_loop() - if not isinstance( - loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] - ): - raise RuntimeError( - "Named Pipes only available in proactor" "loop under windows" - ) - super().__init__(runner, shutdown_timeout=shutdown_timeout) - self._path = path - - @property - def name(self) -> str: - return self._path - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - _server = await loop.start_serving_pipe( # type: ignore[attr-defined] - server, self._path - ) - self._server = _server[0] - - -class SockSite(BaseSite): - __slots__ = ("_sock", "_name") - - def __init__( - self, - runner: "BaseRunner", - sock: socket.socket, - *, - shutdown_timeout: float = 60.0, - ssl_context: Optional[SSLContext] = 
None, - backlog: int = 128, - ) -> None: - super().__init__( - runner, - shutdown_timeout=shutdown_timeout, - ssl_context=ssl_context, - backlog=backlog, - ) - self._sock = sock - scheme = "https" if self._ssl_context else "http" - if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: - name = f"{scheme}://unix:{sock.getsockname()}:" - else: - host, port = sock.getsockname()[:2] - name = str(URL.build(scheme=scheme, host=host, port=port)) - self._name = name - - @property - def name(self) -> str: - return self._name - - async def start(self) -> None: - await super().start() - loop = asyncio.get_event_loop() - server = self._runner.server - assert server is not None - self._server = await loop.create_server( - server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog - ) - - -class BaseRunner(ABC): - __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites") - - def __init__(self, *, handle_signals: bool = False, **kwargs: Any) -> None: - self._handle_signals = handle_signals - self._kwargs = kwargs - self._server: Optional[Server] = None - self._sites: List[BaseSite] = [] - - @property - def server(self) -> Optional[Server]: - return self._server - - @property - def addresses(self) -> List[Any]: - ret: List[Any] = [] - for site in self._sites: - server = site._server - if server is not None: - sockets = server.sockets - if sockets is not None: - for sock in sockets: - ret.append(sock.getsockname()) - return ret - - @property - def sites(self) -> Set[BaseSite]: - return set(self._sites) - - async def setup(self) -> None: - loop = asyncio.get_event_loop() - - if self._handle_signals: - try: - loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) - loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) - except NotImplementedError: # pragma: no cover - # add_signal_handler is not implemented on Windows - pass - - self._server = await self._make_server() - - @abstractmethod - async def shutdown(self) -> None: - pass # pragma: no cover - - async def cleanup(self) -> None: - loop = asyncio.get_event_loop() - - # The loop over sites is intentional, an exception on gather() - # leaves self._sites in unpredictable state. 
- # The loop guaranties that a site is either deleted on success or - # still present on failure - for site in list(self._sites): - await site.stop() - await self._cleanup_server() - self._server = None - if self._handle_signals: - try: - loop.remove_signal_handler(signal.SIGINT) - loop.remove_signal_handler(signal.SIGTERM) - except NotImplementedError: # pragma: no cover - # remove_signal_handler is not implemented on Windows - pass - - @abstractmethod - async def _make_server(self) -> Server: - pass # pragma: no cover - - @abstractmethod - async def _cleanup_server(self) -> None: - pass # pragma: no cover - - def _reg_site(self, site: BaseSite) -> None: - if site in self._sites: - raise RuntimeError(f"Site {site} is already registered in runner {self}") - self._sites.append(site) - - def _check_site(self, site: BaseSite) -> None: - if site not in self._sites: - raise RuntimeError(f"Site {site} is not registered in runner {self}") - - def _unreg_site(self, site: BaseSite) -> None: - if site not in self._sites: - raise RuntimeError(f"Site {site} is not registered in runner {self}") - self._sites.remove(site) - - -class ServerRunner(BaseRunner): - """Low-level web server runner""" - - __slots__ = ("_web_server",) - - def __init__( - self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any - ) -> None: - super().__init__(handle_signals=handle_signals, **kwargs) - self._web_server = web_server - - async def shutdown(self) -> None: - pass - - async def _make_server(self) -> Server: - return self._web_server - - async def _cleanup_server(self) -> None: - pass - - -class AppRunner(BaseRunner): - """Web Application runner""" - - __slots__ = ("_app",) - - def __init__( - self, app: Application, *, handle_signals: bool = False, **kwargs: Any - ) -> None: - super().__init__(handle_signals=handle_signals, **kwargs) - if not isinstance(app, Application): - raise TypeError( - "The first argument should be web.Application " - "instance, got {!r}".format(app) - ) - self._app = app - - @property - def app(self) -> Application: - return self._app - - async def shutdown(self) -> None: - await self._app.shutdown() - - async def _make_server(self) -> Server: - loop = asyncio.get_event_loop() - self._app._set_loop(loop) - self._app.on_startup.freeze() - await self._app.startup() - self._app.freeze() - - return self._app._make_handler(loop=loop, **self._kwargs) - - async def _cleanup_server(self) -> None: - await self._app.cleanup() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/bar_chart_with_highlighted_bar.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/bar_chart_with_highlighted_bar.py deleted file mode 100644 index d277bf9f3eccae3b8cb5b1b734a231be760f05ac..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/bar_chart_with_highlighted_bar.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Bar Chart with Highlighted Bar ------------------------------- -This example shows a basic bar chart with a single bar highlighted. -""" -# category: bar charts -import altair as alt -from vega_datasets import data - -source = data.wheat() - -alt.Chart(source).mark_bar().encode( - x='year:O', - y="wheat:Q", - # The highlight will be set on the result of a conditional statement - color=alt.condition( - alt.datum.year == 1810, # If the year is 1810 this test returns True, - alt.value('orange'), # which sets the bar orange. 
- alt.value('steelblue') # And if it's not true it sets the bar steelblue. - ) -).properties(width=600) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/vengine_gen.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/vengine_gen.py deleted file mode 100644 index 26421526f62a07e04419cd57f1f19a64ecd36452..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/vengine_gen.py +++ /dev/null @@ -1,675 +0,0 @@ -# -# DEPRECATED: implementation for ffi.verify() -# -import sys, os -import types - -from . import model -from .error import VerificationError - - -class VGenericEngine(object): - _class_key = 'g' - _gen_python_module = False - - def __init__(self, verifier): - self.verifier = verifier - self.ffi = verifier.ffi - self.export_symbols = [] - self._struct_pending_verification = {} - - def patch_extension_kwds(self, kwds): - # add 'export_symbols' to the dictionary. Note that we add the - # list before filling it. When we fill it, it will thus also show - # up in kwds['export_symbols']. - kwds.setdefault('export_symbols', self.export_symbols) - - def find_module(self, module_name, path, so_suffixes): - for so_suffix in so_suffixes: - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - - def collect_types(self): - pass # not needed in the generic engine - - def _prnt(self, what=''): - self._f.write(what + '\n') - - def write_source_to_f(self): - prnt = self._prnt - # first paste some standard set of lines that are mostly '#include' - prnt(cffimod_header) - # then paste the C source given by the user, verbatim. - prnt(self.verifier.preamble) - # - # call generate_gen_xxx_decl(), for every xxx found from - # ffi._parser._declarations. This generates all the functions. - self._generate('decl') - # - # on Windows, distutils insists on putting init_cffi_xyz in - # 'export_symbols', so instead of fighting it, just give up and - # give it one - if sys.platform == 'win32': - if sys.version_info >= (3,): - prefix = 'PyInit_' - else: - prefix = 'init' - modname = self.verifier.get_module_name() - prnt("void %s%s(void) { }\n" % (prefix, modname)) - - def load_library(self, flags=0): - # import it with the CFFI backend - backend = self.ffi._backend - # needs to make a path that contains '/', on Posix - filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename, flags) - # - # call loading_gen_struct() to get the struct layout inferred by - # the C compiler - self._load(module, 'loading') - - # build the FFILibrary class and instance, this is a module subclass - # because modules are expected to have usually-constant-attributes and - # in PyPy this means the JIT is able to treat attributes as constant, - # which we want. - class FFILibrary(types.ModuleType): - _cffi_generic_module = module - _cffi_ffi = self.ffi - _cffi_dir = [] - def __dir__(self): - return FFILibrary._cffi_dir - library = FFILibrary("") - # - # finally, call the loaded_gen_xxx() functions. This will set - # up the 'library' object. 
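-        # (each loaded_gen_xxx() hook attaches the corresponding function,
-        # constant, global variable or enum value as an attribute of 'library')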
- self._load(module, 'loaded', library=library) - return library - - def _get_declarations(self): - lst = [(key, tp) for (key, (tp, qual)) in - self.ffi._parser._declarations.items()] - lst.sort() - return lst - - def _generate(self, step_name): - for name, tp in self._get_declarations(): - kind, realname = name.split(' ', 1) - try: - method = getattr(self, '_generate_gen_%s_%s' % (kind, - step_name)) - except AttributeError: - raise VerificationError( - "not implemented in verify(): %r" % name) - try: - method(tp, realname) - except Exception as e: - model.attach_exception_info(e, name) - raise - - def _load(self, module, step_name, **kwds): - for name, tp in self._get_declarations(): - kind, realname = name.split(' ', 1) - method = getattr(self, '_%s_gen_%s' % (step_name, kind)) - try: - method(tp, realname, module, **kwds) - except Exception as e: - model.attach_exception_info(e, name) - raise - - def _generate_nothing(self, tp, name): - pass - - def _loaded_noop(self, tp, name, module, **kwds): - pass - - # ---------- - # typedefs: generates no code so far - - _generate_gen_typedef_decl = _generate_nothing - _loading_gen_typedef = _loaded_noop - _loaded_gen_typedef = _loaded_noop - - # ---------- - # function declarations - - def _generate_gen_function_decl(self, tp, name): - assert isinstance(tp, model.FunctionPtrType) - if tp.ellipsis: - # cannot support vararg functions better than this: check for its - # exact type (including the fixed arguments), and build it as a - # constant function pointer (no _cffi_f_%s wrapper) - self._generate_gen_const(False, name, tp) - return - prnt = self._prnt - numargs = len(tp.args) - argnames = [] - for i, type in enumerate(tp.args): - indirection = '' - if isinstance(type, model.StructOrUnion): - indirection = '*' - argnames.append('%sx%d' % (indirection, i)) - context = 'argument of %s' % name - arglist = [type.get_c_name(' %s' % arg, context) - for type, arg in zip(tp.args, argnames)] - tpresult = tp.result - if isinstance(tpresult, model.StructOrUnion): - arglist.insert(0, tpresult.get_c_name(' *r', context)) - tpresult = model.void_type - arglist = ', '.join(arglist) or 'void' - wrappername = '_cffi_f_%s' % name - self.export_symbols.append(wrappername) - if tp.abi: - abi = tp.abi + ' ' - else: - abi = '' - funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) - context = 'result of %s' % name - prnt(tpresult.get_c_name(funcdecl, context)) - prnt('{') - # - if isinstance(tp.result, model.StructOrUnion): - result_code = '*r = ' - elif not isinstance(tp.result, model.VoidType): - result_code = 'return ' - else: - result_code = '' - prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) - prnt('}') - prnt() - - _loading_gen_function = _loaded_noop - - def _loaded_gen_function(self, tp, name, module, library): - assert isinstance(tp, model.FunctionPtrType) - if tp.ellipsis: - newfunction = self._load_constant(False, tp, name, module) - else: - indirections = [] - base_tp = tp - if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) - or isinstance(tp.result, model.StructOrUnion)): - indirect_args = [] - for i, typ in enumerate(tp.args): - if isinstance(typ, model.StructOrUnion): - typ = model.PointerType(typ) - indirections.append((i, typ)) - indirect_args.append(typ) - indirect_result = tp.result - if isinstance(indirect_result, model.StructOrUnion): - if indirect_result.fldtypes is None: - raise TypeError("'%s' is used as result type, " - "but is opaque" % ( - indirect_result._get_c_name(),)) - indirect_result = 
model.PointerType(indirect_result) - indirect_args.insert(0, indirect_result) - indirections.insert(0, ("result", indirect_result)) - indirect_result = model.void_type - tp = model.FunctionPtrType(tuple(indirect_args), - indirect_result, tp.ellipsis) - BFunc = self.ffi._get_cached_btype(tp) - wrappername = '_cffi_f_%s' % name - newfunction = module.load_function(BFunc, wrappername) - for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ, - base_tp) - setattr(library, name, newfunction) - type(library)._cffi_dir.append(name) - - def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): - backend = self.ffi._backend - BType = self.ffi._get_cached_btype(tp) - if i == "result": - ffi = self.ffi - def newfunc(*args): - res = ffi.new(BType) - oldfunc(res, *args) - return res[0] - else: - def newfunc(*args): - args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] - return oldfunc(*args) - newfunc._cffi_base_type = base_tp - return newfunc - - # ---------- - # named structs - - def _generate_gen_struct_decl(self, tp, name): - assert name == tp.name - self._generate_struct_or_union_decl(tp, 'struct', name) - - def _loading_gen_struct(self, tp, name, module): - self._loading_struct_or_union(tp, 'struct', name, module) - - def _loaded_gen_struct(self, tp, name, module, **kwds): - self._loaded_struct_or_union(tp) - - def _generate_gen_union_decl(self, tp, name): - assert name == tp.name - self._generate_struct_or_union_decl(tp, 'union', name) - - def _loading_gen_union(self, tp, name, module): - self._loading_struct_or_union(tp, 'union', name, module) - - def _loaded_gen_union(self, tp, name, module, **kwds): - self._loaded_struct_or_union(tp) - - def _generate_struct_or_union_decl(self, tp, prefix, name): - if tp.fldnames is None: - return # nothing to do with opaque structs - checkfuncname = '_cffi_check_%s_%s' % (prefix, name) - layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - cname = ('%s %s' % (prefix, name)).strip() - # - prnt = self._prnt - prnt('static void %s(%s *p)' % (checkfuncname, cname)) - prnt('{') - prnt(' /* only to generate compile-time warnings or errors */') - prnt(' (void)p;') - for fname, ftype, fbitsize, fqual in tp.enumfields(): - if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()) or fbitsize >= 0: - # accept all integers, but complain on float or double - prnt(' (void)((p->%s) << 1);' % fname) - else: - # only accept exactly the type declared. 
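-                # (assigning &p->field to a pointer of the declared type makes
-                # the C compiler itself flag a mismatch between the declared
-                # field type and the real one when the module is compiled)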
- try: - prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), - fname)) - except VerificationError as e: - prnt(' /* %s */' % str(e)) # cannot verify it, ignore - prnt('}') - self.export_symbols.append(layoutfuncname) - prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) - prnt('{') - prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static intptr_t nums[] = {') - prnt(' sizeof(%s),' % cname) - prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize, fqual in tp.enumfields(): - if fbitsize >= 0: - continue # xxx ignore fbitsize for now - prnt(' offsetof(%s, %s),' % (cname, fname)) - if isinstance(ftype, model.ArrayType) and ftype.length is None: - prnt(' 0, /* %s */' % ftype._get_c_name()) - else: - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) - prnt(' -1') - prnt(' };') - prnt(' return nums[i];') - prnt(' /* the next line is not executed, but compiled */') - prnt(' %s(0);' % (checkfuncname,)) - prnt('}') - prnt() - - def _loading_struct_or_union(self, tp, prefix, name, module): - if tp.fldnames is None: - return # nothing to do with opaque structs - layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - # - BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] - function = module.load_function(BFunc, layoutfuncname) - layout = [] - num = 0 - while True: - x = function(num) - if x < 0: break - layout.append(x) - num += 1 - if isinstance(tp, model.StructOrUnion) and tp.partial: - # use the function()'s sizes and offsets to guide the - # layout of the struct - totalsize = layout[0] - totalalignment = layout[1] - fieldofs = layout[2::2] - fieldsize = layout[3::2] - tp.force_flatten() - assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) - tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment - else: - cname = ('%s %s' % (prefix, name)).strip() - self._struct_pending_verification[tp] = layout, cname - - def _loaded_struct_or_union(self, tp): - if tp.fldnames is None: - return # nothing to do with opaque structs - self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered - - if tp in self._struct_pending_verification: - # check that the layout sizes and offsets match the real ones - def check(realvalue, expectedvalue, msg): - if realvalue != expectedvalue: - raise VerificationError( - "%s (we have %d, but C compiler says %d)" - % (msg, expectedvalue, realvalue)) - ffi = self.ffi - BStruct = ffi._get_cached_btype(tp) - layout, cname = self._struct_pending_verification.pop(tp) - check(layout[0], ffi.sizeof(BStruct), "wrong total size") - check(layout[1], ffi.alignof(BStruct), "wrong total alignment") - i = 2 - for fname, ftype, fbitsize, fqual in tp.enumfields(): - if fbitsize >= 0: - continue # xxx ignore fbitsize for now - check(layout[i], ffi.offsetof(BStruct, fname), - "wrong offset for field %r" % (fname,)) - if layout[i+1] != 0: - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) - i += 2 - assert i == len(layout) - - # ---------- - # 'anonymous' declarations. These are produced for anonymous structs - # or unions; the 'name' is obtained by a typedef. 
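-    # (anonymous enums are routed to the enum helpers below; anything else
-    # reuses the struct-or-union helpers with an empty prefix)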
- - def _generate_gen_anonymous_decl(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_gen_enum_decl(tp, name, '') - else: - self._generate_struct_or_union_decl(tp, '', name) - - def _loading_gen_anonymous(self, tp, name, module): - if isinstance(tp, model.EnumType): - self._loading_gen_enum(tp, name, module, '') - else: - self._loading_struct_or_union(tp, '', name, module) - - def _loaded_gen_anonymous(self, tp, name, module, **kwds): - if isinstance(tp, model.EnumType): - self._loaded_gen_enum(tp, name, module, **kwds) - else: - self._loaded_struct_or_union(tp) - - # ---------- - # constants, likely declared with '#define' - - def _generate_gen_const(self, is_int, name, tp=None, category='const', - check_value=None): - prnt = self._prnt - funcname = '_cffi_%s_%s' % (category, name) - self.export_symbols.append(funcname) - if check_value is not None: - assert is_int - assert category == 'const' - prnt('int %s(char *out_error)' % funcname) - prnt('{') - self._check_int_constant_value(name, check_value) - prnt(' return 0;') - prnt('}') - elif is_int: - assert category == 'const' - prnt('int %s(long long *out_value)' % funcname) - prnt('{') - prnt(' *out_value = (long long)(%s);' % (name,)) - prnt(' return (%s) <= 0;' % (name,)) - prnt('}') - else: - assert tp is not None - assert check_value is None - if category == 'var': - ampersand = '&' - else: - ampersand = '' - extra = '' - if category == 'const' and isinstance(tp, model.StructOrUnion): - extra = 'const *' - ampersand = '&' - prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) - prnt('{') - prnt(' return (%s%s);' % (ampersand, name)) - prnt('}') - prnt() - - def _generate_gen_constant_decl(self, tp, name): - is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() - self._generate_gen_const(is_int, name, tp) - - _loading_gen_constant = _loaded_noop - - def _load_constant(self, is_int, tp, name, module, check_value=None): - funcname = '_cffi_const_%s' % name - if check_value is not None: - assert is_int - self._load_known_int_constant(module, funcname) - value = check_value - elif is_int: - BType = self.ffi._typeof_locked("long long*")[0] - BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType) - negative = function(p) - value = int(p[0]) - if value < 0 and not negative: - BLongLong = self.ffi._typeof_locked("long long")[0] - value += (1 << (8*self.ffi.sizeof(BLongLong))) - else: - assert check_value is None - fntypeextra = '(*)(void)' - if isinstance(tp, model.StructOrUnion): - fntypeextra = '*' + fntypeextra - BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] - function = module.load_function(BFunc, funcname) - value = function() - if isinstance(tp, model.StructOrUnion): - value = value[0] - return value - - def _loaded_gen_constant(self, tp, name, module, library): - is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() - value = self._load_constant(is_int, tp, name, module) - setattr(library, name, value) - type(library)._cffi_dir.append(name) - - # ---------- - # enums - - def _check_int_constant_value(self, name, value): - prnt = self._prnt - if value <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - name, name, value)) - else: - prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - name, name, value)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % name) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) - prnt(' else') - prnt(' sprintf(buf, "%%lu", 
(unsigned long)(%s));' % - name) - prnt(' sprintf(out_error, "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % (name[:100], value)) - prnt(' return -1;') - prnt(' }') - - def _load_known_int_constant(self, module, funcname): - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise VerificationError(error) - - def _enum_funcname(self, prefix, name): - # "$enum_$1" => "___D_enum____D_1" - name = name.replace('$', '___D_') - return '_cffi_e_%s_%s' % (prefix, name) - - def _generate_gen_enum_decl(self, tp, name, prefix='enum'): - if tp.partial: - for enumerator in tp.enumerators: - self._generate_gen_const(True, enumerator) - return - # - funcname = self._enum_funcname(prefix, name) - self.export_symbols.append(funcname) - prnt = self._prnt - prnt('int %s(char *out_error)' % funcname) - prnt('{') - for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - self._check_int_constant_value(enumerator, enumvalue) - prnt(' return 0;') - prnt('}') - prnt() - - def _loading_gen_enum(self, tp, name, module, prefix='enum'): - if tp.partial: - enumvalues = [self._load_constant(True, tp, enumerator, module) - for enumerator in tp.enumerators] - tp.enumvalues = tuple(enumvalues) - tp.partial_resolved = True - else: - funcname = self._enum_funcname(prefix, name) - self._load_known_int_constant(module, funcname) - - def _loaded_gen_enum(self, tp, name, module, library): - for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - setattr(library, enumerator, enumvalue) - type(library)._cffi_dir.append(enumerator) - - # ---------- - # macros: for now only for integers - - def _generate_gen_macro_decl(self, tp, name): - if tp == '...': - check_value = None - else: - check_value = tp # an integer - self._generate_gen_const(True, name, check_value=check_value) - - _loading_gen_macro = _loaded_noop - - def _loaded_gen_macro(self, tp, name, module, library): - if tp == '...': - check_value = None - else: - check_value = tp # an integer - value = self._load_constant(True, tp, name, module, - check_value=check_value) - setattr(library, name, value) - type(library)._cffi_dir.append(name) - - # ---------- - # global variables - - def _generate_gen_variable_decl(self, tp, name): - if isinstance(tp, model.ArrayType): - if tp.length_is_unknown(): - prnt = self._prnt - funcname = '_cffi_sizeof_%s' % (name,) - self.export_symbols.append(funcname) - prnt("size_t %s(void)" % funcname) - prnt("{") - prnt(" return sizeof(%s);" % (name,)) - prnt("}") - tp_ptr = model.PointerType(tp.item) - self._generate_gen_const(False, name, tp_ptr) - else: - tp_ptr = model.PointerType(tp) - self._generate_gen_const(False, name, tp_ptr, category='var') - - _loading_gen_variable = _loaded_noop - - def _loaded_gen_variable(self, tp, name, module, library): - if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - # sense that "a=..." 
is forbidden - if tp.length_is_unknown(): - funcname = '_cffi_sizeof_%s' % (name,) - BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] - function = module.load_function(BFunc, funcname) - size = function() - BItemType = self.ffi._get_cached_btype(tp.item) - length, rest = divmod(size, self.ffi.sizeof(BItemType)) - if rest != 0: - raise VerificationError( - "bad size: %r does not seem to be an array of %s" % - (name, tp.item)) - tp = tp.resolve_length(length) - tp_ptr = model.PointerType(tp.item) - value = self._load_constant(False, tp_ptr, name, module) - # 'value' is a which we have to replace with - # a if the N is actually known - if tp.length is not None: - BArray = self.ffi._get_cached_btype(tp) - value = self.ffi.cast(BArray, value) - setattr(library, name, value) - type(library)._cffi_dir.append(name) - return - # remove ptr= from the library instance, and replace - # it by a property on the class, which reads/writes into ptr[0]. - funcname = '_cffi_var_%s' % name - BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] - function = module.load_function(BFunc, funcname) - ptr = function() - def getter(library): - return ptr[0] - def setter(library, value): - ptr[0] = value - setattr(type(library), name, property(getter, setter)) - type(library)._cffi_dir.append(name) - -cffimod_header = r''' -#include -#include -#include -#include -#include /* XXX for ssize_t on some platforms */ - -/* this block of #ifs should be kept exactly identical between - c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py - and cffi/_cffi_include.h */ -#if defined(_MSC_VER) -# include /* for alloca() */ -# if _MSC_VER < 1600 /* MSVC < 2010 */ - typedef __int8 int8_t; - typedef __int16 int16_t; - typedef __int32 int32_t; - typedef __int64 int64_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - typedef unsigned __int64 uint64_t; - typedef __int8 int_least8_t; - typedef __int16 int_least16_t; - typedef __int32 int_least32_t; - typedef __int64 int_least64_t; - typedef unsigned __int8 uint_least8_t; - typedef unsigned __int16 uint_least16_t; - typedef unsigned __int32 uint_least32_t; - typedef unsigned __int64 uint_least64_t; - typedef __int8 int_fast8_t; - typedef __int16 int_fast16_t; - typedef __int32 int_fast32_t; - typedef __int64 int_fast64_t; - typedef unsigned __int8 uint_fast8_t; - typedef unsigned __int16 uint_fast16_t; - typedef unsigned __int32 uint_fast32_t; - typedef unsigned __int64 uint_fast64_t; - typedef __int64 intmax_t; - typedef unsigned __int64 uintmax_t; -# else -# include -# endif -# if _MSC_VER < 1800 /* MSVC < 2013 */ -# ifndef __cplusplus - typedef unsigned char _Bool; -# endif -# endif -#else -# include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) -# include -# endif -#endif -''' diff --git a/spaces/asescodes/midjourney-prompt-generator-using-chatgpt/app.py b/spaces/asescodes/midjourney-prompt-generator-using-chatgpt/app.py deleted file mode 100644 index 61e203816b7e4d0df07fc5226de85ef57d15a814..0000000000000000000000000000000000000000 --- a/spaces/asescodes/midjourney-prompt-generator-using-chatgpt/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import gradio as gr -import openai -from gradio.components import Textbox - -def generate_prompt(keywords,api_key): - openai.api_key = api_key - messages = [ - {"role": "system", "content": ''' - this is an example prompt i got from the keyword "Astronout walking on mars": "Astronaut walking on the red dusty surface of Mars, 
collecting samples of its soil, rocks, and atmosphere. With every step, they are discovering new and exciting things about this alien world." here is another example of "A dragon taking over the world": "Dragon, flying across the world. Its wings were so large that they cast a shadow over the land, blocking out the sun. Its fire was so powerful that it could turn entire cities to ash in a matter of moments." now i will give you some keywords and generate the prompt based on those keywords like those above example. now i will give you some keywords. and also keep the prompt like keywords based that will enhance the image - '''}, - {"role": "user", "content": keywords}, - - ] - response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages) - return response.choices[0].message.content - - - -api_key_input = Textbox(label="Then go to this https://platform.openai.com/account/api-keys generate secret key and paste it here",placeholder = "eg: sk-I2Q9qY3riJAfZI5xunVkT3BlbkFJ34b2I5Y8EGsdv2R9EoTX8Gl") - -keywords_input = Textbox(label="Keywords you want in your description", placeholder="eg: A dragon taking over the world") - - -output_text = Textbox(label="Prompt",placeholder= '''Dragon, flying across the world. Its wings were so large that they cast a shadow over the land, blocking out the sun. Its fire was so powerful that it could turn entire cities to ash in a matter of moments. -''') - - -description = "Use OpenAI's GPT-3.5 turbo model (ChatGPT) to generate a midjourney prompt based on user keywords." - -examples = [ - ["A backpack made entirely of human hair."], - ["A bicycle with wheels made out of jelly."], - ["A jacket made entirely out of old computer keyboards."], - ["A bookshelf made entirely out of ice that slowly melts over time."], - ["a chair made out of pizza slices"], - ["a banana with a bow tie and a top hat"], - ["a surreal landscape with floating islands"], - ["a cat made out of clouds"], - ["a piano that also functions as a fish tank"], - ["a treehouse made entirely of recycled materials"], - ["a motorcycle with 8 wheels"], - ["a sculpture of a giant snail made entirely of cheese"], - ["a house with a living tree growing through the middle"], - ["a car covered entirely in mirrors"], - ["a suit made out of recycled plastic bottles"], - ["a bicycle with square wheels"], - ["a set of cutlery made entirely of candy"], - ["a chandelier made of human hair"], - ["a giant rubber duck that can be used as a bathtub"], - ["a coat made of feathers from every bird species in the world"], - ["a lamp that is also a plant"], - ["a chair that is also a rocking horse"], - ["a pair of glasses that allow you to see through walls"], - ["a house with a slide instead of stairs"], - ["a motorcycle with a built-in hot tub"], - ["a dress made entirely of recycled paper"], -] - -gr.Interface( - fn=generate_prompt, - inputs=[keywords_input,api_key_input], - outputs=output_text, - description=description, - examples= examples -).launch() \ No newline at end of file diff --git a/spaces/avans06/whisper-webui-translate/src/prompts/prependPromptStrategy.py b/spaces/avans06/whisper-webui-translate/src/prompts/prependPromptStrategy.py deleted file mode 100644 index 6f8b6eba5b98310f57a656db73b5e415de3af958..0000000000000000000000000000000000000000 --- a/spaces/avans06/whisper-webui-translate/src/prompts/prependPromptStrategy.py +++ /dev/null @@ -1,31 +0,0 @@ -from src.config import VadInitialPromptMode -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - -class 
PrependPromptStrategy(AbstractPromptStrategy): - """ - A simple prompt strategy that prepends a single prompt to all segments of audio, or prepends the prompt to the first segment of audio. - """ - def __init__(self, initial_prompt: str, initial_prompt_mode: VadInitialPromptMode): - """ - Parameters - ---------- - initial_prompt: str - The initial prompt to use for the transcription. - initial_prompt_mode: VadInitialPromptMode - The mode to use for the initial prompt. If set to PREPEND_FIRST_SEGMENT, the initial prompt will be prepended to the first segment of audio. - If set to PREPEND_ALL_SEGMENTS, the initial prompt will be prepended to all segments of audio. - """ - self.initial_prompt = initial_prompt - self.initial_prompt_mode = initial_prompt_mode - - # This is a simple prompt strategy, so we only support these two modes - if initial_prompt_mode not in [VadInitialPromptMode.PREPEND_ALL_SEGMENTS, VadInitialPromptMode.PREPREND_FIRST_SEGMENT]: - raise ValueError(f"Unsupported initial prompt mode {initial_prompt_mode}") - - def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str: - if (self.initial_prompt_mode == VadInitialPromptMode.PREPEND_ALL_SEGMENTS): - return self._concat_prompt(self.initial_prompt, whisper_prompt) - elif (self.initial_prompt_mode == VadInitialPromptMode.PREPREND_FIRST_SEGMENT): - return self._concat_prompt(self.initial_prompt, whisper_prompt) if segment_index == 0 else whisper_prompt - else: - raise ValueError(f"Unknown initial prompt mode {self.initial_prompt_mode}") \ No newline at end of file diff --git a/spaces/avivdm1/AutoGPT/tests/integration/weaviate_memory_tests.py b/spaces/avivdm1/AutoGPT/tests/integration/weaviate_memory_tests.py deleted file mode 100644 index 015eab05484f485aeb8ee035e92ad7811e9dddd4..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/tests/integration/weaviate_memory_tests.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import sys -import unittest -from unittest import mock -from uuid import uuid4 - -from weaviate import Client -from weaviate.util import get_valid_uuid - -from autogpt.config import Config -from autogpt.memory.base import get_ada_embedding -from autogpt.memory.weaviate import WeaviateMemory - - -class TestWeaviateMemory(unittest.TestCase): - cfg = None - client = None - index = None - - @classmethod - def setUpClass(cls): - # only create the connection to weaviate once - cls.cfg = Config() - - if cls.cfg.use_weaviate_embedded: - from weaviate.embedded import EmbeddedOptions - - cls.client = Client( - embedded_options=EmbeddedOptions( - hostname=cls.cfg.weaviate_host, - port=int(cls.cfg.weaviate_port), - persistence_data_path=cls.cfg.weaviate_embedded_path, - ) - ) - else: - cls.client = Client( - f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}" - ) - - cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index) - - """ - In order to run these tests you will need a local instance of - Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose - for creating local instances using docker. 
- Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded): - - USE_WEAVIATE_EMBEDDED=True - WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" - """ - - def setUp(self): - try: - self.client.schema.delete_class(self.index) - except: - pass - - self.memory = WeaviateMemory(self.cfg) - - def test_add(self): - doc = "You are a Titan name Thanos and you are looking for the Infinity Stones" - self.memory.add(doc) - result = self.client.query.get(self.index, ["raw_text"]).do() - actual = result["data"]["Get"][self.index] - - self.assertEqual(len(actual), 1) - self.assertEqual(actual[0]["raw_text"], doc) - - def test_get(self): - doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos" - - with self.client.batch as batch: - batch.add_data_object( - uuid=get_valid_uuid(uuid4()), - data_object={"raw_text": doc}, - class_name=self.index, - vector=get_ada_embedding(doc), - ) - - batch.flush() - - actual = self.memory.get(doc) - - self.assertEqual(len(actual), 1) - self.assertEqual(actual[0], doc) - - def test_get_stats(self): - docs = [ - "You are now about to count the number of docs in this index", - "And then you about to find out if you can count correctly", - ] - - [self.memory.add(doc) for doc in docs] - - stats = self.memory.get_stats() - - self.assertTrue(stats) - self.assertTrue("count" in stats) - self.assertEqual(stats["count"], 2) - - def test_clear(self): - docs = [ - "Shame this is the last test for this class", - "Testing is fun when someone else is doing it", - ] - - [self.memory.add(doc) for doc in docs] - - self.assertEqual(self.memory.get_stats()["count"], 2) - - self.memory.clear() - - self.assertEqual(self.memory.get_stats()["count"], 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/awacke1/ArtNovelComicBookComposer/README.md b/spaces/awacke1/ArtNovelComicBookComposer/README.md deleted file mode 100644 index 2b9cff46ff1186ab50c29360fe18127427637eb8..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ArtNovelComicBookComposer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 👁🥽 Art Novel Comic Book Composer 📱👁 Gradio -emoji: 👁🥽📱👁 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/CarePlanQnAWithContext2/app.py b/spaces/awacke1/CarePlanQnAWithContext2/app.py deleted file mode 100644 index c9b28a42fe96971b5952f9f1b63a6d7563be8138..0000000000000000000000000000000000000000 --- a/spaces/awacke1/CarePlanQnAWithContext2/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import gradio as gr - -context = "What should be documented in a care plan?\n" -context = context + "Regardless of what your preferences are, your care plan should include:\n" -context = context + "What your assessed care needs are.\n" -context = context + "What type of support you should receive.\n" -context = context + "Your desired outcomes.\n" -context = context + "Who should provide care.\n" -context = context + "When care and support should be provided.\n" -context = context + "Records of care provided.\n" -context = context + "Your wishes and personal preferences.\n" -context = context + "The costs of the services.\n" - -context = context + "Dimensions\n" -context = context + "1-Ontology of Plan\n" -context = context + "2-Problems as evidenced by Signs of Systems\n" 
-context = context + "3-Assessment of Needs\n" -context = context + "4-Questions about problems faced\n" -context = context + "5-Goals for long and short term improvements\n" -context = context + "6-Knowledge-Behavior-Status Quality Measures\n" -context = context + "7-Intervention List of Options\n" -context = context + "8-Quality Measures\n" -context = context + "9-Pathways Available\n" - -with open('WritingCarePlans.txt', 'r') as file: - context = file.read() - -question = "What should be documented in a care plan?" - -gr.Interface.load( - "huggingface/deepset/roberta-base-squad2", - theme="default", - css=".footer{display:none !important}", - inputs=[gr.inputs.Textbox(lines=12, default=context, label="Context paragraph"), gr.inputs.Textbox(lines=3, default=question, label="Question")], - outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")], - title=None, - description="Provide your own paragraph and ask any question about the text. How well does the model answer?").launch() \ No newline at end of file diff --git a/spaces/awacke1/DockerImageRecognitionToText/README.md b/spaces/awacke1/DockerImageRecognitionToText/README.md deleted file mode 100644 index 22fa85d8d0593cf8e9801a24bff327154a2fa9f0..0000000000000000000000000000000000000000 --- a/spaces/awacke1/DockerImageRecognitionToText/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: 🚢📷Image Recognition To Text Caption Docker -emoji: 🚢Img -colorFrom: pink -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/MultiRhymeLyricSmith/app.py b/spaces/awacke1/MultiRhymeLyricSmith/app.py deleted file mode 100644 index f314ad389f4edccff35245b83dd1f6b712dc2029..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MultiRhymeLyricSmith/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import copy -import logging -import transformers -import rhyme_with_ai - -from typing import List - -import streamlit as st -from transformers import BertTokenizer, TFAutoModelForMaskedLM - -from rhyme_with_ai.utils import color_new_words, sanitize -from rhyme_with_ai.rhyme import query_rhyme_words -from rhyme_with_ai.rhyme_generator import RhymeGenerator - - -DEFAULT_QUERY = "Machines will take over the world soon" -N_RHYMES = 10 - - -LANGUAGE = st.sidebar.radio("Language", ["english", "dutch"],0) -if LANGUAGE == "english": - MODEL_PATH = "bert-large-cased-whole-word-masking" - ITER_FACTOR = 5 -elif LANGUAGE == "dutch": - MODEL_PATH = "GroNLP/bert-base-dutch-cased" - ITER_FACTOR = 10 # Faster model -else: - raise NotImplementedError(f"Unsupported language ({LANGUAGE}) expected 'english' or 'dutch'.") - -def main(): - st.markdown( - "Created with " - "[Datamuse](https://www.datamuse.com/api/), " - "[Mick's rijmwoordenboek](https://rijmwoordenboek.nl), " - "[Hugging Face](https://huggingface.co/), " - "[Streamlit](https://streamlit.io/) and " - "[App Engine](https://cloud.google.com/appengine/)." 
- " Read our [blog](https://blog.godatadriven.com/rhyme-with-ai) " - "or check the " - "[source](https://github.com/godatadriven/rhyme-with-ai).", - unsafe_allow_html=True, - ) - st.title("Rhyme with AI") - query = get_query() - if not query: - query = DEFAULT_QUERY - rhyme_words_options = query_rhyme_words(query, n_rhymes=N_RHYMES,language=LANGUAGE) - if rhyme_words_options: - logging.getLogger(__name__).info("Got rhyme words: %s", rhyme_words_options) - start_rhyming(query, rhyme_words_options) - else: - st.write("No rhyme words found") - - -def get_query(): - q = sanitize( - st.text_input("Write your first line and press ENTER to rhyme:", DEFAULT_QUERY) - ) - if not q: - return DEFAULT_QUERY - return q - - -def start_rhyming(query, rhyme_words_options): - st.markdown("## My Suggestions:") - - progress_bar = st.progress(0) - status_text = st.empty() - max_iter = len(query.split()) * ITER_FACTOR - - rhyme_words = rhyme_words_options[:N_RHYMES] - - model, tokenizer = load_model(MODEL_PATH) - sentence_generator = RhymeGenerator(model, tokenizer) - sentence_generator.start(query, rhyme_words) - - current_sentences = [" " for _ in range(N_RHYMES)] - for i in range(max_iter): - previous_sentences = copy.deepcopy(current_sentences) - current_sentences = sentence_generator.mutate() - display_output(status_text, query, current_sentences, previous_sentences) - progress_bar.progress(i / (max_iter - 1)) - st.balloons() - - -@st.cache(allow_output_mutation=True) -def load_model(model_path): - return ( - TFAutoModelForMaskedLM.from_pretrained(model_path), - BertTokenizer.from_pretrained(model_path), - ) - - -def display_output(status_text, query, current_sentences, previous_sentences): - print_sentences = [] - for new, old in zip(current_sentences, previous_sentences): - formatted = color_new_words(new, old) - after_comma = "
  • " + formatted.split(",")[1][:-2] + "
  • " - print_sentences.append(after_comma) - status_text.markdown( - query + ",
    " + "".join(print_sentences), unsafe_allow_html=True - ) - - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - main() \ No newline at end of file diff --git a/spaces/badongtakla/ithaca/README.md b/spaces/badongtakla/ithaca/README.md deleted file mode 100644 index 91b170badd202308a4c331cd2b6996cae27e4400..0000000000000000000000000000000000000000 --- a/spaces/badongtakla/ithaca/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ithaca -emoji: 🐨 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 2.8.14 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/balaramas/s2t_translator/app.py b/spaces/balaramas/s2t_translator/app.py deleted file mode 100644 index 256b16c224477af19b54fdd976e90a5f05236f37..0000000000000000000000000000000000000000 --- a/spaces/balaramas/s2t_translator/app.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -Script to translate given single english audio file to corresponding hindi text -Usage : python s2t_en2hi.py -""" - - - -import gradio as gr -import sys -import os -import subprocess -from pydub import AudioSegment -import yaml -import wave - - - -def get_wav_duration(file_path): - with wave.open(file_path, 'rb') as wav_file: - frames = wav_file.getnframes() - rate = wav_file.getframerate() - duration = frames / float(rate) - return duration - - - -def install_fairseq(): - try: - # Run pip install command to install fairseq - subprocess.check_call(["pip", "install", "fairseq"]) - subprocess.check_call(["pip", "install", "sentencepiece"]) - subprocess.check_call(["pip", "install", "soundfile"]) - return "fairseq successfully installed!" - except subprocess.CalledProcessError as e: - return f"An error occurred while installing fairseq: {str(e)}" - -def convert_audio_to_16k_wav(audio_input): - sound = AudioSegment.from_file(audio_input) - sample_rate = sound.frame_rate - num_channels = sound.channels - num_frames = int(sound.frame_count()) - filename = audio_input.split("/")[-1] - print("original file is at:", audio_input) - if (num_channels > 1) or (sample_rate != 16000): # convert to mono-channel 16k wav - if num_channels > 1: - sound = sound.set_channels(1) - if sample_rate != 16000: - sound = sound.set_frame_rate(16000) - num_frames = int(sound.frame_count()) - filename = filename.replace(".wav", "") + "_16k.wav" - sound.export(f"{filename}", format="wav") - return filename - - -def run_my_code(input_text, language): - # TODO better argument handling - audio=convert_audio_to_16k_wav(input_text) - hi_wav = audio - - - data_root="" - model_checkpoint="" - d_r="" - yam="" - - if(language=="Hindi"): - model_checkpoint = "./models/hindi_model.pt" - data_root="./MUSTC_ROOT_hindi/en-hi/" - d_r="MUSTC_ROOT_hindi/" - yam="./MUSTC_ROOT_hindi/en-hi/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="French"): - model_checkpoint = "./models/french_model.pt" - data_root="./MUSTC_ROOT_french/en-fr/" - d_r="MUSTC_ROOT_french/" - yam="./MUSTC_ROOT_french/en-fr/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="German"): - model_checkpoint = "./models/german_model.pt" - data_root="./MUSTC_ROOT_german/en-de/" - d_r="MUSTC_ROOT_german/" - yam="./MUSTC_ROOT_german/en-de/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="Bengali"): - model_checkpoint = "./models/bengali_model.pt" - data_root="./MUSTC_ROOT_bengali/en-bn/" - d_r="MUSTC_ROOT_bengali/" - yam="./MUSTC_ROOT_bengali/en-bn/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="Nepali"): - 
model_checkpoint = "./models/nepali_model.pt" - data_root="./MUSTC_ROOT_nepali/en-ne/" - d_r="MUSTC_ROOT_nepali/" - yam="./MUSTC_ROOT_nepali/en-ne/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="Gujrati"): - model_checkpoint = "./models/gujrati_model.pt" - data_root="./MUSTC_ROOT_gujrati/en-gj/" - d_r="MUSTC_ROOT_gujrati/" - yam="./MUSTC_ROOT_gujrati/en-gj/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="Tamil"): - model_checkpoint = "./models/tamil_model.pt" - data_root="./MUSTC_ROOT_tamil/en-tm/" - d_r="MUSTC_ROOT_tamil/" - yam="./MUSTC_ROOT_tamil/en-tm/data/tst-COMMON/txt/tst-COMMON.yaml" - if(language=="Marathi"): - model_checkpoint = "./models/marathi_model.pt" - data_root="./MUSTC_ROOT_marathi/en-mt/" - d_r="MUSTC_ROOT_marathi/" - yam="./MUSTC_ROOT_marathi/en-mt/data/tst-COMMON/txt/tst-COMMON.yaml" - - - - - - #code to change the duration of the yaml file accordign to the audio input - with open(yam, 'r') as yaml_file: - data = yaml.safe_load(yaml_file) - data[0]['duration']=get_wav_duration(hi_wav) - with open(yam, 'w') as yaml_file: - yaml.dump(data, yaml_file) - - os.system(f"cp {hi_wav} {data_root}data/tst-COMMON/wav/test.wav") - - print("------Starting data prepration------") - subprocess.run(["python", "prep_mustc_data_hindi_single.py", "--data-root", d_r, "--task", "st", "--vocab-type", "unigram", "--vocab-size", "8000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - - #For testing - #subprocess.run(["python", "prep_mustc_data_hindi_single.py", "--data-root", d_r, "--task", "st", "--vocab-type", "unigram", "--vocab-size", "8000"]) - - print("------Performing translation------") - #subprocess.run(["python", "generate.py", data_root, "--config-yaml", "config_st.yaml", "--gen-subset", "tst-COMMON_st", "--task", "speech_to_text", "--path", model_checkpoint]) - translation_result = subprocess.run(["python", "generate.py", data_root, "--config-yaml", "config_st.yaml", "--gen-subset", "tst-COMMON_st", "--task", "speech_to_text", "--path", model_checkpoint], capture_output=True, text=True) - translation_result_text = translation_result.stdout - - lines = translation_result_text.split("\n") - - - #just for checking the duration from the yaml file of the current input audio - with open(yam, 'r') as yaml_file: - data = yaml.safe_load(yaml_file) - print(data[0]['duration'], " seconds duration") - - output_text="" - print("\n\n------Translation results are:\n") - for i in lines: - if (i.startswith("D-0")): - print(i.split("\t")[2]) - output_text=i.split("\t")[2] - break - - os.system(f"rm {data_root}data/tst-COMMON/wav/test.wav") - return output_text - -install_fairseq() - -# Define the input and output interfaces for Gradio -#inputs = [ - # gr.inputs.Audio(source="microphone", type="filepath", label="Record something (in English)..."), - # gr.inputs.Dropdown(list(LANGUAGE_CODES.keys()), default="Hindi", label="From English to Languages X..."), - # ] - -#input_textbox = gr.inputs.Textbox(label="test2.wav") -#input=gr.inputs.Audio(source="microphone", type="filepath", label="Record something (in English)...") -#audio=convert_audio_to_16k_wav(input) -output_textbox = gr.outputs.Textbox(label="The Translated Text is:") - -# Create a Gradio interface -iface = gr.Interface( - fn=run_my_code, - inputs=[gr.inputs.Audio(source="microphone", type="filepath", label="Record something (in American/British English Accent)..."), gr.inputs.Radio(["Hindi", "Bengali", "Marathi", "Gujrati", "Tamil", "Nepali", "French", "German"], label="Language To be Translated To")], - 
outputs=output_textbox, - title="English Speech To Multilingual Text Translator") - -# Launch the interface -iface.launch() \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/jsm/exporters/ColladaExporter.js b/spaces/banana-projects/web3d/node_modules/three/examples/jsm/exporters/ColladaExporter.js deleted file mode 100644 index 936516990d057f5c210b853668aa6c35f315b031..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/jsm/exporters/ColladaExporter.js +++ /dev/null @@ -1,635 +0,0 @@ -/** - * @author Garrett Johnson / http://gkjohnson.github.io/ - * https://github.com/gkjohnson/collada-exporter-js - * - * Usage: - * var exporter = new ColladaExporter(); - * - * var data = exporter.parse(mesh); - * - * Format Definition: - * https://www.khronos.org/collada/ - */ - -import { - BufferGeometry, - Color, - DoubleSide, - Geometry, - Matrix4, - Mesh, - MeshBasicMaterial, - MeshLambertMaterial -} from "../../../build/three.module.js"; - -var ColladaExporter = function () {}; - -ColladaExporter.prototype = { - - constructor: ColladaExporter, - - parse: function ( object, onDone, options = {} ) { - - options = Object.assign( { - version: '1.4.1', - author: null, - textureDirectory: '', - }, options ); - - if ( options.textureDirectory !== '' ) { - - options.textureDirectory = `${ options.textureDirectory }/` - .replace( /\\/g, '/' ) - .replace( /\/+/g, '/' ); - - } - - var version = options.version; - if ( version !== '1.4.1' && version !== '1.5.0' ) { - - console.warn( `ColladaExporter : Version ${ version } not supported for export. Only 1.4.1 and 1.5.0.` ); - return null; - - } - - // Convert the urdf xml into a well-formatted, indented format - function format( urdf ) { - - var IS_END_TAG = /^<\//; - var IS_SELF_CLOSING = /(\?>$)|(\/>$)/; - var HAS_TEXT = /<[^>]+>[^<]*<\/[^<]+>/; - - var pad = ( ch, num ) => ( num > 0 ? ch + pad( ch, num - 1 ) : '' ); - - var tagnum = 0; - return urdf - .match( /(<[^>]+>[^<]+<\/[^<]+>)|(<[^>]+>)/g ) - .map( tag => { - - if ( ! HAS_TEXT.test( tag ) && ! IS_SELF_CLOSING.test( tag ) && IS_END_TAG.test( tag ) ) { - - tagnum --; - - } - - var res = `${ pad( ' ', tagnum ) }${ tag }`; - - if ( ! HAS_TEXT.test( tag ) && ! IS_SELF_CLOSING.test( tag ) && ! IS_END_TAG.test( tag ) ) { - - tagnum ++; - - } - - return res; - - } ) - .join( '\n' ); - - } - - // Convert an image into a png format for saving - function base64ToBuffer( str ) { - - var b = atob( str ); - var buf = new Uint8Array( b.length ); - - for ( var i = 0, l = buf.length; i < l; i ++ ) { - - buf[ i ] = b.charCodeAt( i ); - - } - - return buf; - - } - - var canvas, ctx; - function imageToData( image, ext ) { - - canvas = canvas || document.createElement( 'canvas' ); - ctx = ctx || canvas.getContext( '2d' ); - - canvas.width = image.naturalWidth; - canvas.height = image.naturalHeight; - - ctx.drawImage( image, 0, 0 ); - - // Get the base64 encoded data - var base64data = canvas - .toDataURL( `image/${ ext }`, 1 ) - .replace( /^data:image\/(png|jpg);base64,/, '' ); - - // Convert to a uint8 array - return base64ToBuffer( base64data ); - - } - - // gets the attribute array. 
Generate a new array if the attribute is interleaved - var getFuncs = [ 'getX', 'getY', 'getZ', 'getW' ]; - function attrBufferToArray( attr ) { - - if ( attr.isInterleavedBufferAttribute ) { - - // use the typed array constructor to save on memory - var arr = new attr.array.constructor( attr.count * attr.itemSize ); - var size = attr.itemSize; - for ( var i = 0, l = attr.count; i < l; i ++ ) { - - for ( var j = 0; j < size; j ++ ) { - - arr[ i * size + j ] = attr[ getFuncs[ j ] ]( i ); - - } - - } - - return arr; - - } else { - - return attr.array; - - } - - } - - // Returns an array of the same type starting at the `st` index, - // and `ct` length - function subArray( arr, st, ct ) { - - if ( Array.isArray( arr ) ) return arr.slice( st, st + ct ); - else return new arr.constructor( arr.buffer, st * arr.BYTES_PER_ELEMENT, ct ); - - } - - // Returns the string for a geometry's attribute - function getAttribute( attr, name, params, type ) { - - var array = attrBufferToArray( attr ); - var res = - `` + - - `` + - array.join( ' ' ) + - '' + - - '' + - `` + - - params.map( n => `` ).join( '' ) + - - '' + - '' + - ''; - - return res; - - } - - // Returns the string for a node's transform information - var transMat; - function getTransform( o ) { - - // ensure the object's matrix is up to date - // before saving the transform - o.updateMatrix(); - - transMat = transMat || new Matrix4(); - transMat.copy( o.matrix ); - transMat.transpose(); - return `${ transMat.toArray().join( ' ' ) }`; - - } - - // Process the given piece of geometry into the geometry library - // Returns the mesh id - function processGeometry( g ) { - - var info = geometryInfo.get( g ); - - if ( ! info ) { - - // convert the geometry to bufferGeometry if it isn't already - var bufferGeometry = g; - if ( bufferGeometry instanceof Geometry ) { - - bufferGeometry = ( new BufferGeometry() ).fromGeometry( bufferGeometry ); - - } - - var meshid = `Mesh${ libraryGeometries.length + 1 }`; - - var indexCount = - bufferGeometry.index ? - bufferGeometry.index.count * bufferGeometry.index.itemSize : - bufferGeometry.attributes.position.count; - - var groups = - bufferGeometry.groups != null && bufferGeometry.groups.length !== 0 ? - bufferGeometry.groups : - [ { start: 0, count: indexCount, materialIndex: 0 } ]; - - var gnode = ``; - - // define the geometry node and the vertices for the geometry - var posName = `${ meshid }-position`; - var vertName = `${ meshid }-vertices`; - gnode += getAttribute( bufferGeometry.attributes.position, posName, [ 'X', 'Y', 'Z' ], 'float' ); - gnode += ``; - - // NOTE: We're not optimizing the attribute arrays here, so they're all the same length and - // can therefore share the same triangle indices. However, MeshLab seems to have trouble opening - // models with attributes that share an offset. 
- // MeshLab Bug#424: https://sourceforge.net/p/meshlab/bugs/424/ - - // serialize normals - var triangleInputs = ``; - if ( 'normal' in bufferGeometry.attributes ) { - - var normName = `${ meshid }-normal`; - gnode += getAttribute( bufferGeometry.attributes.normal, normName, [ 'X', 'Y', 'Z' ], 'float' ); - triangleInputs += ``; - - } - - // serialize uvs - if ( 'uv' in bufferGeometry.attributes ) { - - var uvName = `${ meshid }-texcoord`; - gnode += getAttribute( bufferGeometry.attributes.uv, uvName, [ 'S', 'T' ], 'float' ); - triangleInputs += ``; - - } - - // serialize colors - if ( 'color' in bufferGeometry.attributes ) { - - var colName = `${ meshid }-color`; - gnode += getAttribute( bufferGeometry.attributes.color, colName, [ 'X', 'Y', 'Z' ], 'uint8' ); - triangleInputs += ``; - - } - - var indexArray = null; - if ( bufferGeometry.index ) { - - indexArray = attrBufferToArray( bufferGeometry.index ); - - } else { - - indexArray = new Array( indexCount ); - for ( var i = 0, l = indexArray.length; i < l; i ++ ) indexArray[ i ] = i; - - } - - for ( var i = 0, l = groups.length; i < l; i ++ ) { - - var group = groups[ i ]; - var subarr = subArray( indexArray, group.start, group.count ); - var polycount = subarr.length / 3; - gnode += ``; - gnode += triangleInputs; - - gnode += `

<p>${ subarr.join( ' ' ) }</p>`; - gnode += '</triangles>'; - - } - - gnode += `</mesh></geometry>
    `; - - libraryGeometries.push( gnode ); - - info = { meshid: meshid, bufferGeometry: bufferGeometry }; - geometryInfo.set( g, info ); - - } - - return info; - - } - - // Process the given texture into the image library - // Returns the image library - function processTexture( tex ) { - - var texid = imageMap.get( tex ); - if ( texid == null ) { - - texid = `image-${ libraryImages.length + 1 }`; - - var ext = 'png'; - var name = tex.name || texid; - var imageNode = ``; - - if ( version === '1.5.0' ) { - - imageNode += `${ options.textureDirectory }${ name }.${ ext }`; - - } else { - - // version image node 1.4.1 - imageNode += `${ options.textureDirectory }${ name }.${ ext }`; - - } - - imageNode += ''; - - libraryImages.push( imageNode ); - imageMap.set( tex, texid ); - textures.push( { - directory: options.textureDirectory, - name, - ext, - data: imageToData( tex.image, ext ), - original: tex - } ); - - } - - return texid; - - } - - // Process the given material into the material and effect libraries - // Returns the material id - function processMaterial( m ) { - - var matid = materialMap.get( m ); - - if ( matid == null ) { - - matid = `Mat${ libraryEffects.length + 1 }`; - - var type = 'phong'; - - if ( m instanceof MeshLambertMaterial ) { - - type = 'lambert'; - - } else if ( m instanceof MeshBasicMaterial ) { - - type = 'constant'; - - if ( m.map !== null ) { - - // The Collada spec does not support diffuse texture maps with the - // constant shader type. - // mrdoob/three.js#15469 - console.warn( 'ColladaExporter: Texture maps not supported with MeshBasicMaterial.' ); - - } - - } - - var emissive = m.emissive ? m.emissive : new Color( 0, 0, 0 ); - var diffuse = m.color ? m.color : new Color( 0, 0, 0 ); - var specular = m.specular ? m.specular : new Color( 1, 1, 1 ); - var shininess = m.shininess || 0; - var reflectivity = m.reflectivity || 0; - - // Do not export and alpha map for the reasons mentioned in issue (#13792) - // in THREE.js alpha maps are black and white, but collada expects the alpha - // channel to specify the transparency - var transparencyNode = ''; - if ( m.transparent === true ) { - - transparencyNode += - `` + - ( - m.map ? - `` : - '1' - ) + - ''; - - if ( m.opacity < 1 ) { - - transparencyNode += `${ m.opacity }`; - - } - - } - - var techniqueNode = `<${ type }>` + - - '' + - - ( - m.emissiveMap ? - '' : - `${ emissive.r } ${ emissive.g } ${ emissive.b } 1` - ) + - - '' + - - ( - type !== 'constant' ? - '' + - - ( - m.map ? - '' : - `${ diffuse.r } ${ diffuse.g } ${ diffuse.b } 1` - ) + - '' - : '' - ) + - - ( - type === 'phong' ? - `${ specular.r } ${ specular.g } ${ specular.b } 1` + - - '' + - - ( - m.specularMap ? - '' : - `${ shininess }` - ) + - - '' - : '' - ) + - - `${ diffuse.r } ${ diffuse.g } ${ diffuse.b } 1` + - - `${ reflectivity }` + - - transparencyNode + - - ``; - - var effectnode = - `` + - '' + - - ( - m.map ? - '' + - `${ processTexture( m.map ) }` + - '' + - 'diffuse-surface' : - '' - ) + - - ( - m.specularMap ? - '' + - `${ processTexture( m.specularMap ) }` + - '' + - 'specular-surface' : - '' - ) + - - ( - m.emissiveMap ? - '' + - `${ processTexture( m.emissiveMap ) }` + - '' + - 'emissive-surface' : - '' - ) + - - techniqueNode + - - ( - m.side === DoubleSide ? 
- `1` : - '' - ) + - - '' + - - ''; - - libraryMaterials.push( `` ); - libraryEffects.push( effectnode ); - materialMap.set( m, matid ); - - } - - return matid; - - } - - // Recursively process the object into a scene - function processObject( o ) { - - var node = ``; - - node += getTransform( o ); - - if ( o instanceof Mesh && o.geometry != null ) { - - // function returns the id associated with the mesh and a "BufferGeometry" version - // of the geometry in case it's not a geometry. - var geomInfo = processGeometry( o.geometry ); - var meshid = geomInfo.meshid; - var geometry = geomInfo.bufferGeometry; - - // ids of the materials to bind to the geometry - var matids = null; - var matidsArray = []; - - // get a list of materials to bind to the sub groups of the geometry. - // If the amount of subgroups is greater than the materials, than reuse - // the materials. - var mat = o.material || new MeshBasicMaterial(); - var materials = Array.isArray( mat ) ? mat : [ mat ]; - if ( geometry.groups.length > materials.length ) { - matidsArray = new Array( geometry.groups.length ); - } else { - matidsArray = new Array( materials.length ) - } - matids = matidsArray.fill() - .map( ( v, i ) => processMaterial( materials[ i % materials.length ] ) ); - - node += - `` + - - ( - matids != null ? - '' + - matids.map( ( id, i ) => - - `` + - - '' + - - '' - ).join( '' ) + - '' : - '' - ) + - - ''; - - } - - o.children.forEach( c => node += processObject( c ) ); - - node += ''; - - return node; - - } - - var geometryInfo = new WeakMap(); - var materialMap = new WeakMap(); - var imageMap = new WeakMap(); - var textures = []; - - var libraryImages = []; - var libraryGeometries = []; - var libraryEffects = []; - var libraryMaterials = []; - var libraryVisualScenes = processObject( object ); - - var specLink = version === '1.4.1' ? 'http://www.collada.org/2005/11/COLLADASchema' : 'https://www.khronos.org/collada/'; - var dae = - '' + - `` + - '' + - ( - '' + - 'THREE.js Collada Exporter' + - ( options.author !== null ? 
`${ options.author }` : '' ) + - '' + - `${ ( new Date() ).toISOString() }` + - `${ ( new Date() ).toISOString() }` + - 'Y_UP' - ) + - ''; - - dae += `${ libraryImages.join( '' ) }`; - - dae += `${ libraryEffects.join( '' ) }`; - - dae += `${ libraryMaterials.join( '' ) }`; - - dae += `${ libraryGeometries.join( '' ) }`; - - dae += `${ libraryVisualScenes }`; - - dae += ''; - - dae += ''; - - var res = { - data: format( dae ), - textures - }; - - if ( typeof onDone === 'function' ) { - - requestAnimationFrame( () => onDone( res ) ); - - } - - return res; - - } - -}; - -export { ColladaExporter }; diff --git a/spaces/bdp-AI/03-ImageSearchSimilar/README.md b/spaces/bdp-AI/03-ImageSearchSimilar/README.md deleted file mode 100644 index 9f5c1117145a451726b2f5740afa5d360380bc36..0000000000000000000000000000000000000000 --- a/spaces/bdp-AI/03-ImageSearchSimilar/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 03 ImageSearchSimilar -emoji: 🏢 -colorFrom: gray -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/rcan_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/rcan_arch.py deleted file mode 100644 index f15e9215e3ce402a1b71ab82061421658896b0e8..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/rcan_arch.py +++ /dev/null @@ -1,135 +0,0 @@ -import torch -from torch import nn as nn - -from basicsr.utils.registry import ARCH_REGISTRY -from .arch_util import Upsample, make_layer - - -class ChannelAttention(nn.Module): - """Channel attention used in RCAN. - - Args: - num_feat (int): Channel number of intermediate features. - squeeze_factor (int): Channel squeeze factor. Default: 16. - """ - - def __init__(self, num_feat, squeeze_factor=16): - super(ChannelAttention, self).__init__() - self.attention = nn.Sequential( - nn.AdaptiveAvgPool2d(1), nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0), - nn.ReLU(inplace=True), nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), nn.Sigmoid()) - - def forward(self, x): - y = self.attention(x) - return x * y - - -class RCAB(nn.Module): - """Residual Channel Attention Block (RCAB) used in RCAN. - - Args: - num_feat (int): Channel number of intermediate features. - squeeze_factor (int): Channel squeeze factor. Default: 16. - res_scale (float): Scale the residual. Default: 1. - """ - - def __init__(self, num_feat, squeeze_factor=16, res_scale=1): - super(RCAB, self).__init__() - self.res_scale = res_scale - - self.rcab = nn.Sequential( - nn.Conv2d(num_feat, num_feat, 3, 1, 1), nn.ReLU(True), nn.Conv2d(num_feat, num_feat, 3, 1, 1), - ChannelAttention(num_feat, squeeze_factor)) - - def forward(self, x): - res = self.rcab(x) * self.res_scale - return res + x - - -class ResidualGroup(nn.Module): - """Residual Group of RCAB. - - Args: - num_feat (int): Channel number of intermediate features. - num_block (int): Block number in the body network. - squeeze_factor (int): Channel squeeze factor. Default: 16. - res_scale (float): Scale the residual. Default: 1. 
- """ - - def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1): - super(ResidualGroup, self).__init__() - - self.residual_group = make_layer( - RCAB, num_block, num_feat=num_feat, squeeze_factor=squeeze_factor, res_scale=res_scale) - self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - - def forward(self, x): - res = self.conv(self.residual_group(x)) - return res + x - - -@ARCH_REGISTRY.register() -class RCAN(nn.Module): - """Residual Channel Attention Networks. - - Paper: Image Super-Resolution Using Very Deep Residual Channel Attention - Networks - Ref git repo: https://github.com/yulunzhang/RCAN. - - Args: - num_in_ch (int): Channel number of inputs. - num_out_ch (int): Channel number of outputs. - num_feat (int): Channel number of intermediate features. - Default: 64. - num_group (int): Number of ResidualGroup. Default: 10. - num_block (int): Number of RCAB in ResidualGroup. Default: 16. - squeeze_factor (int): Channel squeeze factor. Default: 16. - upscale (int): Upsampling factor. Support 2^n and 3. - Default: 4. - res_scale (float): Used to scale the residual in residual block. - Default: 1. - img_range (float): Image range. Default: 255. - rgb_mean (tuple[float]): Image mean in RGB orders. - Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset. - """ - - def __init__(self, - num_in_ch, - num_out_ch, - num_feat=64, - num_group=10, - num_block=16, - squeeze_factor=16, - upscale=4, - res_scale=1, - img_range=255., - rgb_mean=(0.4488, 0.4371, 0.4040)): - super(RCAN, self).__init__() - - self.img_range = img_range - self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) - - self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) - self.body = make_layer( - ResidualGroup, - num_group, - num_feat=num_feat, - num_block=num_block, - squeeze_factor=squeeze_factor, - res_scale=res_scale) - self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.upsample = Upsample(upscale, num_feat) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - - def forward(self, x): - self.mean = self.mean.type_as(x) - - x = (x - self.mean) * self.img_range - x = self.conv_first(x) - res = self.conv_after_body(self.body(x)) - res += x - - x = self.conv_last(self.upsample(res)) - x = x / self.img_range + self.mean - - return x diff --git a/spaces/bell-tommy/SG161222-Realistic_Vision_V1.4/app.py b/spaces/bell-tommy/SG161222-Realistic_Vision_V1.4/app.py deleted file mode 100644 index a3cc9b493946644ef46fa95cde231d3773b98d0c..0000000000000000000000000000000000000000 --- a/spaces/bell-tommy/SG161222-Realistic_Vision_V1.4/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/SG161222/Realistic_Vision_V1.4").launch() \ No newline at end of file diff --git a/spaces/bhautikj/sd_clip_bias/app.py b/spaces/bhautikj/sd_clip_bias/app.py deleted file mode 100644 index 5e534b51e70652befe28aa4b716c27bead538f03..0000000000000000000000000000000000000000 --- a/spaces/bhautikj/sd_clip_bias/app.py +++ /dev/null @@ -1,179 +0,0 @@ -#initial fork from https://huggingface.co/spaces/Manjushri/SD-2.1-CPU/raw/main/app.py - -import gradio as gr -import torch -import numpy as np - -import matplotlib -matplotlib.use("Agg") -import matplotlib.pyplot as plt - -from PIL import Image -import collections -import numpy as np -import io - -from datasets import load_dataset -from diffusers import DiffusionPipeline, EulerDiscreteScheduler - -device = "cuda" if torch.cuda.is_available() else "cpu" - -scheduler = 
EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2-1-base", subfolder="scheduler", prediction_type="v_prediction") -pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base", scheduler=scheduler) -pipe = pipe.to(device) - -tokenizer = pipe.tokenizer -text_encoder = pipe.text_encoder - -# -# AUTHORS NOTE: -# Being absolutely explicit: the genders and ethnicities listed here -# are reductive. This is done not to erase but make analysis here -# simpler, tractable and easier to understand -# -GENDERS = ["male", "female"] -ETHNICITIES = ["black", "white", "asian"] -LOOKS = ["beautiful", "stunning", "handsome", "ugly", "plain", "repulsive", "arrogant", "trustworthy"] -JOBS = ["CEO", "doctor", "nurse", "cashier", "janitor", "engineer", "pilot", "dentist", "leader"] -RENDERPREFIX = "a high quality photo of a" - -def echoToken(token): - res = getMostSimilar(tokenizer, text_encoder, token) - return ",".join(res) - -def getEmbeddingForToken(tokenizer, token): - token_ids = tokenizer.encode(token)[1:-1] - if len(token_ids) != 1: - print(len(token_ids)) - raise - token_id = token_ids[0] - return token_id, text_encoder.get_input_embeddings().weight.data[token_id].unsqueeze(0) - -def getMostSimilar(tokenizer, text_encoder, token, numResults=50): - internal_embs = text_encoder.text_model.embeddings.token_embedding.weight - tID, tok = getEmbeddingForToken(tokenizer, token) - - cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6) - scores = cos(internal_embs.to("cpu").to(torch.float32), tok.to("cpu").to(torch.float32)) - sorted_scores, sorted_ids = torch.sort(scores, descending=True) - best_ids = sorted_ids[0:numResults].detach().numpy() - best_scores = sorted_scores[0:numResults].detach().numpy() - - res = [] - for best_id, best_score in zip(best_ids, best_scores): - #res.append((tokenizer.decode(best_id), best_score)) - res.append("[" + tokenizer.decode(best_id) + "," + str(best_score) + "]") - return res[1:] - -def computeTermSimilarity(tokenizer, text_encoder, termA, termB): - inputs = tokenizer([termA, termB], padding=True, return_tensors="pt").to("cpu") - outputs = text_encoder(**inputs) - cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-6) - val = cos(outputs.pooler_output[0], outputs.pooler_output[1]).item() - return float(val) - -def computeJob(tokenizer, text_encoder, job): - res = {} - neutralPrompt = " ".join([RENDERPREFIX, job]) - titleText = neutralPrompt - for gender in GENDERS: - for ethnicity in ETHNICITIES: - prompt = " ".join([RENDERPREFIX, ethnicity, gender, job]) - val = computeTermSimilarity(tokenizer, text_encoder, prompt, neutralPrompt) - res[prompt] = val - - return titleText, sorted(res.items(), reverse=True) - -def computeLook(tokenizer, text_encoder, look): - res = {} - titleText = " ".join([RENDERPREFIX, - look, - "[", - "|".join(GENDERS), - "]"]) - - for gender in GENDERS: - neutralPromptGender = " ".join([RENDERPREFIX, look, gender]) - for ethnicity in ETHNICITIES: - prompt = " ".join([RENDERPREFIX, look, ethnicity, gender]) - val = computeTermSimilarity(tokenizer, text_encoder, prompt, neutralPromptGender) - res[prompt] = val - - return titleText, sorted(res.items(), reverse=True) - -# via https://stackoverflow.com/questions/57316491/how-to-convert-matplotlib-figure-to-pil-image-object-without-saving-image -def fig2img(fig): - """Convert a Matplotlib figure to a PIL Image and return it""" - buf = io.BytesIO() - fig.savefig(buf) - buf.seek(0) - img = Image.open(buf) - return img - -def computePlot(title, results, scaleXAxis=True): - x = 
list(map(lambda x:x[0], results)) - y = list(map(lambda x:x[1], results)) - - fig, ax = plt.subplots(1, 1, figsize=(10, 5)) - y_pos = np.arange(len(x)) - - hbars = ax.barh(y_pos, y, left=0, align='center') - ax.set_yticks(y_pos, labels=x) - ax.invert_yaxis() # labels read top-to-bottom - ax.set_xlabel('Cosine similarity - take care to note compressed X-axis') - ax.set_title('Similarity to "' + title + '"') - - # Label with specially formatted floats - ax.bar_label(hbars, fmt='%.3f') - minR = np.min(y) - maxR = np.max(y) - diffR = maxR-minR - - if scaleXAxis: - ax.set_xlim(left=minR-0.1*diffR, right=maxR+0.1*diffR) - else: - ax.set_xlim(left=0.0, right=1.0) - plt.tight_layout() - plt.close() - return fig2img(fig) - -def computeJobBias(job): - title, results = computeJob(tokenizer, text_encoder, job) - return computePlot(title, results) - -def computeLookBias(look): - title, results = computeLook(tokenizer, text_encoder, look) - return computePlot(title, results) - -disclaimerString = "" - -jobInterface = gr.Interface(fn=computeJobBias, - inputs=[gr.Dropdown(JOBS, label="job")], - outputs='image', - description="Select Job", - article = "notes: @bhautikj") - -affectInterface = gr.Interface(fn=computeLookBias, - inputs=[gr.Dropdown(LOOKS, label="affect")], - outputs='image', - description="Select Affect", - article = "notes: @bhautikj") - -jobInterfaceManual = gr.Interface(fn=computeJobBias, - inputs=[gr.inputs.Textbox()], - outputs='image', - description="Enter Job", - article = "notes: @bhautikj") - -affectInterfaceManual = gr.Interface(fn=computeLookBias, - inputs=[gr.inputs.Textbox()], - outputs='image', - description="Enter Affect", - article = "notes: @bhautikj") - - -gr.TabbedInterface( - [jobInterface, affectInterface, jobInterfaceManual, affectInterfaceManual], - ["Job Bias", "Affect Bias", "Job Bias (manual)", "Affect Bias (manual)"], - title = "Stable Diffusion CLIP Bias interrogator" -).launch() diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/safe.py b/spaces/bigjoker/stable-diffusion-webui/modules/safe.py deleted file mode 100644 index b51ee885014e4070537f16d35da381402db0db6c..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/safe.py +++ /dev/null @@ -1,192 +0,0 @@ -# this code is adapted from the script contributed by anon from /h/ - -import io -import pickle -import collections -import sys -import traceback - -import torch -import numpy -import _codecs -import zipfile -import re - - -# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage -TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage - - -def encode(*args): - out = _codecs.encode(*args) - return out - - -class RestrictedUnpickler(pickle.Unpickler): - extra_handler = None - - def persistent_load(self, saved_id): - assert saved_id[0] == 'storage' - return TypedStorage() - - def find_class(self, module, name): - if self.extra_handler is not None: - res = self.extra_handler(module, name) - if res is not None: - return res - - if module == 'collections' and name == 'OrderedDict': - return getattr(collections, name) - if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']: - return getattr(torch._utils, name) - if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']: - return getattr(torch, name) - if module == 'torch.nn.modules.container' and name in 
['ParameterDict']: - return getattr(torch.nn.modules.container, name) - if module == 'numpy.core.multiarray' and name in ['scalar', '_reconstruct']: - return getattr(numpy.core.multiarray, name) - if module == 'numpy' and name in ['dtype', 'ndarray']: - return getattr(numpy, name) - if module == '_codecs' and name == 'encode': - return encode - if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint': - import pytorch_lightning.callbacks - return pytorch_lightning.callbacks.model_checkpoint - if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint': - import pytorch_lightning.callbacks.model_checkpoint - return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint - if module == "__builtin__" and name == 'set': - return set - - # Forbid everything else. - raise Exception(f"global '{module}/{name}' is forbidden") - - -# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/' -allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$") -data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$") - -def check_zip_filenames(filename, names): - for name in names: - if allowed_zip_names_re.match(name): - continue - - raise Exception(f"bad file inside {filename}: {name}") - - -def check_pt(filename, extra_handler): - try: - - # new pytorch format is a zip file - with zipfile.ZipFile(filename) as z: - check_zip_filenames(filename, z.namelist()) - - # find filename of data.pkl in zip file: '/data.pkl' - data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)] - if len(data_pkl_filenames) == 0: - raise Exception(f"data.pkl not found in {filename}") - if len(data_pkl_filenames) > 1: - raise Exception(f"Multiple data.pkl found in {filename}") - with z.open(data_pkl_filenames[0]) as file: - unpickler = RestrictedUnpickler(file) - unpickler.extra_handler = extra_handler - unpickler.load() - - except zipfile.BadZipfile: - - # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle - with open(filename, "rb") as file: - unpickler = RestrictedUnpickler(file) - unpickler.extra_handler = extra_handler - for i in range(5): - unpickler.load() - - -def load(filename, *args, **kwargs): - return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs) - - -def load_with_extra(filename, extra_handler=None, *args, **kwargs): - """ - this function is intended to be used by extensions that want to load models with - some extra classes in them that the usual unpickler would find suspicious. - - Use the extra_handler argument to specify a function that takes module and field name as text, - and returns that field's value: - - ```python - def extra(module, name): - if module == 'collections' and name == 'OrderedDict': - return collections.OrderedDict - - return None - - safe.load_with_extra('model.pt', extra_handler=extra) - ``` - - The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is - definitely unsafe. - """ - - from modules import shared - - try: - if not shared.cmd_opts.disable_safe_unpickle: - check_pt(filename, extra_handler) - - except pickle.UnpicklingError: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("-----> !!!! The file is most likely corrupted !!!! 
<-----", file=sys.stderr) - print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr) - return None - - except Exception: - print(f"Error verifying pickled file from {filename}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr) - print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr) - return None - - return unsafe_torch_load(filename, *args, **kwargs) - - -class Extra: - """ - A class for temporarily setting the global handler for when you can't explicitly call load_with_extra - (because it's not your code making the torch.load call). The intended use is like this: - -``` -import torch -from modules import safe - -def handler(module, name): - if module == 'torch' and name in ['float64', 'float16']: - return getattr(torch, name) - - return None - -with safe.Extra(handler): - x = torch.load('model.pt') -``` - """ - - def __init__(self, handler): - self.handler = handler - - def __enter__(self): - global global_extra_handler - - assert global_extra_handler is None, 'already inside an Extra() block' - global_extra_handler = self.handler - - def __exit__(self, exc_type, exc_val, exc_tb): - global global_extra_handler - - global_extra_handler = None - - -unsafe_torch_load = torch.load -torch.load = load -global_extra_handler = None - diff --git a/spaces/bilby/bilby-retrievalqa/Dockerfile b/spaces/bilby/bilby-retrievalqa/Dockerfile deleted file mode 100644 index 1774ca451c08fbbff50f695513a93a7fe7d904d9..0000000000000000000000000000000000000000 --- a/spaces/bilby/bilby-retrievalqa/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.10-slim - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git wget unzip openssh-client nano bzip2 ca-certificates curl && \ - apt-get install -y gcc libopenblas-dev && \ - apt-get install -y libglib2.0-0 libsm6 libsndfile1-dev libxext6 libxrender-dev libgl1-mesa-glx ffmpeg && \ - apt-get install -y gcc python3-dev python3-pip libxml2-dev libxslt1-dev zlib1g-dev g++ - -COPY ./ /app -WORKDIR /app -RUN pip install -r requirements.txt -ENV GRADIO_SERVER_NAME '0.0.0.0' -ENV GRADIO_DEBUG '1' -ENTRYPOINT ["python"] -CMD ["retrievalqademo.py"] diff --git a/spaces/bioriAsaeru/text-to-voice/Dhoondte Reh Jaoge 2 Movie Mp3 Songs Free Download Dont Miss the Chance to Hear the Amazing Music by Jatin-Lalit.md b/spaces/bioriAsaeru/text-to-voice/Dhoondte Reh Jaoge 2 Movie Mp3 Songs Free Download Dont Miss the Chance to Hear the Amazing Music by Jatin-Lalit.md deleted file mode 100644 index ce3f9cca8aea20a1c87648549d05076fd37889ab..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dhoondte Reh Jaoge 2 Movie Mp3 Songs Free Download Dont Miss the Chance to Hear the Amazing Music by Jatin-Lalit.md +++ /dev/null @@ -1,6 +0,0 @@ -


    diff --git a/spaces/bioriAsaeru/text-to-voice/Download Goliyon Ki Raasleela Ram-leela 5 in Hindi The Climax of the Modern-Day Romeo and Juliet.md b/spaces/bioriAsaeru/text-to-voice/Download Goliyon Ki Raasleela Ram-leela 5 in Hindi The Climax of the Modern-Day Romeo and Juliet.md deleted file mode 100644 index e64c0a6286912fd78a04e477941e00e542cca090..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Download Goliyon Ki Raasleela Ram-leela 5 in Hindi The Climax of the Modern-Day Romeo and Juliet.md +++ /dev/null @@ -1,6 +0,0 @@ -


    diff --git a/spaces/bioriAsaeru/text-to-voice/Dreamboxcccamclinetestdownload.md b/spaces/bioriAsaeru/text-to-voice/Dreamboxcccamclinetestdownload.md deleted file mode 100644 index f73e8c7b3efac6eb6ce5794b2d103c9aa40dfd28..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dreamboxcccamclinetestdownload.md +++ /dev/null @@ -1,6 +0,0 @@ -


    diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py deleted file mode 100644 index 8debd1fa72d77ca03df680facb60bdf79638cade..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ...modules import NormConv2d -from .base import MultiDiscriminator, MultiDiscriminatorOutputType - - -def get_padding(kernel_size: int, dilation: int = 1) -> int: - return int((kernel_size * dilation - dilation) / 2) - - -class PeriodDiscriminator(nn.Module): - """Period sub-discriminator. - - Args: - period (int): Period between samples of audio. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_layers (int): Number of convolutional layers. - kernel_sizes (list of int): Kernel sizes for convolutions. - stride (int): Stride for convolutions. - filters (int): Initial number of filters in convolutions. - filters_scale (int): Multiplier of number of filters as we increase depth. - max_filters (int): Maximum number of filters. - norm (str): Normalization method. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - """ - def __init__(self, period: int, in_channels: int = 1, out_channels: int = 1, - n_layers: int = 5, kernel_sizes: tp.List[int] = [5, 3], stride: int = 3, - filters: int = 8, filters_scale: int = 4, max_filters: int = 1024, - norm: str = 'weight_norm', activation: str = 'LeakyReLU', - activation_params: dict = {'negative_slope': 0.2}): - super().__init__() - self.period = period - self.n_layers = n_layers - self.activation = getattr(torch.nn, activation)(**activation_params) - self.convs = nn.ModuleList() - in_chs = in_channels - for i in range(self.n_layers): - out_chs = min(filters * (filters_scale ** (i + 1)), max_filters) - eff_stride = 1 if i == self.n_layers - 1 else stride - self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_sizes[0], 1), stride=(eff_stride, 1), - padding=((kernel_sizes[0] - 1) // 2, 0), norm=norm)) - in_chs = out_chs - self.conv_post = NormConv2d(in_chs, out_channels, kernel_size=(kernel_sizes[1], 1), stride=1, - padding=((kernel_sizes[1] - 1) // 2, 0), norm=norm) - - def forward(self, x: torch.Tensor): - fmap = [] - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), 'reflect') - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for conv in self.convs: - x = conv(x) - x = self.activation(x) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - # x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(MultiDiscriminator): - """Multi-Period (MPD) Discriminator. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - periods (Sequence[int]): Periods between samples of audio for the sub-discriminators. 
- **kwargs: Additional args for `PeriodDiscriminator` - """ - def __init__(self, in_channels: int = 1, out_channels: int = 1, - periods: tp.Sequence[int] = [2, 3, 5, 7, 11], **kwargs): - super().__init__() - self.discriminators = nn.ModuleList([ - PeriodDiscriminator(p, in_channels, out_channels, **kwargs) for p in periods - ]) - - @property - def num_discriminators(self): - return len(self.discriminators) - - def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType: - logits = [] - fmaps = [] - for disc in self.discriminators: - logit, fmap = disc(x) - logits.append(logit) - fmaps.append(fmap) - return logits, fmaps diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/export/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/export/__init__.py deleted file mode 100644 index 5a58758f64aae6071fa688be4400622ce6036efa..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/export/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- - -import warnings - -from .flatten import TracingAdapter -from .torchscript import dump_torchscript_IR, scripting_with_instances - -try: - from caffe2.proto import caffe2_pb2 as _tmp - from caffe2.python import core - - # caffe2 is optional -except ImportError: - pass -else: - from .api import * - - -# TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported -STABLE_ONNX_OPSET_VERSION = 11 - - -def add_export_config(cfg): - warnings.warn( - "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning - ) - return cfg - - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/rcnn.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/rcnn.py deleted file mode 100644 index edcbda553a619c314d6175638b485ee5c791a176..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/meta_arch/rcnn.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import numpy as np -from typing import Dict, List, Optional, Tuple -import torch -from torch import nn - -from detectron2.config import configurable -from detectron2.data.detection_utils import convert_image_to_rgb -from detectron2.layers import move_device_like -from detectron2.structures import ImageList, Instances -from detectron2.utils.events import get_event_storage -from detectron2.utils.logger import log_first_n - -from ..backbone import Backbone, build_backbone -from ..postprocessing import detector_postprocess -from ..proposal_generator import build_proposal_generator -from ..roi_heads import build_roi_heads -from .build import META_ARCH_REGISTRY - -__all__ = ["GeneralizedRCNN", "ProposalNetwork"] - - -@META_ARCH_REGISTRY.register() -class GeneralizedRCNN(nn.Module): - """ - Generalized R-CNN. Any models that contains the following three components: - 1. Per-image feature extraction (aka backbone) - 2. Region proposal generation - 3. 
Per-region feature extraction and prediction - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - proposal_generator: nn.Module, - roi_heads: nn.Module, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - input_format: Optional[str] = None, - vis_period: int = 0, - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - proposal_generator: a module that generates proposals using backbone features - roi_heads: a ROI head that performs per-region computation - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - input_format: describe the meaning of channels of input. Needed by visualization - vis_period: the period to run visualization. Set to 0 to disable. - """ - super().__init__() - self.backbone = backbone - self.proposal_generator = proposal_generator - self.roi_heads = roi_heads - - self.input_format = input_format - self.vis_period = vis_period - if vis_period > 0: - assert input_format is not None, "input_format is required for visualization!" - - self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) - assert ( - self.pixel_mean.shape == self.pixel_std.shape - ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - return { - "backbone": backbone, - "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), - "roi_heads": build_roi_heads(cfg, backbone.output_shape()), - "input_format": cfg.INPUT.FORMAT, - "vis_period": cfg.VIS_PERIOD, - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - } - - @property - def device(self): - return self.pixel_mean.device - - def _move_to_current_device(self, x): - return move_device_like(x, self.pixel_mean) - - def visualize_training(self, batched_inputs, proposals): - """ - A function used to visualize images and proposals. It shows ground truth - bounding boxes on the original image and up to 20 top-scoring predicted - object proposals on the original image. Users can implement different - visualization functions for different models. - - Args: - batched_inputs (list): a list that contains input to the model. - proposals (list): a list that contains predicted proposals. Both - batched_inputs and proposals should have the same length. - """ - from detectron2.utils.visualizer import Visualizer - - storage = get_event_storage() - max_vis_prop = 20 - - for input, prop in zip(batched_inputs, proposals): - img = input["image"] - img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) - v_gt = Visualizer(img, None) - v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes) - anno_img = v_gt.get_image() - box_size = min(len(prop.proposal_boxes), max_vis_prop) - v_pred = Visualizer(img, None) - v_pred = v_pred.overlay_instances( - boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy() - ) - prop_img = v_pred.get_image() - vis_img = np.concatenate((anno_img, prop_img), axis=1) - vis_img = vis_img.transpose(2, 0, 1) - vis_name = "Left: GT bounding boxes; Right: Predicted proposals" - storage.put_image(vis_name, vis_img) - break # only visualize one image in a batch - - def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper` . 
- Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - - * image: Tensor, image in (C, H, W) format. - * instances (optional): groundtruth :class:`Instances` - * proposals (optional): :class:`Instances`, precomputed proposals. - - Other information that's included in the original dicts, such as: - - * "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - - Returns: - list[dict]: - Each dict is the output for one input image. - The dict contains one key "instances" whose value is a :class:`Instances`. - The :class:`Instances` object has the following keys: - "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" - """ - if not self.training: - return self.inference(batched_inputs) - - images = self.preprocess_image(batched_inputs) - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - else: - gt_instances = None - - features = self.backbone(images.tensor) - - if self.proposal_generator is not None: - proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) - else: - assert "proposals" in batched_inputs[0] - proposals = [x["proposals"].to(self.device) for x in batched_inputs] - proposal_losses = {} - - _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) - if self.vis_period > 0: - storage = get_event_storage() - if storage.iter % self.vis_period == 0: - self.visualize_training(batched_inputs, proposals) - - losses = {} - losses.update(detector_losses) - losses.update(proposal_losses) - return losses - - def inference( - self, - batched_inputs: List[Dict[str, torch.Tensor]], - detected_instances: Optional[List[Instances]] = None, - do_postprocess: bool = True, - ): - """ - Run inference on the given inputs. - - Args: - batched_inputs (list[dict]): same as in :meth:`forward` - detected_instances (None or list[Instances]): if not None, it - contains an `Instances` object per image. The `Instances` - object contains "pred_boxes" and "pred_classes" which are - known boxes in the image. - The inference will then skip the detection of bounding boxes, - and only predict other per-ROI outputs. - do_postprocess (bool): whether to apply post-processing on the outputs. - - Returns: - When do_postprocess=True, same as in :meth:`forward`. - Otherwise, a list[Instances] containing raw network outputs. - """ - assert not self.training - - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - - if detected_instances is None: - if self.proposal_generator is not None: - proposals, _ = self.proposal_generator(images, features, None) - else: - assert "proposals" in batched_inputs[0] - proposals = [x["proposals"].to(self.device) for x in batched_inputs] - - results, _ = self.roi_heads(images, features, proposals, None) - else: - detected_instances = [x.to(self.device) for x in detected_instances] - results = self.roi_heads.forward_with_given_boxes(features, detected_instances) - - if do_postprocess: - assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." - return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) - return results - - def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]): - """ - Normalize, pad and batch the input images. 
- """ - images = [self._move_to_current_device(x["image"]) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors( - images, - self.backbone.size_divisibility, - padding_constraints=self.backbone.padding_constraints, - ) - return images - - @staticmethod - def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes): - """ - Rescale the output instances to the target size. - """ - # note: private function; subject to changes - processed_results = [] - for results_per_image, input_per_image, image_size in zip( - instances, batched_inputs, image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = detector_postprocess(results_per_image, height, width) - processed_results.append({"instances": r}) - return processed_results - - -@META_ARCH_REGISTRY.register() -class ProposalNetwork(nn.Module): - """ - A meta architecture that only predicts object proposals. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - proposal_generator: nn.Module, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - proposal_generator: a module that generates proposals using backbone features - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - """ - super().__init__() - self.backbone = backbone - self.proposal_generator = proposal_generator - self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - return { - "backbone": backbone, - "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - } - - @property - def device(self): - return self.pixel_mean.device - - def _move_to_current_device(self, x): - return move_device_like(x, self.pixel_mean) - - def forward(self, batched_inputs): - """ - Args: - Same as in :class:`GeneralizedRCNN.forward` - - Returns: - list[dict]: - Each dict is the output for one input image. - The dict contains one key "proposals" whose value is a - :class:`Instances` with keys "proposal_boxes" and "objectness_logits". - """ - images = [self._move_to_current_device(x["image"]) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors( - images, - self.backbone.size_divisibility, - padding_constraints=self.backbone.padding_constraints, - ) - features = self.backbone(images.tensor) - - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - elif "targets" in batched_inputs[0]: - log_first_n( - logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 - ) - gt_instances = [x["targets"].to(self.device) for x in batched_inputs] - else: - gt_instances = None - proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) - # In training, the proposals are not useful at all but we generate them anyway. - # This makes RPN-only models about 5% slower. 
- if self.training: - return proposal_losses - - processed_results = [] - for results_per_image, input_per_image, image_size in zip( - proposals, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = detector_postprocess(results_per_image, height, width) - processed_results.append({"proposals": r}) - return processed_results diff --git a/spaces/cchaun/music_tagging/models/modules.py b/spaces/cchaun/music_tagging/models/modules.py deleted file mode 100644 index 4db1084b6b8e3e4fe1ae3b3689b238a167d22429..0000000000000000000000000000000000000000 --- a/spaces/cchaun/music_tagging/models/modules.py +++ /dev/null @@ -1,271 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -import torchaudio -import sys -from torch.autograd import Variable -import math -import librosa - - -class Conv_1d(nn.Module): - def __init__(self, input_channels, output_channels, shape=3, stride=1, pooling=2): - super(Conv_1d, self).__init__() - self.conv = nn.Conv1d(input_channels, output_channels, shape, stride=stride, padding=shape//2) - self.bn = nn.BatchNorm1d(output_channels) - self.relu = nn.ReLU() - self.mp = nn.MaxPool1d(pooling) - def forward(self, x): - out = self.mp(self.relu(self.bn(self.conv(x)))) - return out - - -class Conv_2d(nn.Module): - def __init__(self, input_channels, output_channels, shape=3, stride=1, pooling=2): - super(Conv_2d, self).__init__() - self.conv = nn.Conv2d(input_channels, output_channels, shape, stride=stride, padding=shape//2) - self.bn = nn.BatchNorm2d(output_channels) - self.relu = nn.ReLU() - self.mp = nn.MaxPool2d(pooling) - def forward(self, x): - out = self.mp(self.relu(self.bn(self.conv(x)))) - return out - - -class Res_2d(nn.Module): - def __init__(self, input_channels, output_channels, shape=3, stride=2): - super(Res_2d, self).__init__() - # convolution - self.conv_1 = nn.Conv2d(input_channels, output_channels, shape, stride=stride, padding=shape//2) - self.bn_1 = nn.BatchNorm2d(output_channels) - self.conv_2 = nn.Conv2d(output_channels, output_channels, shape, padding=shape//2) - self.bn_2 = nn.BatchNorm2d(output_channels) - - # residual - self.diff = False - if (stride != 1) or (input_channels != output_channels): - self.conv_3 = nn.Conv2d(input_channels, output_channels, shape, stride=stride, padding=shape//2) - self.bn_3 = nn.BatchNorm2d(output_channels) - self.diff = True - self.relu = nn.ReLU() - - def forward(self, x): - # convolution - out = self.bn_2(self.conv_2(self.relu(self.bn_1(self.conv_1(x))))) - - # residual - if self.diff: - x = self.bn_3(self.conv_3(x)) - out = x + out - out = self.relu(out) - return out - - -class Res_2d_mp(nn.Module): - def __init__(self, input_channels, output_channels, pooling=2): - super(Res_2d_mp, self).__init__() - self.conv_1 = nn.Conv2d(input_channels, output_channels, 3, padding=1) - self.bn_1 = nn.BatchNorm2d(output_channels) - self.conv_2 = nn.Conv2d(output_channels, output_channels, 3, padding=1) - self.bn_2 = nn.BatchNorm2d(output_channels) - self.relu = nn.ReLU() - self.mp = nn.MaxPool2d(pooling) - def forward(self, x): - out = self.bn_2(self.conv_2(self.relu(self.bn_1(self.conv_1(x))))) - out = x + out - out = self.mp(self.relu(out)) - return out - - -class ResSE_1d(nn.Module): - def __init__(self, input_channels, output_channels, shape=3, stride=1, pooling=3): - super(ResSE_1d, self).__init__() - # convolution - self.conv_1 = nn.Conv1d(input_channels, output_channels, shape, 
stride=stride, padding=shape//2) - self.bn_1 = nn.BatchNorm1d(output_channels) - self.conv_2 = nn.Conv1d(output_channels, output_channels, shape, padding=shape//2) - self.bn_2 = nn.BatchNorm1d(output_channels) - - # squeeze & excitation - self.dense1 = nn.Linear(output_channels, output_channels) - self.dense2 = nn.Linear(output_channels, output_channels) - - # residual - self.diff = False - if (stride != 1) or (input_channels != output_channels): - self.conv_3 = nn.Conv1d(input_channels, output_channels, shape, stride=stride, padding=shape//2) - self.bn_3 = nn.BatchNorm1d(output_channels) - self.diff = True - self.relu = nn.ReLU() - self.sigmoid = nn.Sigmoid() - self.mp = nn.MaxPool1d(pooling) - - def forward(self, x): - # convolution - out = self.bn_2(self.conv_2(self.relu(self.bn_1(self.conv_1(x))))) - - # squeeze & excitation - se_out = nn.AvgPool1d(out.size(-1))(out) - se_out = se_out.squeeze(-1) - se_out = self.relu(self.dense1(se_out)) - se_out = self.sigmoid(self.dense2(se_out)) - se_out = se_out.unsqueeze(-1) - out = torch.mul(out, se_out) - - # residual - if self.diff: - x = self.bn_3(self.conv_3(x)) - out = x + out - out = self.mp(self.relu(out)) - return out - - -class Conv_V(nn.Module): - # vertical convolution - def __init__(self, input_channels, output_channels, filter_shape): - super(Conv_V, self).__init__() - self.conv = nn.Conv2d(input_channels, output_channels, filter_shape, - padding=(0, filter_shape[1]//2)) - self.bn = nn.BatchNorm2d(output_channels) - self.relu = nn.ReLU() - - def forward(self, x): - x = self.relu(self.bn(self.conv(x))) - freq = x.size(2) - out = nn.MaxPool2d((freq, 1), stride=(freq, 1))(x) - out = out.squeeze(2) - return out - - -class Conv_H(nn.Module): - # horizontal convolution - def __init__(self, input_channels, output_channels, filter_length): - super(Conv_H, self).__init__() - self.conv = nn.Conv1d(input_channels, output_channels, filter_length, - padding=filter_length//2) - self.bn = nn.BatchNorm1d(output_channels) - self.relu = nn.ReLU() - - def forward(self, x): - freq = x.size(2) - out = nn.AvgPool2d((freq, 1), stride=(freq, 1))(x) - out = out.squeeze(2) - out = self.relu(self.bn(self.conv(out))) - return out - - -# Modules for harmonic filters -def hz_to_midi(hz): - return 12 * (torch.log2(hz) - np.log2(440.0)) + 69 - -def midi_to_hz(midi): - return 440.0 * (2.0 ** ((midi - 69.0)/12.0)) - -def note_to_midi(note): - return librosa.core.note_to_midi(note) - -def hz_to_note(hz): - return librosa.core.hz_to_note(hz) - -def initialize_filterbank(sample_rate, n_harmonic, semitone_scale): - # MIDI - # lowest note - low_midi = note_to_midi('C1') - - # highest note - high_note = hz_to_note(sample_rate / (2 * n_harmonic)) - high_midi = note_to_midi(high_note) - - # number of scales - level = (high_midi - low_midi) * semitone_scale - midi = np.linspace(low_midi, high_midi, level + 1) - hz = midi_to_hz(midi[:-1]) - - # stack harmonics - harmonic_hz = [] - for i in range(n_harmonic): - harmonic_hz = np.concatenate((harmonic_hz, hz * (i+1))) - - return harmonic_hz, level - - -class HarmonicSTFT(nn.Module): - def __init__(self, - sample_rate=16000, - n_fft=513, - win_length=None, - hop_length=None, - pad=0, - power=2, - normalized=False, - n_harmonic=6, - semitone_scale=2, - bw_Q=1.0, - learn_bw=None): - super(HarmonicSTFT, self).__init__() - - # Parameters - self.sample_rate = sample_rate - self.n_harmonic = n_harmonic - self.bw_alpha = 0.1079 - self.bw_beta = 24.7 - - # Spectrogram - self.spec = torchaudio.transforms.Spectrogram(n_fft=n_fft, 
win_length=win_length, - hop_length=None, pad=0, - window_fn=torch.hann_window, - power=power, normalized=normalized, wkwargs=None) - self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB() - - # Initialize the filterbank. Equally spaced in MIDI scale. - harmonic_hz, self.level = initialize_filterbank(sample_rate, n_harmonic, semitone_scale) - - # Center frequncies to tensor - self.f0 = torch.tensor(harmonic_hz.astype('float32')) - - # Bandwidth parameters - if learn_bw == 'only_Q': - self.bw_Q = nn.Parameter(torch.tensor(np.array([bw_Q]).astype('float32'))) - elif learn_bw == 'fix': - self.bw_Q = torch.tensor(np.array([bw_Q]).astype('float32')) - - def get_harmonic_fb(self): - # bandwidth - bw = (self.bw_alpha * self.f0 + self.bw_beta) / self.bw_Q - bw = bw.unsqueeze(0) # (1, n_band) - f0 = self.f0.unsqueeze(0) # (1, n_band) - fft_bins = self.fft_bins.unsqueeze(1) # (n_bins, 1) - - up_slope = torch.matmul(fft_bins, (2/bw)) + 1 - (2 * f0 / bw) - down_slope = torch.matmul(fft_bins, (-2/bw)) + 1 + (2 * f0 / bw) - fb = torch.max(self.zero, torch.min(down_slope, up_slope)) - return fb - - def to_device(self, device, n_bins): - self.f0 = self.f0.to(device) - self.bw_Q = self.bw_Q.to(device) - # fft bins - self.fft_bins = torch.linspace(0, self.sample_rate//2, n_bins) - self.fft_bins = self.fft_bins.to(device) - self.zero = torch.zeros(1) - self.zero = self.zero.to(device) - - def forward(self, waveform): - # stft - spectrogram = self.spec(waveform) - - # to device - self.to_device(waveform.device, spectrogram.size(1)) - - # triangle filter - harmonic_fb = self.get_harmonic_fb() - harmonic_spec = torch.matmul(spectrogram.transpose(1, 2), harmonic_fb).transpose(1, 2) - - # (batch, channel, length) -> (batch, harmonic, f0, length) - b, c, l = harmonic_spec.size() - harmonic_spec = harmonic_spec.view(b, self.n_harmonic, self.level, l) - - # amplitude to db - harmonic_spec = self.amplitude_to_db(harmonic_spec) - return harmonic_spec diff --git a/spaces/chansung/LLM-As-Chatbot/models/byom.py b/spaces/chansung/LLM-As-Chatbot/models/byom.py deleted file mode 100644 index 0ca3f06efa2cc96a63ab5ddc4523dd9f5e0b53e5..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLM-As-Chatbot/models/byom.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch -import transformers -from peft import PeftModel -from transformers import AutoModelForCausalLM, AutoTokenizer - -def load_model( - base, - finetuned, - mode_8bit, - mode_4bit, - force_download_ckpt, - model_cls, - tokenizer_cls -): - if tokenizer_cls is None: - tokenizer_cls = AutoTokenizer - else: - tokenizer_cls = eval(tokenizer_cls) - - if model_cls is None: - model_cls = AutoModelForCausalLM - else: - model_cls = eval(model_cls) - - print(f"tokenizer_cls: {tokenizer_cls}") - print(f"model_cls: {model_cls}") - - tokenizer = tokenizer_cls.from_pretrained(base) - tokenizer.padding_side = "left" - - model = model_cls.from_pretrained( - base, - load_in_8bit=mode_8bit, - load_in_4bit=mode_4bit, - torch_dtype=torch.float16, - device_map="auto", - ) - - if finetuned is not None and \ - finetuned != "" and \ - finetuned != "N/A": - model = PeftModel.from_pretrained( - model, - finetuned, - # force_download=force_download_ckpt, - device_map={'': 0} - ) - - return model, tokenizer \ No newline at end of file diff --git a/spaces/chasemcdo/hf_localai/pkg/gallery/models_test.go b/spaces/chasemcdo/hf_localai/pkg/gallery/models_test.go deleted file mode 100644 index 9ea87a700c961569b1256609490bca0dcd56dfdd..0000000000000000000000000000000000000000 --- 
a/spaces/chasemcdo/hf_localai/pkg/gallery/models_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package gallery_test - -import ( - "io/ioutil" - "os" - "path/filepath" - - . "github.com/go-skynet/LocalAI/pkg/gallery" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "gopkg.in/yaml.v3" -) - -var _ = Describe("Model test", func() { - Context("Downloading", func() { - It("applies model correctly", func() { - tempdir, err := os.MkdirTemp("", "test") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tempdir) - c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = InstallModel(tempdir, "", c, map[string]interface{}{}, func(string, string, string, float64) {}) - Expect(err).ToNot(HaveOccurred()) - - for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "cerebras.yaml"} { - _, err = os.Stat(filepath.Join(tempdir, f)) - Expect(err).ToNot(HaveOccurred()) - } - - content := map[string]interface{}{} - - dat, err := os.ReadFile(filepath.Join(tempdir, "cerebras.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = yaml.Unmarshal(dat, content) - Expect(err).ToNot(HaveOccurred()) - - Expect(content["context_size"]).To(Equal(1024)) - }) - - It("applies model from gallery correctly", func() { - tempdir, err := os.MkdirTemp("", "test") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tempdir) - - gallery := []GalleryModel{{ - Name: "bert", - URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml", - }} - out, err := yaml.Marshal(gallery) - Expect(err).ToNot(HaveOccurred()) - err = ioutil.WriteFile(filepath.Join(tempdir, "gallery_simple.yaml"), out, 0644) - Expect(err).ToNot(HaveOccurred()) - - galleries := []Gallery{ - { - Name: "test", - URL: "file://" + filepath.Join(tempdir, "gallery_simple.yaml"), - }, - } - - models, err := AvailableGalleryModels(galleries, tempdir) - Expect(err).ToNot(HaveOccurred()) - Expect(len(models)).To(Equal(1)) - Expect(models[0].Name).To(Equal("bert")) - Expect(models[0].URL).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml")) - Expect(models[0].Installed).To(BeFalse()) - - err = InstallModelFromGallery(galleries, "test@bert", tempdir, GalleryModel{}, func(s1, s2, s3 string, f float64) {}) - Expect(err).ToNot(HaveOccurred()) - - dat, err := os.ReadFile(filepath.Join(tempdir, "bert.yaml")) - Expect(err).ToNot(HaveOccurred()) - - content := map[string]interface{}{} - err = yaml.Unmarshal(dat, &content) - Expect(err).ToNot(HaveOccurred()) - Expect(content["backend"]).To(Equal("bert-embeddings")) - - models, err = AvailableGalleryModels(galleries, tempdir) - Expect(err).ToNot(HaveOccurred()) - Expect(len(models)).To(Equal(1)) - Expect(models[0].Installed).To(BeTrue()) - }) - - It("renames model correctly", func() { - tempdir, err := os.MkdirTemp("", "test") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tempdir) - c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = InstallModel(tempdir, "foo", c, map[string]interface{}{}, func(string, string, string, float64) {}) - Expect(err).ToNot(HaveOccurred()) - - for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "foo.yaml"} { - _, err = os.Stat(filepath.Join(tempdir, f)) - Expect(err).ToNot(HaveOccurred()) - } - }) - - It("overrides parameters", func() { - tempdir, err := os.MkdirTemp("", "test") - 
Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tempdir) - c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = InstallModel(tempdir, "foo", c, map[string]interface{}{"backend": "foo"}, func(string, string, string, float64) {}) - Expect(err).ToNot(HaveOccurred()) - - for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "foo.yaml"} { - _, err = os.Stat(filepath.Join(tempdir, f)) - Expect(err).ToNot(HaveOccurred()) - } - - content := map[string]interface{}{} - - dat, err := os.ReadFile(filepath.Join(tempdir, "foo.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = yaml.Unmarshal(dat, content) - Expect(err).ToNot(HaveOccurred()) - - Expect(content["backend"]).To(Equal("foo")) - }) - - It("catches path traversals", func() { - tempdir, err := os.MkdirTemp("", "test") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tempdir) - c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml")) - Expect(err).ToNot(HaveOccurred()) - - err = InstallModel(tempdir, "../../../foo", c, map[string]interface{}{}, func(string, string, string, float64) {}) - Expect(err).To(HaveOccurred()) - }) - }) -}) diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/conftest.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/conftest.py deleted file mode 100644 index e85e5afb0200bd54430a24611550246bedb76915..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/conftest.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# tests directory-specific settings - this file is run automatically -# by pytest before any tests are run - -import sys -import warnings -from os.path import abspath, dirname, join - - -# allow having multiple repository checkouts and not needing to remember to rerun -# 'pip install -e .[dev]' when switching between checkouts and running tests. -git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src")) -sys.path.insert(1, git_repo_path) - - -# silence FutureWarning warnings in tests since often we can't act on them until -# they become normal warnings - i.e. 
the tests still need to test the current functionality -warnings.simplefilter(action="ignore", category=FutureWarning) - - -def pytest_addoption(parser): - from transformers.testing_utils import pytest_addoption_shared - - pytest_addoption_shared(parser) - - -def pytest_terminal_summary(terminalreporter): - from transformers.testing_utils import pytest_terminal_summary_main - - make_reports = terminalreporter.config.getoption("--make-reports") - if make_reports: - pytest_terminal_summary_main(terminalreporter, id=make_reports) diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/wikisql_utils.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/wikisql_utils.py deleted file mode 100644 index 110b14e02fb8e07c717b5b5ac146304b8f3d5e59..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/wikisql_utils.py +++ /dev/null @@ -1,257 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Microsoft, The Google and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import dataclasses -import enum -import functools -import math -import re - -# The following script is adapted from the script of TaPas. -# Original: https://github.com/google-research/tapas/master/wikisql_utils.py -from typing import Any, List, Text - - -EMPTY_ANSWER = "none" -EMPTY_ANSWER_AGG = "none" - - -def _split_thousands(delimiter, value): - split = value.split(delimiter) - return len(split) > 1 and any((len(x) == 3 for x in split)) - - -def convert_to_float(value): - """Converts value to a float using a series of increasingly complex heuristics. - Args: - value: object that needs to be converted. Allowed types include - float/int/strings. - Returns: - A float interpretation of value. - Raises: - ValueError if the float conversion of value fails. - """ - if isinstance(value, float): - return value - if isinstance(value, int): - return float(value) - if not isinstance(value, str): - raise ValueError("Argument value is not a string. Can't parse it as float") - sanitized = value - - try: - # Example: 1,000.7 - if "." in sanitized and "," in sanitized: - return float(sanitized.replace(",", "")) - # 1,000 - if "," in sanitized and _split_thousands(",", sanitized): - return float(sanitized.replace(",", "")) - # 5,5556 - if "," in sanitized and sanitized.count(",") == 1 and not _split_thousands(",", sanitized): - return float(sanitized.replace(",", ".")) - # 0.0.0.1 - if sanitized.count(".") > 1: - return float(sanitized.replace(".", "")) - # 0,0,0,1 - if sanitized.count(",") > 1: - return float(sanitized.replace(",", "")) - return float(sanitized) - except ValueError: - # Avoid adding the sanitized value in the error message. 
- raise ValueError("Unable to convert value to float") - - -def _normalize_float(answer): - if answer is None: - return None - try: - value = convert_to_float(answer) - if isinstance(value, float) and math.isnan(value): - return None - return value - except ValueError: - return answer.lower() - - -_TYPE_CONVERTER = { - "text": lambda x: x, - "real": convert_to_float, -} - - -class _Aggregation(enum.Enum): - """Aggregations as defined by WikiSQL. Indexes match the data.""" - - NONE = 0 - MAX = 1 - MIN = 2 - COUNT = 3 - SUM = 4 - AVERAGE = 5 - - -class _Operator(enum.Enum): - """The boolean operators used by WikiSQL. Indexes match the data.""" - - EQUALS = 0 - GREATER = 1 - LESSER = 2 - - -@dataclasses.dataclass -class _Condition: - """Represents an SQL where clauses (e.g A = "a" or B > 5).""" - - column: Text - operator: _Operator - cmp_value: Any - - -_TOKENIZER = re.compile(r"\w+|[^\w\s]+", re.UNICODE | re.MULTILINE | re.DOTALL) - - -def _normalize_for_match(x): - return list(_TOKENIZER.findall(x.lower())) - - -def _compare(operator, src, tgt): - if operator == _Operator.EQUALS: - return src == tgt - elif operator == _Operator.GREATER: - return src > tgt - elif operator == _Operator.LESSER: - return src < tgt - raise ValueError(f"Unknown operator: {operator}") - - -def _parse_value(table, column, cell_value): - """Convert numeric values to floats and keeps everything else as string.""" - types = table["types"] - return _TYPE_CONVERTER[types[column]](cell_value) - - -def _is_string(x): - return isinstance(x, str) - - -def _respect_conditions(table, row, conditions): - """True if 'row' satisfies all 'conditions'.""" - for cond in conditions: - table_value = row[cond.column] - - cmp_value = _parse_value(table, cond.column, cond.cmp_value) - - if _is_string(table_value) and _is_string(cmp_value): - table_value = _normalize_for_match(table_value) - cmp_value = _normalize_for_match(cmp_value) - - if not isinstance(table_value, type(cmp_value)): - raise ValueError("Type difference {} != {}".format(type(table_value), type(cmp_value))) - - if not _compare(cond.operator, table_value, cmp_value): - return False - return True - - -def _get_float_answer(table, answer_coordinates, aggregation_op): - """Applies operation to produce reference float answer.""" - if not answer_coordinates: - if aggregation_op == _Aggregation.COUNT: - return 0.0 - else: - return EMPTY_ANSWER_AGG - - # Count can support non numeric answers. - if aggregation_op == _Aggregation.COUNT: - return float(len(answer_coordinates)) - - # If we have just one answer, if float returns it or try a conversion. - values = [table["rows"][i][j] for (i, j) in answer_coordinates] - if len(answer_coordinates) == 1: - try: - return convert_to_float(values[0]) - except ValueError as e: - if aggregation_op != _Aggregation.NONE: - raise e - - if aggregation_op == _Aggregation.NONE: - return None - - # Other aggregation only support numeric values. Bail out if we have strings. - if not all((isinstance(v, (int, float)) for v in values)): - return None - - if aggregation_op == _Aggregation.SUM: - return float(sum(values)) - elif aggregation_op == _Aggregation.AVERAGE: - return sum(values) / len(answer_coordinates) - else: - raise ValueError(f"Unknown aggregation: {aggregation_op}") - - -def _get_answer_coordinates(table, sql_query): - """Retrieves references coordinates by executing SQL.""" - # MAX and MIN are automatically supported by the model. 
- aggregation_op_index = sql_query["agg"] - if aggregation_op_index >= 3: - aggregation_op = _Aggregation(aggregation_op_index) - else: - aggregation_op = _Aggregation.NONE - - target_column = sql_query["sel"] - conditions = [ - _Condition(column, _Operator(operator), cmp_value) - for column, operator, cmp_value in zip( - sql_query["conds"]["column_index"], sql_query["conds"]["operator_index"], sql_query["conds"]["condition"] - ) - ] - - indices = [] - for row in range(len(table["rows"])): - if _respect_conditions(table, table["rows"][row], conditions): - indices.append((row, target_column)) - - if not indices: - return [], aggregation_op - - if len(indices) == 1: - return indices, aggregation_op - - # Parsing of MIN/MAX. - if aggregation_op_index in (1, 2): - operators = {2: min, 1: max} - values = [(table["rows"][i][j], index) for index, (i, j) in enumerate(indices)] - reduced = functools.reduce(operators[sql_query["agg"]], values) - - ret = [indices[reduced[1]]] - return ret, _Aggregation.NONE - - return indices, aggregation_op - - -def _get_answer_text(table, answer_coordinates, float_answer): - if float_answer is not None: - return [str(float_answer)] - return [str(table["real_rows"][r][c]) for r, c in answer_coordinates] - - -def retrieve_wikisql_query_answer_tapas(table, example) -> List: - answer_coordinates, aggregation_op = _get_answer_coordinates(table, example) - float_answer = _get_float_answer(table, answer_coordinates, aggregation_op) - answer_text = _get_answer_text(table, answer_coordinates, float_answer) - # keep the original data the same with TaPas - if len(answer_text) == 0: - answer_text = [EMPTY_ANSWER] - return answer_text diff --git a/spaces/chenxx/ChuanhuChatGPT/run_Windows.bat b/spaces/chenxx/ChuanhuChatGPT/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/chenxx/ChuanhuChatGPT/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/cihyFjudo/fairness-paper-search/Every Child Is Special Tagalog Version Full Movie latgabr A Heartwarming Story of a Boy with Dyslexia.md b/spaces/cihyFjudo/fairness-paper-search/Every Child Is Special Tagalog Version Full Movie latgabr A Heartwarming Story of a Boy with Dyslexia.md deleted file mode 100644 index 6ff0241d760b01bcff1a2da584b1f49e2f619db2..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Every Child Is Special Tagalog Version Full Movie latgabr A Heartwarming Story of a Boy with Dyslexia.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Every Child Is Special Tagalog Version Full Movie latgabr


    Download »»» https://tinurli.com/2uwjfo




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Taio Cruz There She Goes Mp3 Zip.md b/spaces/cihyFjudo/fairness-paper-search/Taio Cruz There She Goes Mp3 Zip.md deleted file mode 100644 index 32dcb5eeb937e1eb356ba36d0d0373b507e5f2a4..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Taio Cruz There She Goes Mp3 Zip.md +++ /dev/null @@ -1,6 +0,0 @@ -

    taio cruz there she goes mp3 zip


    Download File 🗸 https://tinurli.com/2uwjlm




    diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/validators.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/validators.py deleted file mode 100644 index ab2c9b3024714d3b1caeb2f0773a0274dfc10f01..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/attrs/validators.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: MIT - -from attr.validators import * # noqa diff --git a/spaces/cloudwp/Top-20-Diffusion/app.py b/spaces/cloudwp/Top-20-Diffusion/app.py deleted file mode 100644 index 25633ea8fb282571ddb104c69ec21d7645518b09..0000000000000000000000000000000000000000 --- a/spaces/cloudwp/Top-20-Diffusion/app.py +++ /dev/null @@ -1,338 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import time - -models =[ - "", - "CompVis/stable-diffusion-v1-4", - "runwayml/stable-diffusion-v1-5", - "prompthero/openjourney", - "stabilityai/stable-diffusion-2-1", - "stabilityai/stable-diffusion-2-1-base", - "andite/anything-v4.0", - "Linaqruf/anything-v3.0", - "eimiss/EimisAnimeDiffusion_1.0v", - "nitrosocke/Nitro-Diffusion", - "wavymulder/portraitplus", - "22h/vintedois-diffusion-v0-1", - "dreamlike-art/dreamlike-photoreal-2.0", - "dreamlike-art/dreamlike-diffusion-1.0", - "wavymulder/Analog-Diffusion", - "nitrosocke/redshift-diffusion", - "claudfuen/photorealistic-fuen-v1", - "prompthero/openjourney-v2", - "johnslegers/epic-diffusion", - "nitrosocke/Arcane-Diffusion", - "darkstorm2150/Protogen_x5.8_Official_Release", - -] - -model_1=models[1] -model_2=models[2] -model_3=models[3] -model_4=models[4] -model_5=models[5] -model_6=models[6] -model_7=models[7] -model_8=models[8] -model_9=models[9] -model_10=models[10] -model_11=models[11] -model_12=models[12] -model_13=models[13] -model_14=models[14] -model_15=models[15] -model_16=models[16] -model_17=models[17] -model_18=models[18] -model_19=models[19] -model_20=models[20] - -text_gen=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link",live=True, preprocess=True) - -proc1=gr.Interface.load(f"models/{model_1}",live=False,preprocess=True, postprocess=False) -proc2=gr.Interface.load(f"models/{model_2}",live=False,preprocess=True, postprocess=False) -proc3=gr.Interface.load(f"models/{model_3}",live=False,preprocess=True, postprocess=False) -proc4=gr.Interface.load(f"models/{model_4}",live=False,preprocess=True, postprocess=False) -proc5=gr.Interface.load(f"models/{model_5}",live=False,preprocess=True, postprocess=False) -proc6=gr.Interface.load(f"models/{model_6}",live=False,preprocess=True, postprocess=False) -proc7=gr.Interface.load(f"models/{model_7}",live=False,preprocess=True, postprocess=False) -proc8=gr.Interface.load(f"models/{model_8}",live=False,preprocess=True, postprocess=False) -proc9=gr.Interface.load(f"models/{model_9}",live=False,preprocess=True, postprocess=False) -proc10=gr.Interface.load(f"models/{model_10}",live=False,preprocess=True, postprocess=False) -proc11=gr.Interface.load(f"models/{model_11}",live=False,preprocess=True, postprocess=False) -proc12=gr.Interface.load(f"models/{model_12}",live=False,preprocess=True, postprocess=False) -proc13=gr.Interface.load(f"models/{model_13}",live=False,preprocess=True, postprocess=False) -proc14=gr.Interface.load(f"models/{model_14}",live=False,preprocess=True, postprocess=False) -proc15=gr.Interface.load(f"models/{model_15}",live=False,preprocess=True, postprocess=False) 
-proc16=gr.Interface.load(f"models/{model_16}",live=False,preprocess=True, postprocess=False) -proc17=gr.Interface.load(f"models/{model_17}",live=False,preprocess=True, postprocess=False) -proc18=gr.Interface.load(f"models/{model_18}",live=False,preprocess=True, postprocess=False) -proc19=gr.Interface.load(f"models/{model_19}",live=False,preprocess=True, postprocess=False) -proc20=gr.Interface.load(f"models/{model_20}",live=False,preprocess=True, postprocess=False) - -def get_prompts(prompt_text): - return text_gen(prompt_text) -def send_it1(inputs,proc1=proc1): - output1=proc1(inputs) - return(output1) -def send_it2(inputs,proc2=proc2): - output2=proc2(inputs) - return(output2) -def send_it3(inputs,proc3=proc3): - output3=proc3(inputs) - return(output3) -def send_it4(inputs,proc4=proc4): - output4=proc4(inputs) - return(output4) -def send_it5(inputs,proc5=proc5): - output5=proc5(inputs) - return(output5) -def send_it6(inputs,proc6=proc6): - output6=proc6(inputs) - return(output6) -def send_it7(inputs,proc7=proc7): - output7=proc7(inputs) - return(output7) -def send_it8(inputs,proc8=proc8): - output8=proc8(inputs) - return(output8) -def send_it9(inputs,proc9=proc9): - output9=proc9(inputs) - return(output9) -def send_it10(inputs,proc10=proc10): - output10=proc10(inputs) - return(output10) -def send_it11(inputs,proc11=proc11): - output11=proc11(inputs) - return(output11) -def send_it12(inputs,proc12=proc12): - output12=proc12(inputs) - return(output12) -def send_it13(inputs,proc13=proc13): - output13=proc13(inputs) - return(output13) -def send_it14(inputs,proc14=proc14): - output14=proc14(inputs) - return(output14) -def send_it15(inputs,proc15=proc15): - output15=proc15(inputs) - return(output15) -def send_it16(inputs,proc16=proc16): - output16=proc16(inputs) - return(output16) -def send_it17(inputs,proc17=proc17): - output17=proc17(inputs) - return(output17) -def send_it18(inputs,proc18=proc18): - output18=proc18(inputs) - return(output18) -def send_it19(inputs,proc19=proc19): - output19=proc19(inputs) - return(output19) -def send_it20(inputs,proc20=proc20): - output20=proc20(inputs) - return(output20) - - - -with gr.Blocks(css="cake.css") as myface: - gr.HTML("""""") - with gr.Column(): - gr.HTML("""Top 20 Diffuion""") - gr.Markdown("""

    Top 20 Diffusion

    """) - gr.Markdown("""

    20 Popular Diffusion Models

    """) - gr.Markdown("""

    Comparison Space

    """) - with gr.Accordion("Details",open=False): - with gr.Tab("Description"): - gr.Markdown("""
    -

Enter your prompt into the "Short Prompt" box and click "Magic Prompt" to load a prettified version of your prompt.
- When you are satisfied with the prompt in the "Text to Image" box, click "Launch" to load the models.

    - Images load faster with a simpler prompt.
    -
- Image loading is cancelled after 1 minute to preserve the quality of the queue.
- Simply click "Launch" again to retry loading any incomplete images.
    - Page refreshes every 10 minutes. -
    -
    - Not responsible for content, use at your own risk. -

    """) - with gr.Tab("DIY"): - gr.HTML("""
    -

    Easy Clone:

- Copy/paste this code into your new app.py file:

    - import gradio as gr
    - max_d=gr.Interface.load("spaces/Omnibus/Top-20-Diffusion")
    - max_d.launch()
    -

    """) - with gr.Tab("Credits"): - with gr.Row(): - with gr.Column(style="text-align:left;"): - gr.HTML(""" - - """) - with gr.Column(style="text-align:left;"): - gr.HTML(f""" - - """) - with gr.Tab("Tools"): - with gr.Tab("Draw"): - with gr.Row(): - with gr.Column(style="width=50%"): - gr.Pil(label="Crop") - with gr.Column(style="width=50%"): - gr.Pil(label="Draw") - with gr.Tab("View"): - with gr.Row(): - with gr.Column(): - gr.Pil(label="Crop").style(style="height=500") - with gr.Tab("Color Picker"): - with gr.Row(): - with gr.Column(scale=50): - gr.ColorPicker(label="Color", interactive=True) - with gr.Column(scale=50): - gr.ImagePaint(label="Draw", interactive=True) - with gr.Tab("Text"): - with gr.Row(): - with gr.Column(scale=50): - gr.Textbox(label="", lines=8, interactive=True) - with gr.Column(scale=50): - gr.Textbox(label="", lines=8, interactive=True) - - with gr.Box(visible=True) as timo: - with gr.Row(): - with gr.Column(): - input_text=gr.Textbox(label="Short Prompt") - prompt=gr.Textbox(label="Text to Image",visible=True) - with gr.Column(): - see_prompts=gr.Button("Magic Prompt") - with gr.Row(): - run=gr.Button("Launch") - clear_btn=gr.Button("Clear") - with gr.Row(): - output1=gr.Image(label=(f"{model_1}"),visible=True) - output2=gr.Image(label=(f"{model_2}"),visible=True) - output3=gr.Image(label=(f"{model_3}"),visible=True) - output4=gr.Image(label=(f"{model_4}"),visible=True) - with gr.Row(): - output5=gr.Image(label=(f"{model_5}"),visible=True) - output6=gr.Image(label=(f"{model_6}"),visible=True) - output7=gr.Image(label=(f"{model_7}"),visible=True) - output8=gr.Image(label=(f"{model_8}"),visible=True) - with gr.Row(): - output9=gr.Image(label=(f"{model_9}"),visible=True) - output10=gr.Image(label=(f"{model_10}"),visible=True) - output11=gr.Image(label=(f"{model_11}"),visible=True) - output12=gr.Image(label=(f"{model_12}"),visible=True) - with gr.Row(): - output13=gr.Image(label=(f"{model_13}"),visible=True) - output14=gr.Image(label=(f"{model_14}"),visible=True) - output15=gr.Image(label=(f"{model_15}"),visible=True) - output16=gr.Image(label=(f"{model_16}"),visible=True) - with gr.Row(): - output17=gr.Image(label=(f"{model_17}"),visible=True) - output18=gr.Image(label=(f"{model_18}"),visible=True) - output19=gr.Image(label=(f"{model_19}"),visible=True) - output20=gr.Image(label=(f"{model_20}"),visible=True) - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - - def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - - def sesh_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d=gr.update(value=0) - tog=gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d=gr.update(value=et) - else: - d=gr.update(value=0) - tog=gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d,tog - - def sesh_start(): - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp),gr.update(value=t_stamp),gr.update(value=0) - - def clear_fn(): - return None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None - - start_box.change(sesh_end,[start_box,end_box],[start_box,tog_box],every=1,show_progress=False) - run.click(sesh_start,None,[start_box,end_box,tog_box]) - run1=run.click(send_it1, inputs=[prompt], outputs=[output1]) - run2=run.click(send_it2, 
inputs=[prompt], outputs=[output2]) - run3=run.click(send_it3, inputs=[prompt], outputs=[output3]) - run4=run.click(send_it4, inputs=[prompt], outputs=[output4]) - run5=run.click(send_it5, inputs=[prompt], outputs=[output5]) - run6=run.click(send_it6, inputs=[prompt], outputs=[output6]) - run7=run.click(send_it7, inputs=[prompt], outputs=[output7]) - run8=run.click(send_it8, inputs=[prompt], outputs=[output8]) - run9=run.click(send_it9, inputs=[prompt], outputs=[output9]) - run10=run.click(send_it10, inputs=[prompt], outputs=[output10]) - run11=run.click(send_it11, inputs=[prompt], outputs=[output11]) - run12=run.click(send_it12, inputs=[prompt], outputs=[output12]) - run13=run.click(send_it13, inputs=[prompt], outputs=[output13]) - run14=run.click(send_it14, inputs=[prompt], outputs=[output14]) - run15=run.click(send_it15, inputs=[prompt], outputs=[output15]) - run16=run.click(send_it16, inputs=[prompt], outputs=[output16]) - run17=run.click(send_it17, inputs=[prompt], outputs=[output17]) - run18=run.click(send_it18, inputs=[prompt], outputs=[output18]) - run19=run.click(send_it19, inputs=[prompt], outputs=[output19]) - run20=run.click(send_it20, inputs=[prompt], outputs=[output20]) - - prompt1=see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt],cancels=[run1,run2,run3,run4,run5,run6,run7,run8,run9,run10,run11,run12,run13,run14,run15,run16,run17,run18,run19,run20]) - clear_btn.click(clear_fn, None, - [input_text,prompt,output1,output2,output3,output4,output5,output6,output7,output8,output9,output10,output11,output12,output13,output14,output15,output16,output17,output18,output19,output20], - cancels=[prompt1,run1,run2,run3,run4,run5,run6,run7,run8,run9,run10,run11,run12,run13,run14,run15,run16,run17,run18,run19,run20]) - tog_box.change(clear_it,tog_box,tog_box,cancels=[run1,run2,run3,run4,run5,run6,run7,run8,run9,run10,run11,run12,run13,run14,run15,run16,run17,run18,run19,run20,prompt1]) - -myface.queue(concurrency_count=600,status_update_rate=1) -myface.launch(inline=True,show_api=False) \ No newline at end of file diff --git a/spaces/cncn102/bingo1/tests/kblob.ts b/spaces/cncn102/bingo1/tests/kblob.ts deleted file mode 100644 index 50d48ea2abb41d36e3882d26f148182193869492..0000000000000000000000000000000000000000 --- a/spaces/cncn102/bingo1/tests/kblob.ts +++ /dev/null @@ -1,44 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = { - "imageInfo": { - "url": "https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png" - }, - "knowledgeRequest": { - "invokedSkills": ["ImageById"], - "subscriptionId": "Bing.Chat.Multimodal", - "invokedSkillsRequestData": { "enableFaceBlur": true }, - "convoData": { "convoid": "51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080", - "convotone": "Creative" } - } -} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -const jsonData = { - "imageInfo": { "url": "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png?1=1" }, - "knowledgeRequest": { - "invokedSkills": ["ImageById"], - "subscriptionId": "Bing.Chat.Multimodal", - "invokedSkillsRequestData": { "enableFaceBlur": true }, - "convoData": { "convoid": "", "convotone": "Creative" } - } -} - -fetch('https://www.bing.com/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - 'Referer': 'https://www.bing.com/search', - ...formData.getHeaders() - } - - } 
-).then(res => res.text()) - .then(res => console.log('res', res)) diff --git a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/pthread/stdatomic.h b/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/pthread/stdatomic.h deleted file mode 100644 index 81a60f102bb678eba234385a35002dfd00250886..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/pthread/stdatomic.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* - * based on vlc_atomic.h from VLC - * Copyright (C) 2010 Rémi Denis-Courmont - */ - -#ifndef COMPAT_ATOMICS_PTHREAD_STDATOMIC_H -#define COMPAT_ATOMICS_PTHREAD_STDATOMIC_H - -#include - -#define ATOMIC_FLAG_INIT 0 - -#define ATOMIC_VAR_INIT(value) (value) - -#define atomic_init(obj, value) \ -do { \ - *(obj) = (value); \ -} while(0) - -#define kill_dependency(y) ((void)0) - -#define atomic_signal_fence(order) \ - ((void)0) - -#define atomic_is_lock_free(obj) 0 - -typedef intptr_t atomic_flag; -typedef intptr_t atomic_bool; -typedef intptr_t atomic_char; -typedef intptr_t atomic_schar; -typedef intptr_t atomic_uchar; -typedef intptr_t atomic_short; -typedef intptr_t atomic_ushort; -typedef intptr_t atomic_int; -typedef intptr_t atomic_uint; -typedef intptr_t atomic_long; -typedef intptr_t atomic_ulong; -typedef intptr_t atomic_llong; -typedef intptr_t atomic_ullong; -typedef intptr_t atomic_wchar_t; -typedef intptr_t atomic_int_least8_t; -typedef intptr_t atomic_uint_least8_t; -typedef intptr_t atomic_int_least16_t; -typedef intptr_t atomic_uint_least16_t; -typedef intptr_t atomic_int_least32_t; -typedef intptr_t atomic_uint_least32_t; -typedef intptr_t atomic_int_least64_t; -typedef intptr_t atomic_uint_least64_t; -typedef intptr_t atomic_int_fast8_t; -typedef intptr_t atomic_uint_fast8_t; -typedef intptr_t atomic_int_fast16_t; -typedef intptr_t atomic_uint_fast16_t; -typedef intptr_t atomic_int_fast32_t; -typedef intptr_t atomic_uint_fast32_t; -typedef intptr_t atomic_int_fast64_t; -typedef intptr_t atomic_uint_fast64_t; -typedef intptr_t atomic_intptr_t; -typedef intptr_t atomic_uintptr_t; -typedef intptr_t atomic_size_t; -typedef intptr_t atomic_ptrdiff_t; -typedef intptr_t atomic_intmax_t; -typedef intptr_t atomic_uintmax_t; - -void avpriv_atomic_lock(void); -void avpriv_atomic_unlock(void); - -static inline void atomic_thread_fence(int order) -{ - avpriv_atomic_lock(); - avpriv_atomic_unlock(); -} - -static inline void atomic_store(intptr_t *object, intptr_t desired) -{ - avpriv_atomic_lock(); - *object = desired; - avpriv_atomic_unlock(); -} - -#define atomic_store_explicit(object, desired, order) \ - atomic_store(object, desired) - -static inline intptr_t atomic_load(intptr_t *object) -{ - intptr_t ret; - avpriv_atomic_lock(); - ret = *object; - 
avpriv_atomic_unlock(); - return ret; -} - -#define atomic_load_explicit(object, order) \ - atomic_load(object) - -static inline intptr_t atomic_exchange(intptr_t *object, intptr_t desired) -{ - intptr_t ret; - avpriv_atomic_lock(); - ret = *object; - *object = desired; - avpriv_atomic_unlock(); - return ret; -} - -#define atomic_exchange_explicit(object, desired, order) \ - atomic_exchange(object, desired) - -static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected, - intptr_t desired) -{ - int ret; - avpriv_atomic_lock(); - if (*object == *expected) { - ret = 1; - *object = desired; - } else { - ret = 0; - *expected = *object; - } - avpriv_atomic_unlock(); - return ret; -} - -#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak(object, expected, desired) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_weak(object, expected, desired) - -#define FETCH_MODIFY(opname, op) \ -static inline intptr_t atomic_fetch_ ## opname(intptr_t *object, intptr_t operand) \ -{ \ - intptr_t ret; \ - avpriv_atomic_lock(); \ - ret = *object; \ - *object = *object op operand; \ - avpriv_atomic_unlock(); \ - return ret; \ -} - -FETCH_MODIFY(add, +) -FETCH_MODIFY(sub, -) -FETCH_MODIFY(or, |) -FETCH_MODIFY(xor, ^) -FETCH_MODIFY(and, &) - -#undef FETCH_MODIFY - -#define atomic_fetch_add_explicit(object, operand, order) \ - atomic_fetch_add(object, operand) - -#define atomic_fetch_sub_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) - -#define atomic_fetch_or_explicit(object, operand, order) \ - atomic_fetch_or(object, operand) - -#define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_xor(object, operand) - -#define atomic_fetch_and_explicit(object, operand, order) \ - atomic_fetch_and(object, operand) - -#define atomic_flag_test_and_set(object) \ - atomic_exchange(object, 1) - -#define atomic_flag_test_and_set_explicit(object, order) \ - atomic_flag_test_and_set(object) - -#define atomic_flag_clear(object) \ - atomic_store(object, 0) - -#define atomic_flag_clear_explicit(object, order) \ - atomic_flag_clear(object) - -#endif /* COMPAT_ATOMICS_PTHREAD_STDATOMIC_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cavsdata.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cavsdata.c deleted file mode 100644 index 2835a4be09bf4fa1ef8a9526a7732fbced441526..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cavsdata.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. - * Copyright (c) 2006 Stefan Gehrer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "cavs.h" - -const uint8_t ff_cavs_partition_flags[30] = { - 0, //I_8X8 - 0, //P_SKIP - 0, //P_16X16 - SPLITH, //P_16X8 - SPLITV, //P_8X16 - SPLITH|SPLITV, //P_8X8 - SPLITH|SPLITV, //B_SKIP - SPLITH|SPLITV, //B_DIRECT - 0, //B_FWD_16X16 - 0, //B_BWD_16X16 - 0, //B_SYM_16X16 - FWD0|FWD1 |SPLITH, - FWD0|FWD1 |SPLITV, - BWD0|BWD1 |SPLITH, - BWD0|BWD1 |SPLITV, - FWD0|BWD1 |SPLITH, - FWD0|BWD1 |SPLITV, - BWD0|FWD1 |SPLITH, - BWD0|FWD1 |SPLITV, - FWD0|FWD1 |SYM1|SPLITH, - FWD0|FWD1 |SYM1 |SPLITV, - BWD0|FWD1 |SYM1|SPLITH, - BWD0|FWD1 |SYM1 |SPLITV, - FWD0|FWD1|SYM0 |SPLITH, - FWD0|FWD1|SYM0 |SPLITV, - FWD0|BWD1|SYM0 |SPLITH, - FWD0|BWD1|SYM0 |SPLITV, - FWD0|FWD1|SYM0|SYM1|SPLITH, - FWD0|FWD1|SYM0|SYM1 |SPLITV, - SPLITH|SPLITV, //B_8X8 = 29 -}; - -const uint8_t ff_cavs_chroma_qp[64] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 42, 43, 43, 44, 44, - 45, 45, 46, 46, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50, 50, 51 -}; - -/** mark block as "no prediction from this direction" - e.g. forward motion vector in BWD partition */ -const cavs_vector ff_cavs_dir_mv = {0,0,1,REF_DIR}; - -/** mark block as using intra prediction */ -const cavs_vector ff_cavs_intra_mv = {0,0,1,REF_INTRA}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.c deleted file mode 100644 index 85dcf2abafd9a0530bf44847de444b2a079fbd1a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * JNI public API functions - * - * Copyright (c) 2015-2016 Matthieu Bouron - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "config.h" - -#include - -#include "libavutil/error.h" -#include "jni.h" - -#if CONFIG_JNI -#include -#include - -#include "libavutil/log.h" -#include "ffjni.h" - -void *java_vm; -pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; - -int av_jni_set_java_vm(void *vm, void *log_ctx) -{ - int ret = 0; - - pthread_mutex_lock(&lock); - if (java_vm == NULL) { - java_vm = vm; - } else if (java_vm != vm) { - ret = AVERROR(EINVAL); - av_log(log_ctx, AV_LOG_ERROR, "A Java virtual machine has already been set"); - } - pthread_mutex_unlock(&lock); - - return ret; -} - -void *av_jni_get_java_vm(void *log_ctx) -{ - void *vm; - - pthread_mutex_lock(&lock); - vm = java_vm; - pthread_mutex_unlock(&lock); - - return vm; -} - -#else - -int av_jni_set_java_vm(void *vm, void *log_ctx) -{ - return AVERROR(ENOSYS); -} - -void *av_jni_get_java_vm(void *log_ctx) -{ - return NULL; -} - -#endif diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download GOGOAnime.io APK and Watch Animes with High Quality and Fast Speed.md b/spaces/congsaPfin/Manga-OCR/logs/Download GOGOAnime.io APK and Watch Animes with High Quality and Fast Speed.md deleted file mode 100644 index 76d76d9094f270f315af4d90fe0840ac3c0f5ca9..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download GOGOAnime.io APK and Watch Animes with High Quality and Fast Speed.md +++ /dev/null @@ -1,132 +0,0 @@ - -

    Gogoanime.io APK Download: Watch Anime Online for Free

    -

    If you are an anime fan, you probably know how hard it can be to find a reliable and legal source to watch your favorite shows online. There are many websites that offer anime streaming, but most of them are either illegal, low quality, or full of ads. That's why many anime lovers turn to gogoanime.io, a popular website that provides free access to thousands of anime episodes with English subtitles and dubbing.

    -

    But what if you want to watch anime on your mobile device without using a browser? Well, there is a solution for that too. You can download the gogoanime.io APK, which is an app that allows you to watch anime online for free on your Android device. In this article, we will tell you everything you need to know about gogoanime.io APK, including its features, how to download and install it, its pros and cons, and some alternatives you can try.

    -

    gogoanime.io apk download


          Download https://urlca.com/2uOavh
      



    -

    Features of Gogoanime.io APK

    -

    Huge Collection of Anime

    -

    One of the best features of gogoanime.io APK is that it has a huge collection of anime from various genres and sources. You can find anime from classic to modern, from action to romance, from comedy to horror, and more. You can also search for anime by name, genre, season, year, or popularity. You can watch anime from popular studios like Toei Animation, Madhouse, Bones, Kyoto Animation, etc.

    -

    English Subtitles and Dubbing

    -

    Another great feature of gogoanime.io APK is that it provides both subtitles and dubbing options for anime. You can choose to watch anime with English subtitles or with English dubbing, depending on your preference. You can also switch between them easily with a simple tap on the screen. This way, you can enjoy anime in your preferred language without any hassle.

    -

    Favorite List and Notifications

    -

      

    Gogoanime.io APK also allows you to add anime to your favorite list and get notified of new episodes. You can create your own personalized list of anime that you want to watch or follow. You can also get notifications when new episodes of your favorite anime are released. This way, you can keep track of your anime watching progress and never miss an episode.

    -

    User-Friendly Interface and Design

    -

    The last feature of gogoanime.io APK that we will mention is its user-friendly interface and design. The app has a simple and easy-to-use interface that lets you navigate through the app without any difficulty. You can access the main menu, the search bar, the favorite list, and the settings with just a few taps. The app also has a dark mode option that you can enable to reduce eye strain and save battery life. The app's design is also appealing and attractive, with colorful icons and images that match the anime theme.

    -

    How to Download and Install Gogoanime.io APK

    -

    Step 1: Enable Unknown Sources

    -

    Before you can download and install gogoanime.io APK, you need to enable unknown sources on your device settings. This is because the app is not available on the Google Play Store and you need to download it from a third-party source. To enable unknown sources, follow these steps:

    -
      -
    • Go to your device settings and tap on security or privacy.
    • -
    • Find the option that says unknown sources or install unknown apps and toggle it on.
    • -
    • A warning message will pop up, but you can ignore it and tap on OK.
    • -
    -

    Step 2: Download the APK File

    -

    Next, you need to download the APK file of gogoanime.io from the official website or a trusted source. To download the APK file, follow these steps:

    -
      -
    • Open your browser and go to the official website of gogoanime.io or a trusted source that provides the APK file.
    • -
    • Find the download button or link and tap on it.
    • -
    • A confirmation message will appear, but you can ignore it and tap on OK.
    • -
    • The download will start automatically and you can see the progress on your notification bar.
    • -
    -

    Step 3: Install the APK File

    -

    After downloading the APK file, you need to install it on your device. To install the APK file, follow these steps:

    -

    gogoanime watch anime online free apk
    -gogoanime app download for android
    -gogoanime apk latest version 2021
    -gogoanime io apk mod premium
    -gogoanime apk no ads
    -gogoanime io apk download for pc
    -gogoanime app download for ios
    -gogoanime apk pure
    -gogoanime io apk download uptodown
    -gogoanime io apk download apkpure
    -gogoanime watch anime online hd apk
    -gogoanime app download for firestick
    -gogoanime apk latest version 2022
    -gogoanime io apk mod unlocked
    -gogoanime apk pro
    -gogoanime io apk download for laptop
    -gogoanime app download for windows 10
    -gogoanime apk mirror
    -gogoanime io apk download for mac
    -gogoanime io apk download for smart tv
    -gogoanime watch anime online english subbed apk
    -gogoanime app download for roku
    -gogoanime apk old version
    -gogoanime io apk mod ad-free
    -gogoanime apk cracked
    -gogoanime io apk download for chromebook
    -gogoanime app download for linux
    -gogoanime apk rexdl
    -gogoanime io apk download for android tv box
    -gogoanime io apk download for bluestacks
    -gogoanime watch anime online dubbed apk
    -gogoanime app download for ps4
    -gogoanime apk 2020
    -gogoanime io apk mod no root
    -gogoanime apk premium free
    -gogoanime io apk download for kindle fire
    -gogoanime app download for xbox one
    -gogoanime apk happymod
    -gogoanime io apk download for mi tv stick
    -gogoanime io apk download for nvidia shield tv

    -
      -
    • Go to your file manager or downloads folder and find the APK file that you downloaded.
    • -
    • Tap on the APK file and a pop-up window will appear.
    • -
    • Tap on install and wait for the installation process to finish.
    • -
    • A success message will appear, but you can ignore it and tap on done.
    • -
    -
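          If you prefer to sideload from a computer instead of tapping through the steps above, the same install can be done over USB. The snippet below is only a minimal sketch, not part of the app: it assumes adb (Android platform-tools) is installed and on your PATH, USB debugging is enabled on the phone, and "gogoanime.apk" is a placeholder for whatever file name you actually downloaded.

      ```python
      # Minimal sketch: install a downloaded APK from a computer via adb.
      # Assumptions: adb is on PATH, USB debugging is enabled on the phone,
      # and "gogoanime.apk" is a placeholder file name.
      import subprocess

      def sideload_apk(apk_path: str) -> None:
          # "adb install -r" installs the APK, replacing an existing copy if present.
          subprocess.run(["adb", "install", "-r", apk_path], check=True)

      if __name__ == "__main__":
          sideload_apk("gogoanime.apk")
      ```
      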

    Step 4: Launch the App and Enjoy

    -

    The final step is to launch the app and enjoy watching anime online for free. To launch the app, follow these steps:

    -
      -
    • Go to your app drawer or home screen and find the app icon that says gogoanime.io.
    • -
    • Tap on the app icon and the app will open.
    • -
    • You can now browse through the app's categories, search for anime, add anime to your favorite list, watch anime with subtitles or dubbing, etc.
    • -
    -

    Pros and Cons of Gogoanime.io APK

    -

    Pros

    -

    Gogoanime.io APK has many pros that make it a great choice for anime fans. Some of them are:

    -
      -
    • It is free to use and does not require any registration or subscription.
    • -
    • It has a huge collection of anime from various genres and sources.
    • -
    • It provides both subtitles and dubbing options for anime.
    • -
    • It allows users to add anime to their favorite list and get notified of new episodes.
    • -
    • It has a user-friendly interface and design with a dark mode option.
    • -
    • It has high quality and fast streaming with no buffering or lagging issues.
    • -
    -

    Cons

    -

    Gogoanime.io APK also has some cons that users should be aware of. Some of them are:

    -
      -
    • It is not legal and may violate the copyright laws of some countries.
    • -
    • It may pose security risks as it is not verified by Google Play Protect or other antivirus software.
    • -
    • It may contain ads that can be annoying or intrusive.
    • -
    • It may not be compatible with some devices or operating systems.
    • -
    • It may not have some anime titles that are exclusive to other platforms.
    • -
          -

          Alternatives to Gogoanime.io APK

          -
      

    Crunchyroll

    -

    If you are looking for a legal and safe alternative to gogoanime.io APK, you can try Crunchyroll. Crunchyroll is a popular platform that offers anime, manga, and drama streaming with a subscription fee. You can watch anime from various genres and sources, with subtitles or dubbing, in high quality and fast streaming. You can also access exclusive anime titles that are not available on other platforms. Crunchyroll has an app that you can download from the Google Play Store or the App Store.

    -

    AnimeFreak

    -

    If you are looking for a free alternative to gogoanime.io APK, you can try AnimeFreak. AnimeFreak is a website that provides anime streaming without any registration or subscription. You can watch anime from various genres and sources, with subtitles or dubbing, in high quality and fast streaming. You can also add anime to your favorite list and get notified of new episodes. AnimeFreak does not have an app, but you can access it from your browser.

    -

    VIZ

    -

          If you are looking for a premium alternative to gogoanime.io APK, you can try VIZ. VIZ is a platform that offers anime and manga streaming with a subscription fee. You can watch anime from popular studios and series like Studio Ghibli, Naruto, One Piece, etc., with subtitles or dubbing, in high quality and fast streaming. You can also access exclusive anime titles that are not available on other platforms. VIZ has an app that you can download from the Google Play Store or the App Store.
      

    -

    Conclusion

    -

    Gogoanime.io APK is a great app for anime fans who want to watch anime online for free on their Android devices. It has many features that make it appealing and convenient, such as a huge collection of anime, subtitles and dubbing options, favorite list and notifications, user-friendly interface and design, etc. However, it also has some drawbacks that users should be aware of, such as legal issues, security risks, ads, compatibility issues, etc. Therefore, users should use gogoanime.io APK at their own risk and discretion.

    -

    If you are interested in downloading and installing gogoanime.io APK, you can follow the steps we provided in this article. Alternatively, you can also try some of the alternatives we suggested, such as Crunchyroll, AnimeFreak, or VIZ. These platforms offer different features and benefits that may suit your preferences and needs better.

    -

    We hope this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy watching!

    -

    FAQs

    -

    Here are some frequently asked questions related to gogoanime.io APK and their answers:

    -
      -
    • Is gogoanime.io APK safe to use?
      -Gogoanime.io APK is not verified by Google Play Protect or other antivirus software, so it may pose security risks to your device and data. It may also contain malware or viruses that can harm your device or steal your information. Therefore, you should use gogoanime.io APK at your own risk and discretion.
    • -
    • Is gogoanime.io APK legal to use?
      -Gogoanime.io APK is not legal to use in some countries where anime streaming is regulated by copyright laws. It may violate the rights of the anime creators and distributors who own the content. Therefore, you should use gogoanime.io APK at your own risk and discretion.
    • -
    • How do I update gogoanime.io APK?
      -Gogoanime.io APK does not have an automatic update feature, so you need to manually update it whenever there is a new version available. To update gogoanime.io APK, you need to download the latest APK file from the official website or a trusted source and install it over the existing app.
    • -
    • How do I uninstall gogoanime.io APK?
      -To uninstall gogoanime.io APK, you need to go to your device settings and tap on apps or applications. Find the app icon that says gogoanime.io and tap on it. Tap on uninstall and confirm your action. The app will be removed from your device.
    • -
    • What are some similar apps to gogoanime.io APK?
      -Some similar apps to gogoanime.io APK are AnimeDLR, AniMixPlay, AnimeZone, etc. These apps also provide free anime streaming on Android devices with various features and benefits.
    • -

      
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Upgrade Your Minecraft to 1.16.1 and Enjoy the New Features.md b/spaces/congsaPfin/Manga-OCR/logs/How to Upgrade Your Minecraft to 1.16.1 and Enjoy the New Features.md deleted file mode 100644 index 9ceb6fa8016b90cb08222977874d2b934c421850..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Upgrade Your Minecraft to 1.16.1 and Enjoy the New Features.md +++ /dev/null @@ -1,134 +0,0 @@ -
    -

    How to Download Minecraft 1.16.1 for Free

    -

    Minecraft is one of the most popular and creative games in the world, with millions of players enjoying its endless possibilities. If you are one of them, you might be wondering how to download the latest version of Minecraft, which is 1.16.1, for free. In this article, we will show you how to do that for both Java Edition and Bedrock Edition, as well as what's new in this update.

    -

    minecraft 1 16 1 download


    DOWNLOAD 🗸🗸🗸 https://urlca.com/2uO80E



    -

    What is Minecraft 1.16.1?

    -

    Minecraft 1.16.1 is the first minor update of the Nether Update, which was released on June 23, 2020. The Nether Update is one of the most significant updates in Minecraft history, as it completely transformed the Nether dimension, adding new biomes, mobs, structures, blocks, items, and more.

    -

    The Nether Update

    -

          The Nether Update introduced four new biomes to the Nether: the Crimson Forest, the Warped Forest, the Basalt Deltas, and the Soul Sand Valley. Each biome has its own unique features, such as vegetation, terrain, ambient sounds, and hazards. For example, the Crimson Forest is filled with red fungi and vines, while the Basalt Deltas are volcanic wastelands with lava pools and pillars of basalt.
      

    -

    The update also added new mobs to the Nether, such as the Piglins, the Hoglins, the Zoglins, and the Striders. The Piglins are hostile humanoid creatures that live in bastion remnants and trade with gold ingots. The Hoglins are huge boar-like beasts that can be bred and hunted for food. The Zoglins are zombified versions of Hoglins that attack anything in sight. The Striders are passive creatures that can walk on lava and be ridden with a saddle and a warped fungus on a stick.

    -

    Additionally, the update added new structures to the Nether, such as the bastion remnants, the ruined portals, and the nether fortresses. The bastion remnants are ancient fortifications where Piglins dwell and guard their treasures. The ruined portals are broken nether portals that can be found in both the Overworld and the Nether. The nether fortresses are massive complexes where Wither Skeletons, Blazes, and other hostile mobs spawn.

    -

    minecraft 1 16 1 download free full version
    -minecraft 1 16 1 download apk for android
    -minecraft 1 16 1 download windows 10 edition
    -minecraft 1 16 1 download mac os
    -minecraft 1 16 1 download server jar
    -minecraft 1 16 1 download mods and modpacks
    -minecraft 1 16 1 download nether update
    -minecraft 1 16 1 download optifine
    -minecraft 1 16 1 download forge
    -minecraft 1 16 1 download texture packs
    -minecraft 1 16 1 download shaders
    -minecraft 1 16 1 download maps and worlds
    -minecraft 1 16 1 download skins and skin packs
    -minecraft 1 16 1 download resource packs
    -minecraft 1 16 1 download data packs
    -minecraft 1 16 1 download seeds and coordinates
    -minecraft 1 16 1 download snapshots and pre-releases
    -minecraft 1 16 1 download realms and servers
    -minecraft 1 16 1 download multiplayer and online
    -minecraft 1 16 1 download launcher and installer
    -minecraft 1 16 1 download cracked and unblocked
    -minecraft 1 16 1 download java edition and bedrock edition
    -minecraft 1.16.1 download xbox one and ps4
    -minecraft.116.2.download switch and mobile
    -minecraft.116.3.download linux and chromebook
    -minecraft.116.4.download vr and ray tracing
    -minecraft.116.5.download fabric and curseforge
    -minecraft.116.6.download datapack generator and worldedit
    -minecraft.116.7.download command blocks and functions
    -minecraft.116.8.download custom npcs and mobs
    -minecraft.116.9.download biomes o plenty and terraforged
    -minecraft.116.10.download quark and create
    -minecraft.116.11.download immersive engineering and engineer's tools[^3^]
    -minecraft.116.12.download tinkers construct and tool belt[^3^]
    -minecraft.116.13.download astral sorcery and botania
    -minecraft.116.14.download thaumcraft and blood magic
    -minecraft.116.15.download rftools and applied energistics
    -minecraft.116.16.download industrial craft and buildcraft
    -minecraft.116.17.download twilight forest and the betweenlands
    -minecraft.116.18.download ice and fire and alex's mobs
    -minecraft.116.19.download pam's harvestcraft and cooking for blockheads
    -minecraft.116.20.download chisel and bits and little tiles
    -minecraft.116.21.download decocraft and bibliocraft
    -minecraft.116.22.download furniture mod and mr crayfish's furniture mod
    -minecraft.116.23.download pixelmon and pokecube

    -

    Furthermore, the update added new blocks and items to the Nether, such as netherite, ancient debris, nether gold ore, respawn anchors, lodestones, target blocks, soul torches and lanterns, chains, warped and crimson stems and planks, shroomlights, warped and crimson fungi and roots, nether sprouts, twisting and weeping vines, basalt, blackstone, soul soil and sand, soul fire and campfires, and more.

    -

    Bug Fixes and Improvements

    -

    Minecraft 1.16.1 is not only about adding new content to the game but also about fixing bugs and improving performance. Some of the notable bug fixes and improvements in this version are:

    -
      -
    • Fixed stability issues with Realms
    • -
    • Fixed a crash that could occur when loading a world with missing or corrupted level data
    • -
    • Fixed a crash that could occur when using a crafting table in certain situations
    • -
    • Fixed a crash that could occur when using an anvil in certain situations
    • -
    • Fixed a crash that could occur when using a smithing table in certain situations
    • -
    • Fixed a crash that could occur when using a loom in certain situations
    • -
          • Fixed a crash that could occur when using a cartography table in certain situations
          • -
          • Fixed a crash that could occur when using a grindstone in certain situations
      
    • -
    • Fixed a crash that could occur when using a stonecutter in certain situations
    • -
    • Fixed a crash that could occur when using a brewing stand in certain situations
    • -
    • Fixed a crash that could occur when using a beacon in certain situations
    • -
    • Fixed a crash that could occur when using a lectern in certain situations
    • -
    • Fixed a crash that could occur when using an enchanting table in certain situations
    • -
    • Fixed a crash that could occur when using a composter in certain situations
    • -
    • Fixed a crash that could occur when using a barrel in certain situations
    • -
    • Fixed a crash that could occur when using a dispenser or dropper in certain situations
    • -
    • Fixed a crash that could occur when using a hopper or hopper minecart in certain situations
    • -
    • Fixed a crash that could occur when using a chest or chest minecart in certain situations
    • -
    • Fixed a crash that could occur when using an ender chest or shulker box in certain situations
    • -
    • Fixed a crash that could occur when using an item frame or painting in certain situations
    • -
    • Fixed a crash that could occur when using an armor stand in certain situations
    • -
    • Fixed a crash that could occur when using a boat or minecart in certain situations
    • -
    • Fixed a crash that could occur when using fireworks or fire charges in certain situations
    • -
    • Fixed a crash that could occur when using spawn eggs or mob heads in certain situations
    • -
    • Fixed a crash that could occur when using potions or tipped arrows in certain situations
    • -
    • Fixed a crash that could occur when using banners or signs in certain situations
    • -
    • Fixed a crash that could occur when using books or maps in certain situations
    • -
    • Fixed several issues with Nether portal linking and generation
    • -
    • Fixed several issues with world generation and biomes
    • -
    • Fixed several issues with mob spawning and behavior
    • -
    • Fixed several issues with block placement and interaction
    • -
    • Fixed several issues with item durability and enchantments
    • -
    • Fixed several issues with game modes and commands
    • -
    • Fixed several issues with achievements and statistics
    • -
    • Fixed several issues with multiplayer and realms
    • -
    • Improved performance and stability of the game engine
    • -
    • Improved user interface and accessibility of the game menus
    • -
    • Improved sound effects and music of the game environment
    • -
    • Improved graphics and animations of the game visuals
    • -
    • Improved compatibility and security of the game software
    • -
    -

    How to Download Minecraft 1.16.1 for Java Edition

    -

    If you want to download Minecraft 1.16.1 for Java Edition, which is the original version of the game for PC, Mac, and Linux, you will need to follow these steps:

    -

    Download the Launcher

    -

    The first step is to download the Minecraft Launcher, which is the program that allows you to launch and update the game. You can download it from the official website of Minecraft. You will need to create an account or log in with your existing one. Then, you will need to buy the game or redeem a code if you already have one. After that, you can download and install the launcher on your device.

    -

    Select the Latest Release

    -

    The next step is to select the latest release of the game, which is 1.16.1, from the launcher. You can do this by clicking on the "Installations" tab and then on the "New" button. You can name your installation whatever you want, but make sure to select "Latest release (1.16.1)" from the "Version" dropdown menu. Then, click on "Create" and your installation will be ready.

    -

    Enjoy the Game

    -

    The final step is to enjoy the game. You can launch your installation by clicking on the "Play" button on the launcher. You can then choose to play singleplayer or multiplayer, create or join worlds, customize your settings, and explore the Nether Update.

    -

    How to Download Minecraft 1.16.1 for Bedrock Edition

    -

    If you want to download Minecraft 1.16.1 for Bedrock Edition, which is the cross-platform version of the game for Windows 10, Xbox One, PlayStation 4, Nintendo Switch, iOS, Android, and more, you will need to follow these steps:

    -

    Download the App

    -

    The first step is to download the Minecraft app, which is the application that allows you to play and update the game. You can download it from the official website of Minecraft or from the store of your device. You will need to create an account or log in with your existing one. Then, you will need to buy the game or redeem a code if you already have one. After that, you can download and install the app on your device.

    -

    Update the Game

    -

    The next step is to update the game to the latest version, which is 1.16.1, from the app. You can do this by opening the app and checking for updates. If there is an update available, you can download and install it on your device. Alternatively, you can enable automatic updates on your device settings to keep your game up to date.

    -

    Explore the Nether

    -

    The final step is to explore the Nether. You can launch the game by opening the app and tapping on the "Play" button. You can then choose to play singleplayer or multiplayer, create or join worlds, customize your settings, and enjoy the Nether Update.

    -

    Conclusion

    -

    Summary

    -

    In this article, we have shown you how to download Minecraft 1.16.1 for free for both Java Edition and Bedrock Edition. We have also explained what's new in this update, such as the Nether Update and the bug fixes and improvements. We hope you have found this article helpful and informative, and that you have fun playing Minecraft 1.16.1.

    -

    FAQs

    -

    Here are some frequently asked questions about Minecraft 1.16.1:

    -
      -
    • Q: Is Minecraft 1.16.1 free?
    • -
    • A: Yes, if you already own Minecraft for Java Edition or Bedrock Edition, you can download and play Minecraft 1.16.1 for free. However, if you don't own Minecraft yet, you will need to buy it from the official website or the store of your device.
    • -
    • Q: What is the difference between Java Edition and Bedrock Edition?
    • -
    • A: Java Edition is the original version of Minecraft for PC, Mac, and Linux, while Bedrock Edition is the cross-platform version of Minecraft for Windows 10, Xbox One, PlayStation 4, Nintendo Switch, iOS, Android, and more. They have some differences in features, gameplay, and compatibility, but they are both updated regularly and support multiplayer.
    • -
    • Q: How can I play multiplayer in Minecraft 1.16.1?
    • -
    • A: You can play multiplayer in Minecraft 1.16.1 by joining or hosting a server, a realm, or a world. A server is a public or private online game that can host many players at once. A realm is a private online game that can host up to 10 players at once and requires a subscription fee. A world is a local game that can host up to 8 players at once and requires a Wi-Fi connection.
    • -
    • Q: How can I backup my worlds in Minecraft 1.16.1?
    • -
    • A: You can backup your worlds in Minecraft 1.16.1 by copying or exporting them from your device storage to another location, such as a cloud service or a USB drive. You can also use third-party tools or mods to backup your worlds automatically or periodically.
    • -
    • Q: How can I get more content for Minecraft 1.16.1?
    • -
    • A: You can get more content for Minecraft 1.16.1 by downloading or creating resource packs, data packs, maps, skins, mods, plugins, addons, and more from various websites or platforms, such as CurseForge, Planet Minecraft, Minecraft Marketplace, etc.
    • -

      
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Traffic Rider Mod APK 99 MB - The Ultimate Guide to the Fastest and Most Realistic Motorcycle Game.md b/spaces/congsaPfin/Manga-OCR/logs/Traffic Rider Mod APK 99 MB - The Ultimate Guide to the Fastest and Most Realistic Motorcycle Game.md deleted file mode 100644 index bf153d326f87c42e69e77a63f2ca42cc6e0e347f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Traffic Rider Mod APK 99 MB - The Ultimate Guide to the Fastest and Most Realistic Motorcycle Game.md +++ /dev/null @@ -1,104 +0,0 @@ -
    -

    Traffic Rider Mod APK 99 MB: A Review

    -

    If you are a fan of motorcycle racing games, you might have heard of Traffic Rider, a popular game that offers realistic graphics, smooth controls, and thrilling gameplay. But did you know that there is a modified version of this game that reduces its size to only 99 MB, while still retaining all the features and fun? In this article, we will review Traffic Rider Mod APK 99 MB, a modded version of the original game that you can download and install on your Android device. We will also show you how to do it, and answer some frequently asked questions about this mod.

    -

    What is Traffic Rider?

    -

    A realistic motorcycle racing game

    -

    Traffic Rider is a motorcycle racing game developed by Soner Kara, a Turkish game studio. It was released in 2016 and has since gained over 100 million downloads on Google Play Store. The game is praised for its realistic graphics, smooth controls, and thrilling gameplay. You can choose from over 30 different motorcycles, each with its own specifications and sounds. You can also customize your bike with different colors and accessories.

    -

    traffic rider mod apk 99 mb


    DOWNLOAD ✶✶✶ https://urlca.com/2uOf5A



    -

    Features and gameplay

    -

    The game has two modes: career mode and endless mode. In career mode, you have to complete various missions and objectives, such as reaching a certain speed, overtaking a number of vehicles, or covering a certain distance. You can earn cash and gold by completing these missions, which you can use to buy new bikes or upgrade your existing ones. You can also unlock new locations and scenarios, such as highways, deserts, cities, or snow.

    -

    In endless mode, you can ride your bike as long as you want, without any time limit or mission. You can enjoy the scenery and the traffic, while avoiding collisions and accidents. You can also challenge yourself by increasing the difficulty level, which affects the traffic density, speed limit, and police presence. The game also has a leaderboard system, where you can compare your scores and achievements with other players around the world.

    -

    What is Traffic Rider Mod APK 99 MB?

    -

    A modified version of the original game

    -

    Traffic Rider Mod APK 99 MB is a modified version of the original game that reduces its size to only 99 MB, while still retaining all the features and fun. This mod is created by an unknown developer, who claims to have compressed the game files without compromising the quality or performance. The mod also removes some unnecessary ads and permissions from the original game, making it more user-friendly and secure.

    -

    Benefits and drawbacks of using the mod

    -

    The main benefit of using this mod is that it saves a lot of storage space on your device. If you have a low-end device or limited storage capacity, you can still enjoy this game without worrying about running out of space or slowing down your device. The mod also offers unlimited money and gold, which means you can buy any bike or upgrade you want without grinding or spending real money.

    -

          The main drawback of using this mod is that it is not an official version of the game, which means it may not be compatible with some devices or updates. It may also contain some bugs or glitches that affect the gameplay or functionality. Moreover, using this mod may violate the terms and conditions of the original game developer, which could result in a ban or a penalty from the game server. Therefore, you should use this mod at your own risk and discretion, and respect the rights and efforts of the original game developer.
      

    -

    How to download and install Traffic Rider Mod APK 99 MB?

    -

    Steps to follow

    -

    If you want to try this mod, you need to follow these steps:

    -
      -
    1. Download the Traffic Rider Mod APK 99 MB file from a reliable source. You can search for it on Google or use the link provided below.
    2. -
    3. Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
    4. -
    5. Locate the downloaded file on your device and tap on it to start the installation process.
    6. -
    7. Follow the instructions on the screen and wait for the installation to finish.
    8. -
    9. Launch the game and enjoy!
    10. -
    -

    Note: You may need to uninstall the original game before installing the mod, as they may conflict with each other.

    -

    Precautions and tips

    -

    Before downloading and installing this mod, you should take some precautions and tips into account:

    -

    traffic rider hack apk 99 mb download
    -traffic rider unlimited money mod apk 99 mb
    -traffic rider mod apk latest version 99 mb
    -traffic rider motorcycle racing mod apk 99 mb
    -traffic rider mod apk android 1 99 mb
    -traffic rider mod apk offline 99 mb
    -traffic rider mod apk revdl 99 mb
    -traffic rider mod apk rexdl 99 mb
    -traffic rider mod apk free download 99 mb
    -traffic rider mod apk unlimited everything 99 mb
    -traffic rider mod apk no root 99 mb
    -traffic rider mod apk all bikes unlocked 99 mb
    -traffic rider mod apk unlimited gold 99 mb
    -traffic rider mod apk unlimited keys 99 mb
    -traffic rider mod apk unlimited coins 99 mb
    -traffic rider mod apk unlimited cash 99 mb
    -traffic rider mod apk unlimited gems 99 mb
    -traffic rider mod apk unlimited fuel 99 mb
    -traffic rider mod apk unlimited nitro 99 mb
    -traffic rider mod apk unlimited lives 99 mb
    -traffic rider mod apk premium unlocked 99 mb
    -traffic rider mod apk pro version 99 mb
    -traffic rider mod apk full version 99 mb
    -traffic rider mod apk mega mod 99 mb
    -traffic rider mod apk super mod 99 mb
    -traffic rider mod apk god mode 99 mb
    -traffic rider mod apk high speed 99 mb
    -traffic rider mod apk high graphics 99 mb
    -traffic rider mod apk low size 99 mb
    -traffic rider mod apk low end devices 99 mb
    -traffic rider mod apk easy install 99 mb
    -traffic rider mod apk direct link 99 mb
    -traffic rider mod apk mediafire link 99 mb
    -traffic rider mod apk google drive link 99 mb
    -traffic rider mod apk dropbox link 99 mb
    -traffic rider mod apk mirror link 99 mb
    -traffic rider mod apk original link 99 mb
    -traffic rider mod apk safe link 99 mb
    -traffic rider mod apk virus free link 99 mb
    -traffic rider mod apk working link 99 mb
    -traffic rider mod apk best link 99 mb
    -traffic rider mod apk fast link 99 mb
    -traffic rider mod apk updated link 99 mb
    -traffic rider mod apk new link 99 mb
    -traffic rider mod apk old link 99 mb
    -traffic rider mod apk latest link 99 mb
    -traffic rider mod apk cracked link 99 mb
    -traffic rider mod apk patched link 99 mb
    -traffic rider mod apk fixed link 99 mb

    -
      -
    • Make sure you have enough storage space on your device, as the mod may require more space than the original game.
    • -
    • Make sure you have a stable internet connection, as the mod may need to download some additional data or updates.
    • -
    • Make a backup of your game data, in case something goes wrong or you want to revert to the original game.
    • -
    • Do not use this mod for online or multiplayer modes, as it may cause errors or bans.
    • -
    • Do not share your personal or financial information with any third-party sources or websites that offer this mod, as they may be scams or malware.
    • -
    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have reviewed Traffic Rider Mod APK 99 MB, a modified version of the original game that reduces its size to only 99 MB, while still retaining all the features and fun. We have also shown you how to download and install this mod on your Android device, and answered some frequently asked questions about this mod.

    -

    Recommendation and rating

    -

    We recommend this mod to anyone who loves motorcycle racing games, but has a low-end device or limited storage capacity. This mod offers unlimited money and gold, realistic graphics, smooth controls, and thrilling gameplay. However, you should also be aware of the drawbacks and risks of using this mod, such as compatibility issues, bugs, glitches, or bans. Therefore, you should use this mod at your own risk and discretion, and respect the rights and efforts of the original game developer.

    -

    We rate this mod 4 out of 5 stars, based on its features, performance, and user feedback. We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave them below. Thank you for reading!

    -

    FAQs

    -

    Is Traffic Rider Mod APK 99 MB safe to use?

    -

    Traffic Rider Mod APK 99 MB is safe to use as long as you download it from a reliable source and follow the installation instructions carefully. However, since it is not an official version of the game, it may contain some bugs or glitches that affect the gameplay or functionality. Moreover, using this mod may violate the terms and conditions of the original game developer, which could result in a ban or a penalty from the game server. Therefore, you should use this mod at your own risk and discretion, and respect the rights and efforts of the original game developer.

    -

    Does Traffic Rider Mod APK 99 MB require root access?

    -

    No, Traffic Rider Mod APK 99 MB does not require root access to work on your device. You can install it without rooting your device or modifying any system settings. However, if you have a rooted device, you may need to disable root detection before launching the game, as some games may not work properly on rooted devices.

    -

    What are the differences between Traffic Rider Mod APK 99 MB and Traffic Rider Pro APK?

    -

    Traffic Rider Mod APK 99 MB is a modified version of the original game that reduces its size to only 99 MB, while still retaining all the features and fun. It also offers unlimited money and gold, which means you can buy any bike or upgrade you want without grinding or spending real money. Traffic Rider Pro APK is an official version of the game that offers some premium features and benefits for a one-time payment. These include removing ads, unlocking all bikes and locations, getting double cash and gold, and getting 30% extra faster bikes. You can choose either version depending on your preference and budget.

    -

    How to update Traffic Rider Mod APK 99 MB?

    -

    To update Traffic Rider Mod APK 99 MB, you need to follow the same steps as downloading and installing it. You need to download the latest version of the mod from a reliable source and install it over the existing one. You may also need to uninstall the original game before installing the mod, as they may conflict with each other. However, you should note that updating the mod may erase your game data or progress, so you should make a backup before doing so.

    -

    How to contact the developers of Traffic Rider Mod APK 99 MB?

    -

    Unfortunately, there is no official way to contact the developers of Traffic Rider Mod APK 99 MB, as they are unknown and anonymous. You may try to find them on some online forums or communities, but there is no guarantee that they will respond or help you. Therefore, you should use this mod at your own risk and discretion, and respect the rights and efforts of the original game developer.

      
    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/David Laser Scanner 3 5 Keygen Idm How to Crack and Activate the Software for Free.md b/spaces/contluForse/HuggingGPT/assets/David Laser Scanner 3 5 Keygen Idm How to Crack and Activate the Software for Free.md deleted file mode 100644 index b9ae43fad821b4495fa3439559d98e5a70afd8dc..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/David Laser Scanner 3 5 Keygen Idm How to Crack and Activate the Software for Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

    David Laser Scanner 3 5 Keygen Idm


          Download Zip https://ssurll.com/2uzyxN
      



          - -
      
    -
    -
    -

    diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/conv2d_same.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/conv2d_same.py deleted file mode 100644 index 75f0f98d4ec1e3f4a0dc004b977815afaa25e7fc..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/conv2d_same.py +++ /dev/null @@ -1,42 +0,0 @@ -""" Conv2d w/ Same Padding - -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch -import torch.nn as nn -import torch.nn.functional as F -from typing import Tuple, Optional - -from .padding import pad_same, get_padding_value - - -def conv2d_same( - x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), - padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): - x = pad_same(x, weight.shape[-2:], stride, dilation) - return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) - - -class Conv2dSame(nn.Conv2d): - """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - super(Conv2dSame, self).__init__( - in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) - - def forward(self, x): - return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) - - -def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): - padding = kwargs.pop('padding', '') - kwargs.setdefault('bias', False) - padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) - if is_dynamic: - return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) - else: - return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) - - diff --git a/spaces/coraKong/voice-cloning-demo/app.py b/spaces/coraKong/voice-cloning-demo/app.py deleted file mode 100644 index 5bca31a5d878518ea9883ea08d47055604bfcad7..0000000000000000000000000000000000000000 --- a/spaces/coraKong/voice-cloning-demo/app.py +++ /dev/null @@ -1,92 +0,0 @@ -import gradio as gr -from TTS.api import TTS - -# Init TTS -tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=False) -zh_tts = TTS(model_name="tts_models/zh-CN/baker/tacotron2-DDC-GST", progress_bar=False, gpu=False) -de_tts = TTS(model_name="tts_models/de/thorsten/vits", gpu=False) -es_tts = TTS(model_name="tts_models/es/mai/tacotron2-DDC", progress_bar=False, gpu=False) - -def text_to_speech(text: str, speaker_wav, speaker_wav_file, language: str): - if speaker_wav_file and not speaker_wav: - speaker_wav = speaker_wav_file - file_path = "output.wav" - if language == "zh-CN": - # if speaker_wav is not None: - # zh_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path) - # else: - zh_tts.tts_to_file(text, file_path=file_path) - elif language == "de": - # if speaker_wav is not None: - # de_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path) - # else: - de_tts.tts_to_file(text, file_path=file_path) - elif language == "es": - # if speaker_wav is not None: - # es_tts.tts_to_file(text, speaker_wav=speaker_wav, file_path=file_path) - # else: - es_tts.tts_to_file(text, file_path=file_path) - else: - if speaker_wav is not None: - tts.tts_to_file(text, speaker_wav=speaker_wav, language=language, file_path=file_path) - else: - tts.tts_to_file(text, speaker=tts.speakers[0], language=language, file_path=file_path) - return file_path - - - -# inputs = [gr.Textbox(label="Input the text", 
value="", max_lines=3), -# gr.Audio(label="Voice to clone", source="microphone", type="filepath"), -# gr.Audio(label="Voice to clone", type="filepath"), -# gr.Radio(label="Language", choices=["en", "zh-CN", "fr-fr", "pt-br", "de", "es"], value="en"), -# gr.Text(intro_text, font_size=14)] -# outputs = gr.Audio(label="Output") - -# demo = gr.Interface(fn=text_to_speech, inputs=inputs, outputs=outputs) - -# demo.launch() - - -title = "Voice-Cloning-Demo" - -def toggle(choice): - if choice == "mic": - return gr.update(visible=True, value=None), gr.update(visible=False, value=None) - else: - return gr.update(visible=False, value=None), gr.update(visible=True, value=None) - -def handle_language_change(choice): - if choice == "zh-CN" or choice == "de" or choice == "es": - return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) - else: - return gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -warming_text = """Please note that Chinese, German, and Spanish are currently not supported for voice cloning.""" - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - text_input = gr.Textbox(label="Input the text", value="", max_lines=3) - lan_input = gr.Radio(label="Language", choices=["en", "fr-fr", "pt-br", "zh-CN", "de", "es"], value="en") - gr.Markdown(warming_text) - radio = gr.Radio(["mic", "file"], value="mic", - label="How would you like to upload your audio?") - audio_input_mic = gr.Audio(label="Voice to clone", source="microphone", type="filepath", visible=True) - audio_input_file = gr.Audio(label="Voice to clone", type="filepath", visible=False) - - with gr.Row(): - with gr.Column(): - btn_clear = gr.Button("Clear") - with gr.Column(): - btn = gr.Button("Submit", variant="primary") - with gr.Column(): - audio_output = gr.Audio(label="Output") - - # gr.Examples(examples, fn=inference, inputs=[audio_file, text_input], - # outputs=audio_output, cache_examples=True) - btn.click(text_to_speech, inputs=[text_input, audio_input_mic, - audio_input_file, lan_input], outputs=audio_output) - radio.change(toggle, radio, [audio_input_mic, audio_input_file]) - lan_input.change(handle_language_change, lan_input, [radio, audio_input_mic, audio_input_file]) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/cvlab/zero123-live/ldm/modules/diffusionmodules/__init__.py b/spaces/cvlab/zero123-live/ldm/modules/diffusionmodules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cvlab/zero123-live/taming-transformers/scripts/sample_fast.py b/spaces/cvlab/zero123-live/taming-transformers/scripts/sample_fast.py deleted file mode 100644 index ff546c7dcbe459807ac3b70f834ccc1082fe8b4e..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/taming-transformers/scripts/sample_fast.py +++ /dev/null @@ -1,260 +0,0 @@ -import argparse, os, sys, glob -import torch -import time -import numpy as np -from omegaconf import OmegaConf -from PIL import Image -from tqdm import tqdm, trange -from einops import repeat - -from main import instantiate_from_config -from taming.modules.transformer.mingpt import sample_with_past - - -rescale = lambda x: (x + 1.) / 2. 
- - -def chw_to_pillow(x): - return Image.fromarray((255*rescale(x.detach().cpu().numpy().transpose(1,2,0))).clip(0,255).astype(np.uint8)) - - -@torch.no_grad() -def sample_classconditional(model, batch_size, class_label, steps=256, temperature=None, top_k=None, callback=None, - dim_z=256, h=16, w=16, verbose_time=False, top_p=None): - log = dict() - assert type(class_label) == int, f'expecting type int but type is {type(class_label)}' - qzshape = [batch_size, dim_z, h, w] - assert not model.be_unconditional, 'Expecting a class-conditional Net2NetTransformer.' - c_indices = repeat(torch.tensor([class_label]), '1 -> b 1', b=batch_size).to(model.device) # class token - t1 = time.time() - index_sample = sample_with_past(c_indices, model.transformer, steps=steps, - sample_logits=True, top_k=top_k, callback=callback, - temperature=temperature, top_p=top_p) - if verbose_time: - sampling_time = time.time() - t1 - print(f"Full sampling takes about {sampling_time:.2f} seconds.") - x_sample = model.decode_to_img(index_sample, qzshape) - log["samples"] = x_sample - log["class_label"] = c_indices - return log - - -@torch.no_grad() -def sample_unconditional(model, batch_size, steps=256, temperature=None, top_k=None, top_p=None, callback=None, - dim_z=256, h=16, w=16, verbose_time=False): - log = dict() - qzshape = [batch_size, dim_z, h, w] - assert model.be_unconditional, 'Expecting an unconditional model.' - c_indices = repeat(torch.tensor([model.sos_token]), '1 -> b 1', b=batch_size).to(model.device) # sos token - t1 = time.time() - index_sample = sample_with_past(c_indices, model.transformer, steps=steps, - sample_logits=True, top_k=top_k, callback=callback, - temperature=temperature, top_p=top_p) - if verbose_time: - sampling_time = time.time() - t1 - print(f"Full sampling takes about {sampling_time:.2f} seconds.") - x_sample = model.decode_to_img(index_sample, qzshape) - log["samples"] = x_sample - return log - - -@torch.no_grad() -def run(logdir, model, batch_size, temperature, top_k, unconditional=True, num_samples=50000, - given_classes=None, top_p=None): - batches = [batch_size for _ in range(num_samples//batch_size)] + [num_samples % batch_size] - if not unconditional: - assert given_classes is not None - print("Running in pure class-conditional sampling mode. I will produce " - f"{num_samples} samples for each of the {len(given_classes)} classes, " - f"i.e. 
{num_samples*len(given_classes)} in total.") - for class_label in tqdm(given_classes, desc="Classes"): - for n, bs in tqdm(enumerate(batches), desc="Sampling Class"): - if bs == 0: break - logs = sample_classconditional(model, batch_size=bs, class_label=class_label, - temperature=temperature, top_k=top_k, top_p=top_p) - save_from_logs(logs, logdir, base_count=n * batch_size, cond_key=logs["class_label"]) - else: - print(f"Running in unconditional sampling mode, producing {num_samples} samples.") - for n, bs in tqdm(enumerate(batches), desc="Sampling"): - if bs == 0: break - logs = sample_unconditional(model, batch_size=bs, temperature=temperature, top_k=top_k, top_p=top_p) - save_from_logs(logs, logdir, base_count=n * batch_size) - - -def save_from_logs(logs, logdir, base_count, key="samples", cond_key=None): - xx = logs[key] - for i, x in enumerate(xx): - x = chw_to_pillow(x) - count = base_count + i - if cond_key is None: - x.save(os.path.join(logdir, f"{count:06}.png")) - else: - condlabel = cond_key[i] - if type(condlabel) == torch.Tensor: condlabel = condlabel.item() - os.makedirs(os.path.join(logdir, str(condlabel)), exist_ok=True) - x.save(os.path.join(logdir, str(condlabel), f"{count:06}.png")) - - -def get_parser(): - def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ("yes", "true", "t", "y", "1"): - return True - elif v.lower() in ("no", "false", "f", "n", "0"): - return False - else: - raise argparse.ArgumentTypeError("Boolean value expected.") - - parser = argparse.ArgumentParser() - parser.add_argument( - "-r", - "--resume", - type=str, - nargs="?", - help="load from logdir or checkpoint in logdir", - ) - parser.add_argument( - "-o", - "--outdir", - type=str, - nargs="?", - help="path where the samples will be logged to.", - default="" - ) - parser.add_argument( - "-b", - "--base", - nargs="*", - metavar="base_config.yaml", - help="paths to base configs. Loaded from left-to-right. " - "Parameters can be overwritten or added with command-line options of the form `--key value`.", - default=list(), - ) - parser.add_argument( - "-n", - "--num_samples", - type=int, - nargs="?", - help="num_samples to draw", - default=50000 - ) - parser.add_argument( - "--batch_size", - type=int, - nargs="?", - help="the batch size", - default=25 - ) - parser.add_argument( - "-k", - "--top_k", - type=int, - nargs="?", - help="top-k value to sample with", - default=250, - ) - parser.add_argument( - "-t", - "--temperature", - type=float, - nargs="?", - help="temperature value to sample with", - default=1.0 - ) - parser.add_argument( - "-p", - "--top_p", - type=float, - nargs="?", - help="top-p value to sample with", - default=1.0 - ) - parser.add_argument( - "--classes", - type=str, - nargs="?", - help="specify comma-separated classes to sample from. 
Uses 1000 classes per default.", - default="imagenet" - ) - return parser - - -def load_model_from_config(config, sd, gpu=True, eval_mode=True): - model = instantiate_from_config(config) - if sd is not None: - model.load_state_dict(sd) - if gpu: - model.cuda() - if eval_mode: - model.eval() - return {"model": model} - - -def load_model(config, ckpt, gpu, eval_mode): - # load the specified checkpoint - if ckpt: - pl_sd = torch.load(ckpt, map_location="cpu") - global_step = pl_sd["global_step"] - print(f"loaded model from global step {global_step}.") - else: - pl_sd = {"state_dict": None} - global_step = None - model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"] - return model, global_step - - -if __name__ == "__main__": - sys.path.append(os.getcwd()) - parser = get_parser() - - opt, unknown = parser.parse_known_args() - assert opt.resume - - ckpt = None - - if not os.path.exists(opt.resume): - raise ValueError("Cannot find {}".format(opt.resume)) - if os.path.isfile(opt.resume): - paths = opt.resume.split("/") - try: - idx = len(paths)-paths[::-1].index("logs")+1 - except ValueError: - idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt - logdir = "/".join(paths[:idx]) - ckpt = opt.resume - else: - assert os.path.isdir(opt.resume), opt.resume - logdir = opt.resume.rstrip("/") - ckpt = os.path.join(logdir, "checkpoints", "last.ckpt") - - base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml"))) - opt.base = base_configs+opt.base - - configs = [OmegaConf.load(cfg) for cfg in opt.base] - cli = OmegaConf.from_dotlist(unknown) - config = OmegaConf.merge(*configs, cli) - - model, global_step = load_model(config, ckpt, gpu=True, eval_mode=True) - - if opt.outdir: - print(f"Switching logdir from '{logdir}' to '{opt.outdir}'") - logdir = opt.outdir - - if opt.classes == "imagenet": - given_classes = [i for i in range(1000)] - else: - cls_str = opt.classes - assert not cls_str.endswith(","), 'class string should not end with a ","' - given_classes = [int(c) for c in cls_str.split(",")] - - logdir = os.path.join(logdir, "samples", f"top_k_{opt.top_k}_temp_{opt.temperature:.2f}_top_p_{opt.top_p}", - f"{global_step}") - - print(f"Logging to {logdir}") - os.makedirs(logdir, exist_ok=True) - - run(logdir, model, opt.batch_size, opt.temperature, opt.top_k, unconditional=model.be_unconditional, - given_classes=given_classes, num_samples=opt.num_samples, top_p=opt.top_p) - - print("done.") diff --git a/spaces/cymic/Waifu_Diffusion_Webui/modules/codeformer_model.py b/spaces/cymic/Waifu_Diffusion_Webui/modules/codeformer_model.py deleted file mode 100644 index 1709e5fe50eafe6dee20e9e4456b44c0a5fb1150..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/modules/codeformer_model.py +++ /dev/null @@ -1,140 +0,0 @@ -import os -import sys -import traceback - -import cv2 -import torch - -import modules.face_restoration -import modules.shared -from modules import shared, devices, modelloader -from modules.paths import script_path, models_path - -# codeformer people made a choice to include modified basicsr library to their project which makes -# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN. -# I am making a choice to include some files from codeformer to work around this issue. 
-model_dir = "Codeformer" -model_path = os.path.join(models_path, model_dir) -model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' - -have_codeformer = False -codeformer = None - - -def setup_model(dirname): - global model_path - if not os.path.exists(model_path): - os.makedirs(model_path) - - path = modules.paths.paths.get("CodeFormer", None) - if path is None: - return - - try: - from torchvision.transforms.functional import normalize - from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img - from facelib.utils.face_restoration_helper import FaceRestoreHelper - from modules.shared import cmd_opts - - net_class = CodeFormer - - class FaceRestorerCodeFormer(modules.face_restoration.FaceRestoration): - def name(self): - return "CodeFormer" - - def __init__(self, dirname): - self.net = None - self.face_helper = None - self.cmd_dir = dirname - - def create_models(self): - - if self.net is not None and self.face_helper is not None: - self.net.to(devices.device_codeformer) - return self.net, self.face_helper - model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth') - if len(model_paths) != 0: - ckpt_path = model_paths[0] - else: - print("Unable to load codeformer model.") - return None, None - net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer) - checkpoint = torch.load(ckpt_path)['params_ema'] - net.load_state_dict(checkpoint) - net.eval() - - face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer) - - self.net = net - self.face_helper = face_helper - - return net, face_helper - - def send_model_to(self, device): - self.net.to(device) - self.face_helper.face_det.to(device) - self.face_helper.face_parse.to(device) - - def restore(self, np_image, w=None): - np_image = np_image[:, :, ::-1] - - original_resolution = np_image.shape[0:2] - - self.create_models() - if self.net is None or self.face_helper is None: - return np_image - - self.send_model_to(devices.device_codeformer) - - self.face_helper.clean_all() - self.face_helper.read_image(np_image) - self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) - self.face_helper.align_warp_face() - - for idx, cropped_face in enumerate(self.face_helper.cropped_faces): - cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) - normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) - cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer) - - try: - with torch.no_grad(): - output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0] - restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)) - del output - torch.cuda.empty_cache() - except Exception as error: - print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr) - restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) - - restored_face = restored_face.astype('uint8') - self.face_helper.add_restored_face(restored_face) - - self.face_helper.get_inverse_affine(None) - - restored_img = self.face_helper.paste_faces_to_input_image() - restored_img = restored_img[:, :, ::-1] - - if original_resolution != 
restored_img.shape[0:2]: - restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR) - - self.face_helper.clean_all() - - if shared.opts.face_restoration_unload: - self.send_model_to(devices.cpu) - - return restored_img - - global have_codeformer - have_codeformer = True - - global codeformer - codeformer = FaceRestorerCodeFormer(dirname) - shared.face_restorers.append(codeformer) - - except Exception: - print("Error setting up CodeFormer:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - # sys.path = stored_sys_path diff --git a/spaces/d3vindia/RAPODIS/app.py b/spaces/d3vindia/RAPODIS/app.py deleted file mode 100644 index 021abae4ba3a116c108ad9540fa62a319471f854..0000000000000000000000000000000000000000 --- a/spaces/d3vindia/RAPODIS/app.py +++ /dev/null @@ -1,104 +0,0 @@ -import gradio as gr -import cv2 -import requests -import os - -from ultralytics import YOLO - -file_urls = [ - 'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1', - 'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1', - 'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1' -] - -def download_file(url, save_name): - url = url - if not os.path.exists(save_name): - file = requests.get(url) - open(save_name, 'wb').write(file.content) - -for i, url in enumerate(file_urls): - if 'mp4' in file_urls[i]: - download_file( - file_urls[i], - f"video.mp4" - ) - else: - download_file( - file_urls[i], - f"image_{i}.jpg" - ) - -model = YOLO('best.pt') -path = [['image_0.jpg'], ['image_1.jpg']] -video_path = [['video.mp4']] - -def show_preds_image(image_path): - image = cv2.imread(image_path) - outputs = model.predict(source=image_path) - results = outputs[0].cpu().numpy() - for i, det in enumerate(results.boxes.xyxy): - cv2.rectangle( - image, - (int(det[0]), int(det[1])), - (int(det[2]), int(det[3])), - color=(0, 0, 255), - thickness=2, - lineType=cv2.LINE_AA - ) - return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - -inputs_image = [ - gr.components.Image(type="filepath", label="Input Image"), -] -outputs_image = [ - gr.components.Image(type="numpy", label="Output Image"), -] -interface_image = gr.Interface( - fn=show_preds_image, - inputs=inputs_image, - outputs=outputs_image, - title="Pothole detector app", - examples=path, - cache_examples=False, -) - -def show_preds_video(video_path): - cap = cv2.VideoCapture(video_path) - while(cap.isOpened()): - ret, frame = cap.read() - if ret: - frame_copy = frame.copy() - outputs = model.predict(source=frame) - results = outputs[0].cpu().numpy() - for i, det in enumerate(results.boxes.xyxy): - cv2.rectangle( - frame_copy, - (int(det[0]), int(det[1])), - (int(det[2]), int(det[3])), - color=(0, 0, 255), - thickness=2, - lineType=cv2.LINE_AA - ) - yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB) - -inputs_video = [ - gr.components.Video(type="filepath", label="Input Video"), - -] -outputs_video = [ - gr.components.Image(type="numpy", label="Output Video"), -] -interface_video = gr.Interface( - fn=show_preds_video, - inputs=inputs_video, - outputs=outputs_video, - title="Pothole detector", - examples=video_path, - cache_examples=False, -) - -gr.TabbedInterface( - [interface_image, interface_video], - tab_names=['Image inference', 'Video inference'] -).queue().launch() \ No newline at end of file diff --git 
a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/backbones/iresnet2060.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/backbones/iresnet2060.py deleted file mode 100644 index 21d1122144d207637d2444cba1f68fe630c89f31..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/backbones/iresnet2060.py +++ /dev/null @@ -1,176 +0,0 @@ -import torch -from torch import nn - -assert torch.__version__ >= "1.8.1" -from torch.utils.checkpoint import checkpoint_sequential - -__all__ = ['iresnet2060'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, ) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05, ) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) - self.features = 
nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def checkpoint(self, func, num_seg, x): - if self.training: - return checkpoint_sequential(func, num_seg, x) - else: - return func(x) - - def forward(self, x): - with torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.checkpoint(self.layer2, 20, x) - x = self.checkpoint(self.layer3, 100, x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def _iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet2060(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs) diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/configs/__init__.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/models/arcface_torch/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/dajuzi/img-to-music/app.py b/spaces/dajuzi/img-to-music/app.py deleted file mode 100644 index a325b27b8177f9bca294439724ec16c2da2f0169..0000000000000000000000000000000000000000 --- a/spaces/dajuzi/img-to-music/app.py +++ /dev/null @@ -1,163 +0,0 @@ -import time -import base64 -import gradio as gr -from sentence_transformers import SentenceTransformer - -import httpx -import json - -import os -import requests -import urllib - -from os import path -from pydub import AudioSegment - -#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator") -img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2") - -from share_btn import community_icon_html, loading_icon_html, share_js - -def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode): - print("calling clip interrogator") - #prompt = img_to_text(uploaded_image, "ViT-L (best for Stable Diffusion 1.*)", "fast", fn_index=1)[0] - prompt = img_to_text(uploaded_image, 'fast', 4, fn_index=1)[0] - print(prompt) - music_result = generate_track_by_prompt(prompt, track_duration, 
gen_intensity, gen_mode) - print(music_result) - return music_result[0], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -from utils import get_tags_for_prompts, get_mubert_tags_embeddings, get_pat - -minilm = SentenceTransformer('all-MiniLM-L6-v2') -mubert_tags_embeddings = get_mubert_tags_embeddings(minilm) - - -def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20): - - r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM', - json={ - "method": "RecordTrackTTM", - "params": { - "pat": pat, - "duration": duration, - "format": "wav", - "intensity":gen_intensity, - "tags": tags, - "mode": gen_mode - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, rdata['error']['text'] - trackurl = rdata['data']['tasks'][0]['download_link'] - - print('Generating track ', end='') - for i in range(maxit): - r = httpx.get(trackurl) - if r.status_code == 200: - return trackurl - time.sleep(1) - - -def generate_track_by_prompt(prompt, duration, gen_intensity, gen_mode): - try: - pat = get_pat("prodia@prodia.com") - _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, [prompt, ])[0] - result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode) - print(result) - return result, ",".join(tags), "Success" - except Exception as e: - return None, "", str(e) - -def convert_mp3_to_wav(mp3_filepath): - - url = mp3_filepath - save_as = "file.mp3" - - data = urllib.request.urlopen(url) - - f = open(save_as,'wb') - f.write(data.read()) - f.close() - - wave_file="file.wav" - - sound = AudioSegment.from_mp3(save_as) - sound.export(wave_file, format="wav") - - return wave_file - -article = """ - - - -
-You may also like:
    - - -""" - -with gr.Blocks(css="style.css") as demo: - with gr.Column(elem_id="col-container"): - - gr.HTML("""
-Image to Music
-Sends an image in to CLIP Interrogator to generate a text prompt which is then run through Mubert text-to-music to generate music from the input image!
    """) - - input_img = gr.Image(type="filepath", elem_id="input-img") - music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output").style(height="5rem") - - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - with gr.Accordion(label="Music Generation Options", open=False): - track_duration = gr.Slider(minimum=20, maximum=120, value=30, step=5, label="Track duration", elem_id="duration-inp") - with gr.Row(): - gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="medium", label="Intensity") - gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="track") - - generate = gr.Button("Generate Music from Image") - - gr.HTML(article) - - generate.click(get_prompts, inputs=[input_img,track_duration,gen_intensity,gen_mode], outputs=[music_output, share_button, community_icon, loading_icon], api_name="i2m") - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=32, concurrency_count=20).launch() \ No newline at end of file diff --git a/spaces/datainsight1/Medical_Prescriptions/app.py b/spaces/datainsight1/Medical_Prescriptions/app.py deleted file mode 100644 index 9ede631def2e71052c371e09954f189f3ffa88dd..0000000000000000000000000000000000000000 --- a/spaces/datainsight1/Medical_Prescriptions/app.py +++ /dev/null @@ -1,60 +0,0 @@ - -import re -import gradio as gr - -import torch -from transformers import DonutProcessor, VisionEncoderDecoderModel - -#processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") -#model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") -#processor = DonutProcessor.from_pretrained("Iqra56/ENGLISHDONUT") -#model = VisionEncoderDecoderModel.from_pretrained("Iqra56/ENGLISHDONUT") -processor = DonutProcessor.from_pretrained("Iqra56/DONUTWOKEYS") -model = VisionEncoderDecoderModel.from_pretrained("Iqra56/DONUTWOKEYS") -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -def process_document(image): - # prepare encoder inputs - pixel_values = processor(image, return_tensors="pt").pixel_values - - # prepare decoder inputs - task_prompt = "" - decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids - - # generate answer - outputs = model.generate( - pixel_values.to(device), - decoder_input_ids=decoder_input_ids.to(device), - max_length=model.decoder.config.max_position_embeddings, - early_stopping=True, - pad_token_id=processor.tokenizer.pad_token_id, - eos_token_id=processor.tokenizer.eos_token_id, - use_cache=True, - num_beams=1, - bad_words_ids=[[processor.tokenizer.unk_token_id]], - return_dict_in_generate=True, - ) - - # postprocess - sequence = processor.batch_decode(outputs.sequences)[0] - sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") - sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token - - return processor.token2json(sequence) - -description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on CORD (document parsing). To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below." -article = "

    Donut: OCR-free Document Understanding Transformer | Github Repo

    " - -demo = gr.Interface( - fn=process_document, - inputs="image", - outputs="json", - title="Demo: Donut 🍩 for Document Parsing", - description=description, - article=article, - enable_queue=True, - examples=[["Binder1_Page_48_Image_0001.png"], ["SKMBT_75122072616550_Page_50_Image_0001.png"]], - cache_examples=False) - -demo.launch() diff --git a/spaces/dbredvick/whisper-webui/app-local.py b/spaces/dbredvick/whisper-webui/app-local.py deleted file mode 100644 index d8eabbc62924dab3d0cc03a8a2373ffffe01eadc..0000000000000000000000000000000000000000 --- a/spaces/dbredvick/whisper-webui/app-local.py +++ /dev/null @@ -1,3 +0,0 @@ -# Run the app with no audio file restrictions -from app import create_ui -create_ui(-1) \ No newline at end of file diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py deleted file mode 100644 index 004bb06b091ceb777cca2c02f8481a2785a46d35..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Calculate the area of a glyph.""" - -from fontTools.pens.basePen import BasePen - - -__all__ = ["AreaPen"] - - -class AreaPen(BasePen): - def __init__(self, glyphset=None): - BasePen.__init__(self, glyphset) - self.value = 0 - - def _moveTo(self, p0): - self._p0 = self._startPoint = p0 - - def _lineTo(self, p1): - x0, y0 = self._p0 - x1, y1 = p1 - self.value -= (x1 - x0) * (y1 + y0) * 0.5 - self._p0 = p1 - - def _qCurveToOne(self, p1, p2): - # https://github.com/Pomax/bezierinfo/issues/44 - p0 = self._p0 - x0, y0 = p0[0], p0[1] - x1, y1 = p1[0] - x0, p1[1] - y0 - x2, y2 = p2[0] - x0, p2[1] - y0 - self.value -= (x2 * y1 - x1 * y2) / 3 - self._lineTo(p2) - self._p0 = p2 - - def _curveToOne(self, p1, p2, p3): - # https://github.com/Pomax/bezierinfo/issues/44 - p0 = self._p0 - x0, y0 = p0[0], p0[1] - x1, y1 = p1[0] - x0, p1[1] - y0 - x2, y2 = p2[0] - x0, p2[1] - y0 - x3, y3 = p3[0] - x0, p3[1] - y0 - self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15 - self._lineTo(p3) - self._p0 = p3 - - def _closePath(self): - self._lineTo(self._startPoint) - del self._p0, self._startPoint - - def _endPath(self): - if self._p0 != self._startPoint: - # Area is not defined for open contours. 
- raise NotImplementedError - del self._p0, self._startPoint diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-0f9363c8.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-0f9363c8.css deleted file mode 100644 index 793ad00d7162ad414fc8ccc6df6d9a9b8799646e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Button-0f9363c8.css +++ /dev/null @@ -1 +0,0 @@ -.block.svelte-90oupt{position:relative;margin:0;box-shadow:var(--block-shadow);border-width:var(--block-border-width);border-color:var(--block-border-color);border-radius:var(--block-radius);background:var(--block-background-fill);width:100%;line-height:var(--line-sm)}.block.border_focus.svelte-90oupt{border-color:var(--color-accent)}.padded.svelte-90oupt{padding:var(--block-padding)}.hidden.svelte-90oupt{display:none}.hide-container.svelte-90oupt{margin:0;box-shadow:none;--block-border-width:0;background:transparent;padding:0;overflow:visible}div.svelte-e8n7p6{margin-bottom:var(--spacing-lg);color:var(--block-info-text-color);font-weight:var(--block-info-text-weight);font-size:var(--block-info-text-size);line-height:var(--line-sm)}span.has-info.svelte-1gfkn6j{margin-bottom:var(--spacing-xs)}span.svelte-1gfkn6j:not(.has-info){margin-bottom:var(--spacing-lg)}span.svelte-1gfkn6j{display:inline-block;position:relative;z-index:var(--layer-4);border:solid var(--block-title-border-width) var(--block-title-border-color);border-radius:var(--block-title-radius);background:var(--block-title-background-fill);padding:var(--block-title-padding);color:var(--block-title-text-color);font-weight:var(--block-title-text-weight);font-size:var(--block-title-text-size);line-height:var(--line-sm)}.hide.svelte-1gfkn6j{margin:0;height:0}div.svelte-1mwvhlq{display:inline-flex;align-items:center;z-index:var(--layer-2);box-shadow:var(--block-label-shadow);border:var(--block-label-border-width) solid var(--border-color-primary);border-top:none;border-left:none;border-radius:var(--block-label-radius);background:var(--block-label-background-fill);padding:var(--block-label-padding);pointer-events:none;color:var(--block-label-text-color);font-weight:var(--block-label-text-weight);font-size:var(--block-label-text-size);line-height:var(--line-sm)}.gr-group div.svelte-1mwvhlq{border-top-left-radius:0}div.float.svelte-1mwvhlq{position:absolute;top:var(--block-label-margin);left:var(--block-label-margin)}div.svelte-1mwvhlq:not(.float){position:static;margin-top:var(--block-label-margin);margin-left:var(--block-label-margin)}.hide.svelte-1mwvhlq{height:0}span.svelte-1mwvhlq{opacity:.8;margin-right:var(--size-2);width:calc(var(--block-label-text-size) - 1px);height:calc(var(--block-label-text-size) - 1px)}.hide-label.svelte-1mwvhlq{box-shadow:none;border-width:0;background:transparent;overflow:visible}button.svelte-1030q2h{display:flex;justify-content:center;align-items:center;gap:1px;z-index:var(--layer-1);box-shadow:var(--shadow-drop);border:1px solid var(--button-secondary-border-color);border-radius:var(--radius-sm);background:var(--background-fill-primary);padding:2px;color:var(--block-label-text-color)}button.svelte-1030q2h:hover{cursor:pointer;border:2px solid var(--button-secondary-border-color-hover);padding:1px;color:var(--block-label-text-color)}span.svelte-1030q2h{padding:0 
1px;font-size:10px}div.svelte-1030q2h{padding:2px;width:14px;height:14px}.pending.svelte-1030q2h{animation:svelte-1030q2h-flash .5s infinite}@keyframes svelte-1030q2h-flash{0%{opacity:.5}50%{opacity:1}to{opacity:.5}}.empty.svelte-lk9eg8{display:flex;justify-content:center;align-items:center;margin-top:calc(0px - var(--size-6));height:var(--size-full)}.icon.svelte-lk9eg8{opacity:.5;height:var(--size-5);color:var(--body-text-color)}.small.svelte-lk9eg8{min-height:calc(var(--size-32) - 20px)}.large.svelte-lk9eg8{min-height:calc(var(--size-64) - 20px)}.unpadded_box.svelte-lk9eg8{margin-top:0}.small_parent.svelte-lk9eg8{min-height:100%!important}.wrap.svelte-1ck5uk8{display:flex;flex-direction:column;justify-content:center;min-height:var(--size-60);color:var(--block-label-text-color);line-height:var(--line-md)}.or.svelte-1ck5uk8{color:var(--body-text-color-subdued)}@media (min-width: 768px){.wrap.svelte-1ck5uk8{font-size:var(--text-lg)}}button.svelte-cmf5ev,a.svelte-cmf5ev{display:inline-flex;justify-content:center;align-items:center;transition:var(--button-transition);box-shadow:var(--button-shadow);padding:var(--size-0-5) var(--size-2);text-align:center}button.svelte-cmf5ev:hover,button[disabled].svelte-cmf5ev,a.svelte-cmf5ev:hover,a.disabled.svelte-cmf5ev{box-shadow:var(--button-shadow-hover)}button.svelte-cmf5ev:active,a.svelte-cmf5ev:active{box-shadow:var(--button-shadow-active)}button[disabled].svelte-cmf5ev,a.disabled.svelte-cmf5ev{opacity:.5;filter:grayscale(30%);cursor:not-allowed}.hidden.svelte-cmf5ev{display:none}.primary.svelte-cmf5ev{border:var(--button-border-width) solid var(--button-primary-border-color);background:var(--button-primary-background-fill);color:var(--button-primary-text-color)}.primary.svelte-cmf5ev:hover,.primary[disabled].svelte-cmf5ev{border-color:var(--button-primary-border-color-hover);background:var(--button-primary-background-fill-hover);color:var(--button-primary-text-color-hover)}.secondary.svelte-cmf5ev{border:var(--button-border-width) solid var(--button-secondary-border-color);background:var(--button-secondary-background-fill);color:var(--button-secondary-text-color)}.secondary.svelte-cmf5ev:hover,.secondary[disabled].svelte-cmf5ev{border-color:var(--button-secondary-border-color-hover);background:var(--button-secondary-background-fill-hover);color:var(--button-secondary-text-color-hover)}.stop.svelte-cmf5ev{border:var(--button-border-width) solid var(--button-cancel-border-color);background:var(--button-cancel-background-fill);color:var(--button-cancel-text-color)}.stop.svelte-cmf5ev:hover,.stop[disabled].svelte-cmf5ev{border-color:var(--button-cancel-border-color-hover);background:var(--button-cancel-background-fill-hover);color:var(--button-cancel-text-color-hover)}.sm.svelte-cmf5ev{border-radius:var(--button-small-radius);padding:var(--button-small-padding);font-weight:var(--button-small-text-weight);font-size:var(--button-small-text-size)}.lg.svelte-cmf5ev{border-radius:var(--button-large-radius);padding:var(--button-large-padding);font-weight:var(--button-large-text-weight);font-size:var(--button-large-text-size)}.button-icon.svelte-cmf5ev{width:var(--text-xl);height:var(--text-xl);margin-right:var(--spacing-xl)} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_version.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_version.py deleted file mode 100644 index 4c8911305680c1083b2da9b87ece12bc36f3a9e1..0000000000000000000000000000000000000000 --- 
a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# This file must be kept very simple, because it is consumed from several -# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc. - -# We use a simple scheme: -# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev -# where the +dev versions are never released into the wild, they're just what -# we stick into the VCS in between releases. -# -# This is compatible with PEP 440: -# http://legacy.python.org/dev/peps/pep-0440/ -# via the use of the "local suffix" "+dev", which is disallowed on index -# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we -# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before* -# 1.0.0.) - -__version__ = "0.14.0" diff --git a/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Phind.py b/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Phind.py deleted file mode 100644 index 9fa8ec821f701d7841432e498a11ac9dd017978c..0000000000000000000000000000000000000000 --- a/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Phind.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import json -import time -import subprocess - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://phind.com' -model = ['gpt-4'] -supports_stream = True - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'model': model, - 'messages': messages}, separators=(',', ':')) - - cmd = ['python', f'{path}/helpers/phind.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - if b'Just a moment...' in line: - os.system('clear' if os.name == 'posix' else 'cls') - yield 'Clouflare error, please try again...' - os._exit(0) - - else: - if b'ping - 2023-' in line: - continue - - yield line.decode('cp1251') #[:-1] - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py deleted file mode 100644 index bbd4aa694b769a0903c505383d9634de8ebd4063..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ /dev/null @@ -1,160 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -import numpy as np -import torch - -from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel -from diffusers.utils import slow, torch_device -from diffusers.utils.testing_utils import require_torch_gpu, skip_mps - -from ...pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS -from ...test_pipelines_common import PipelineTesterMixin - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = DanceDiffusionPipeline - params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS - required_optional_params = PipelineTesterMixin.required_optional_params - { - "callback", - "latents", - "callback_steps", - "output_type", - "num_images_per_prompt", - } - batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS - test_attention_slicing = False - test_cpu_offload = False - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet1DModel( - block_out_channels=(32, 32, 64), - extra_in_channels=16, - sample_size=512, - sample_rate=16_000, - in_channels=2, - out_channels=2, - flip_sin_to_cos=True, - use_timestep_embedding=False, - time_embedding_type="fourier", - mid_block_type="UNetMidBlock1D", - down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), - up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), - ) - scheduler = IPNDMScheduler() - - components = { - "unet": unet, - "scheduler": scheduler, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "batch_size": 1, - "generator": generator, - "num_inference_steps": 4, - } - return inputs - - def test_dance_diffusion(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - pipe = DanceDiffusionPipeline(**components) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output = pipe(**inputs) - audio = output.audios - - audio_slice = audio[0, -3:, -3:] - - assert audio.shape == (1, 2, components["unet"].sample_size) - expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - - @skip_mps - def test_save_load_local(self): - return super().test_save_load_local() - - @skip_mps - def test_dict_tuple_outputs_equivalent(self): - return super().test_dict_tuple_outputs_equivalent() - - @skip_mps - def test_save_load_optional_components(self): - return super().test_save_load_optional_components() - - @skip_mps - def test_attention_slicing_forward_pass(self): - return super().test_attention_slicing_forward_pass() - - -@slow -@require_torch_gpu -class PipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_dance_diffusion(self): - device = torch_device - - pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k") - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) - audio = output.audios - - audio_slice = audio[0, -3:, -3:] - - assert audio.shape == (1, 2, 
pipe.unet.sample_size) - expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020]) - - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - - def test_dance_diffusion_fp16(self): - device = torch_device - - pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) - audio = output.audios - - audio_slice = audio[0, -3:, -3:] - - assert audio.shape == (1, 2, pipe.unet.sample_size) - expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341]) - - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/options/train_options.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/options/train_options.py deleted file mode 100644 index 1337bfdd5f372b5c686a91b394a2aadbe5741f44..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/options/train_options.py +++ /dev/null @@ -1,53 +0,0 @@ -"""This script contains the training options for Deep3DFaceRecon_pytorch -""" - -from .base_options import BaseOptions -from util import util - -class TrainOptions(BaseOptions): - """This class includes training options. - - It also includes shared options defined in BaseOptions. - """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) - # dataset parameters - # for train - parser.add_argument('--data_root', type=str, default='./', help='dataset root') - parser.add_argument('--flist', type=str, default='datalist/train/masks.txt', help='list of mask names of training set') - parser.add_argument('--batch_size', type=int, default=32) - parser.add_argument('--dataset_mode', type=str, default='flist', help='chooses how datasets are loaded. [None | flist]') - parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') - parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') - parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') - parser.add_argument('--preprocess', type=str, default='shift_scale_rot_flip', help='scaling and cropping of images at load time [shift_scale_rot_flip | shift_scale | shift | shift_rot_flip ]') - parser.add_argument('--use_aug', type=util.str2bool, nargs='?', const=True, default=True, help='whether to use data augmentation') - - # for val - parser.add_argument('--flist_val', type=str, default='datalist/val/masks.txt', help='list of mask names of val set') - parser.add_argument('--batch_size_val', type=int, default=32) - - - # visualization parameters - parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen') - parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') - - # network saving and loading parameters - parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') - parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs') - parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq') - parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration') - parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') - parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...') - parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') - parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint') - - # training parameters - parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs with the initial learning rate') - parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam') - parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy. 
[linear | step | plateau | cosine]') - parser.add_argument('--lr_decay_epochs', type=int, default=10, help='multiply by a gamma every lr_decay_epochs epoches') - - self.isTrain = True - return parser diff --git a/spaces/deven367/yt-video-annotator-hf/setup.py b/spaces/deven367/yt-video-annotator-hf/setup.py deleted file mode 100644 index be41f3b8ba62e2b78170808654d4cf92328e190d..0000000000000000000000000000000000000000 --- a/spaces/deven367/yt-video-annotator-hf/setup.py +++ /dev/null @@ -1,54 +0,0 @@ -from pkg_resources import parse_version -from configparser import ConfigParser -import setuptools -assert parse_version(setuptools.__version__)>=parse_version('36.2') - -# note: all settings are in settings.ini; edit there, not here -config = ConfigParser(delimiters=['=']) -config.read('settings.ini') -cfg = config['DEFAULT'] - -cfg_keys = 'version description keywords author author_email'.split() -expected = cfg_keys + "lib_name user branch license status min_python audience language".split() -for o in expected: assert o in cfg, "missing expected setting: {}".format(o) -setup_cfg = {o:cfg[o] for o in cfg_keys} - -licenses = { - 'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'), - 'mit': ('MIT License', 'OSI Approved :: MIT License'), - 'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'), - 'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'), - 'bsd3': ('BSD License', 'OSI Approved :: BSD License'), -} -statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha', - '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ] -py_versions = '3.6 3.7 3.8 3.9 3.10'.split() - -requirements = cfg.get('requirements','').split() -if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split() -min_python = cfg['min_python'] -lic = licenses.get(cfg['license'].lower(), (cfg['license'], None)) -dev_requirements = (cfg.get('dev_requirements') or '').split() - -setuptools.setup( - name = cfg['lib_name'], - license = lic[0], - classifiers = [ - 'Development Status :: ' + statuses[int(cfg['status'])], - 'Intended Audience :: ' + cfg['audience'].title(), - 'Natural Language :: ' + cfg['language'].title(), - ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []), - url = cfg['git_url'], - packages = setuptools.find_packages(), - include_package_data = True, - install_requires = requirements, - extras_require={ 'dev': dev_requirements }, - dependency_links = cfg.get('dep_links','').split(), - python_requires = '>=' + cfg['min_python'], - long_description = open('README.md').read(), - long_description_content_type = 'text/markdown', - zip_safe = False, - entry_points = { - 'console_scripts': cfg.get('console_scripts','').split(), - }, - **setup_cfg) diff --git a/spaces/diacanFperku/AutoGPT/((LINK)) Download Keygen Xforce For Maya 2011 ((LINK)) Download.md b/spaces/diacanFperku/AutoGPT/((LINK)) Download Keygen Xforce For Maya 2011 ((LINK)) Download.md deleted file mode 100644 index 2de17b930d4cbdf64aedea7bbbb8b03135c09da0..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/((LINK)) Download Keygen Xforce For Maya 2011 ((LINK)) Download.md +++ /dev/null @@ -1,12 +0,0 @@ -
    -

    if you can not find the file you want, just send a message and we will send the file to you. download autodesk autocad 2010 free keygen x32 bits for windows 32bit. if you can not find the file you want, just send a message and we will send the file to you.

    -

    autodesk maya 2012 xforce keygen 64 bit, autodesk maya 2012 keygen xforce, autodesk maya xforce keygen 2010. autodesk maya xforce keygen 2013, autodesk maya xforce keygen 2011, autodesk maya xforce keygen 2017, autodesk maya xforce keygen 2015,.

    -

    download keygen xforce for Maya 2011 download


    DOWNLOADhttps://gohhs.com/2uFVlN



    -

    autodesk maya xforce keygen 2013, autodesk maya xforce keygen 2011, autodesk maya xforce keygen 2017, autodesk maya xforce keygen 2015,. autodesk maya xforce keygen 2013, autodesk maya xforce keygen 2011, autodesk maya xforce keygen 2017, autodesk maya xforce keygen 2015,.

    -

    xforce keygen maya 2011, xforce keygen autodesk maya 2011 download, autodesk xforce keygen autodesk maya 2011 download, xforce keygen. autodesk xforce keygen autodesk maya 2011, autodesk xforce keygen autodesk maya 2011 download, xforce keygen.

    -

    download autodesk maya 2008 x32 bit (free) released. autodesk inventor 2010 crack. autocad 2017 xforce keygen download, autocad 2017 xforce keygen generator free download,. thanks to: ruba james for sharing these key for autodesk 2017.

    -

    this download is not available in your country? autodesk vray 1.3.8.0 for autodesk maya 2011 autodesk maya 2011 xforce keygen download. autocad 2017 xforce keygen download, autocad 2017 xforce keygen, autocad 2017 xforce keygen download, autocad 2017 xforce keygen download.

    -

    for download xforce keygen for autodesk maya 2011 autodesk maya 2011 xforce keygen, download autodesk xforce keygen for maya 2011 autodesk maya 2011, download autodesk xforce keygen for maya 2011 autodesk maya 2011, download autodesk xforce keygen for maya 2011 autodesk maya 2011.

    -

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/ChessBase Fritz Trainer MONSTER DVD Collection FritzTrainer Chess SDVL Videoless.md b/spaces/diacanFperku/AutoGPT/ChessBase Fritz Trainer MONSTER DVD Collection FritzTrainer Chess SDVL Videoless.md deleted file mode 100644 index 03e6000fa63411964435de549afd66140d780e66..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/ChessBase Fritz Trainer MONSTER DVD Collection FritzTrainer Chess SDVL Videoless.md +++ /dev/null @@ -1,6 +0,0 @@ -

    ChessBase Fritz Trainer MONSTER DVD Collection FritzTrainer Chess SDVL Videoless


    Download Zip 🗸🗸🗸 https://gohhs.com/2uFVsl



    -
    -Chess pieces with cubes power point slides and ppt diagram templates · Chess Plastics ... Fritz Trainer MONSTER DVD Collection (FritzTrainer Chess) SDVL Videoless ... CHESSBASE PLAYCHESS ГЕРМАНИЯ пълен списък с резултати на ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/District 13 Ultimatum 2009 Dvdrip-english Dubbed Torrent.md b/spaces/diacanFperku/AutoGPT/District 13 Ultimatum 2009 Dvdrip-english Dubbed Torrent.md deleted file mode 100644 index 7aff2a5a13d84d037e984e17a47e9d5d0b92ef03..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/District 13 Ultimatum 2009 Dvdrip-english Dubbed Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

    District 13 Ultimatum 2009 Dvdrip-english Dubbed Torrent


    DOWNLOADhttps://gohhs.com/2uFSXe



    - - d5da3c52bf
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Express VPN 7.5.5 Crack 2020.md b/spaces/diacanFperku/AutoGPT/Express VPN 7.5.5 Crack 2020.md deleted file mode 100644 index 258af3b17eb3dc359ddfbb205c34b5368866d082..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Express VPN 7.5.5 Crack 2020.md +++ /dev/null @@ -1,58 +0,0 @@ -

    Express VPN 7.5.5 Crack 2020


    Download Zip ✸✸✸ https://gohhs.com/2uFUu3



    -
    -Redistribution and use in source and binary forms, with or without - - modification, are permitted provided that the following conditions - - are met: - - 1. Redistributions of source code must retain the above copyright - - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - - notice, this list of conditions and the following disclaimer in - - the documentation and/or other materials provided with the - - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS - - IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - - TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - - PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - - HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - - TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - -/* - - * prototypes for libmd. - - * - md_init(const char *, int) - - * - md_open(const char *, int) - - * - md_write(const char *, int, const unsigned char *, int) - - * - md_read(char *, int) 4fefd39f24
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Friends With Benefits 1080p Yify Torrent [HOT].md b/spaces/diacanFperku/AutoGPT/Friends With Benefits 1080p Yify Torrent [HOT].md deleted file mode 100644 index 85cccf645bd45b40a358eeb099b2cecb3cf6fd84..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Friends With Benefits 1080p Yify Torrent [HOT].md +++ /dev/null @@ -1,6 +0,0 @@ -

    friends with benefits 1080p yify torrent


    Download · https://gohhs.com/2uFTPW



    - - 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/dineshreddy/WALT/configs/_base_/datasets/walt_vehicle.py b/spaces/dineshreddy/WALT/configs/_base_/datasets/walt_vehicle.py deleted file mode 100644 index 466fa524d0f43b8684a01abe57188501787db8a4..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/configs/_base_/datasets/walt_vehicle.py +++ /dev/null @@ -1,49 +0,0 @@ -dataset_type = 'WaltDataset' -data_root = 'data/cwalt_train/' -data_root_test = 'data/cwalt_test/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=5, - workers_per_gpu=5, - train=dict( - type=dataset_type, - ann_file=data_root + '/', - img_prefix=data_root + '/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root_test + '/', - img_prefix=data_root_test + '/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root_test + '/', - img_prefix=data_root_test + '/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/anchor_free_head.py b/spaces/dineshreddy/WALT/mmdet/models/dense_heads/anchor_free_head.py deleted file mode 100644 index 1814a0cc4f577f470f74f025440073a0aaa1ebd0..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/anchor_free_head.py +++ /dev/null @@ -1,340 +0,0 @@ -from abc import abstractmethod - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): - """Anchor-free head (FCOS, Fovea, RepPoints, etc.). - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - stacked_convs (int): Number of stacking convs of the head. - strides (tuple): Downsample factor of each feature map. - dcn_on_last_conv (bool): If true, use dcn in the last layer of - towers. Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Default: "auto". - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. 
- train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - """ # noqa: W605 - - _version = 1 - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=4, - strides=(4, 8, 16, 32, 64), - dcn_on_last_conv=False, - conv_bias='auto', - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - conv_cfg=None, - norm_cfg=None, - train_cfg=None, - test_cfg=None): - super(AnchorFreeHead, self).__init__() - self.num_classes = num_classes - self.cls_out_channels = num_classes - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.dcn_on_last_conv = dcn_on_last_conv - assert conv_bias == 'auto' or isinstance(conv_bias, bool) - self.conv_bias = conv_bias - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - - self._init_layers() - - def _init_layers(self): - """Initialize layers of the head.""" - self._init_cls_convs() - self._init_reg_convs() - self._init_predictor() - - def _init_cls_convs(self): - """Initialize classification conv layers of the head.""" - self.cls_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_reg_convs(self): - """Initialize bbox regression conv layers of the head.""" - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_predictor(self): - """Initialize predictor layers of the head.""" - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.conv_cls, std=0.01, bias=bias_cls) - normal_init(self.conv_reg, std=0.01) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Hack some keys of the model state dict so that can load checkpoints - of previous version.""" - version = local_metadata.get('version', None) - if version is None: - # the key is different in early versions - # for example, 'fcos_cls' become 'conv_cls' now - bbox_head_keys = [ - k for k in state_dict.keys() if k.startswith(prefix) - ] - ori_predictor_keys = [] - new_predictor_keys = [] - # e.g. 
'fcos_cls' or 'fcos_reg' - for key in bbox_head_keys: - ori_predictor_keys.append(key) - key = key.split('.') - conv_name = None - if key[1].endswith('cls'): - conv_name = 'conv_cls' - elif key[1].endswith('reg'): - conv_name = 'conv_reg' - elif key[1].endswith('centerness'): - conv_name = 'conv_centerness' - else: - assert NotImplementedError - if conv_name is not None: - key[1] = conv_name - new_predictor_keys.append('.'.join(key)) - else: - ori_predictor_keys.pop(-1) - for i in range(len(new_predictor_keys)): - state_dict[new_predictor_keys[i]] = state_dict.pop( - ori_predictor_keys[i]) - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually contain classification scores and bbox predictions. - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - """ - return multi_apply(self.forward_single, feats)[:2] - - def forward_single(self, x): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - - Returns: - tuple: Scores for each class, bbox predictions, features - after classification and regression conv layers, some - models needs these features like FCOS. - """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - cls_score = self.conv_cls(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - bbox_pred = self.conv_reg(reg_feat) - return cls_score, bbox_pred, cls_feat, reg_feat - - @abstractmethod - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - """ - - raise NotImplementedError - - @abstractmethod - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - img_metas, - cfg=None, - rescale=None): - """Transform network output for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_points * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W) - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used - rescale (bool): If True, return boxes in original image space - """ - - raise NotImplementedError - - @abstractmethod - def get_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - """ - raise NotImplementedError - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points of a single scale level.""" - h, w = featmap_size - x_range = torch.arange(w, dtype=dtype, device=device) - y_range = torch.arange(h, dtype=dtype, device=device) - y, x = torch.meshgrid(y_range, x_range) - if flatten: - y = y.flatten() - x = x.flatten() - return y, x - - def get_points(self, featmap_sizes, dtype, device, flatten=False): - """Get points according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - dtype (torch.dtype): Type of points. - device (torch.device): Device of points. - - Returns: - tuple: points of each image. - """ - mlvl_points = [] - for i in range(len(featmap_sizes)): - mlvl_points.append( - self._get_points_single(featmap_sizes[i], self.strides[i], - dtype, device, flatten)) - return mlvl_points - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/spaces/dineshreddy/WALT/mmdet/models/losses/focal_loss.py b/spaces/dineshreddy/WALT/mmdet/models/losses/focal_loss.py deleted file mode 100644 index 493907c6984d532175e0351daf2eafe4b9ff0256..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/losses/focal_loss.py +++ /dev/null @@ -1,181 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -# This method is only for debugging -def py_sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - """PyTorch version of `Focal Loss `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the - number of classes - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. 
- avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - pred_sigmoid = pred.sigmoid() - target = target.type_as(pred) - pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) - focal_weight = (alpha * target + (1 - alpha) * - (1 - target)) * pt.pow(gamma) - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') * focal_weight - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -def sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - r"""A warpper of cuda version `Focal Loss - `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the number - of classes. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - # Function.apply does not accept keyword arguments, so the decorator - # "weighted_loss" is not applicable - loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None, - 'none') - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -@LOSSES.register_module() -class FocalLoss(nn.Module): - - def __init__(self, - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - reduction='mean', - loss_weight=1.0): - """`Focal Loss `_ - - Args: - use_sigmoid (bool, optional): Whether to the prediction is - used for sigmoid or softmax. Defaults to True. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and - "sum". - loss_weight (float, optional): Weight of loss. Defaults to 1.0. - """ - super(FocalLoss, self).__init__() - assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' 
- self.use_sigmoid = use_sigmoid - self.gamma = gamma - self.alpha = alpha - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". - - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.use_sigmoid: - if torch.cuda.is_available() and pred.is_cuda: - calculate_loss_func = sigmoid_focal_loss - else: - num_classes = pred.size(1) - target = F.one_hot(target, num_classes=num_classes + 1) - target = target[:, :num_classes] - calculate_loss_func = py_sigmoid_focal_loss - - loss_cls = self.loss_weight * calculate_loss_func( - pred, - target, - weight, - gamma=self.gamma, - alpha=self.alpha, - reduction=reduction, - avg_factor=avg_factor) - - else: - raise NotImplementedError - return loss_cls diff --git a/spaces/dlmn/BHASHAVANI/app.py b/spaces/dlmn/BHASHAVANI/app.py deleted file mode 100644 index d02101b2f9543e3635334b3ccb5dd7ab315ca27b..0000000000000000000000000000000000000000 --- a/spaces/dlmn/BHASHAVANI/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import torch - -import gradio as gr -import yt_dlp as youtube_dl -from transformers import pipeline -from transformers.pipelines.audio_utils import ffmpeg_read - -import tempfile -import os - -MODEL_NAME = "openai/whisper-large-v2" -BATCH_SIZE = 8 -FILE_LIMIT_MB = 1000 -YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files - -device = 0 if torch.cuda.is_available() else "cpu" - -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) - - -def transcribe(inputs, task): - if inputs is None: - raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.") - - text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"] - return text - - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
    ' - "
    " - ) - return HTML_str - -def download_yt_audio(yt_url, filename): - info_loader = youtube_dl.YoutubeDL() - - try: - info = info_loader.extract_info(yt_url, download=False) - except youtube_dl.utils.DownloadError as err: - raise gr.Error(str(err)) - - file_length = info["duration_string"] - file_h_m_s = file_length.split(":") - file_h_m_s = [int(sub_length) for sub_length in file_h_m_s] - - if len(file_h_m_s) == 1: - file_h_m_s.insert(0, 0) - if len(file_h_m_s) == 2: - file_h_m_s.insert(0, 0) - file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2] - - if file_length_s > YT_LENGTH_LIMIT_S: - yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S)) - file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s)) - raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.") - - ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"} - - with youtube_dl.YoutubeDL(ydl_opts) as ydl: - try: - ydl.download([yt_url]) - except youtube_dl.utils.ExtractorError as err: - raise gr.Error(str(err)) - - -def yt_transcribe(yt_url, task, max_filesize=75.0): - html_embed_str = _return_yt_html_embed(yt_url) - - with tempfile.TemporaryDirectory() as tmpdirname: - filepath = os.path.join(tmpdirname, "video.mp4") - download_yt_audio(yt_url, filepath) - with open(filepath, "rb") as f: - inputs = f.read() - - inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate) - inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate} - - text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"] - - return html_embed_str, text - - -demo = gr.Blocks() - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="BHASHAVANI: Transcribe Audio 🎙️", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! ." - ), - allow_flagging="never", -) - -file_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Audio file"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="BHASHAVANI: Transcribe Audio 🎧", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! 
🤗 " - ), - allow_flagging="never", -) - -yt_transcribe = gr.Interface( - fn=yt_transcribe, - inputs=[ - gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe") - ], - outputs=["html", "text"], - layout="horizontal", - theme="huggingface", - title="BHASHAVANI: Transcribe YouTube", - description=( - "Transcribe long-form YouTube videos with the click of a button🤗 " - ), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"]) - -demo.launch(enable_queue=True) - diff --git a/spaces/duycse1603/math2tex/ScanSSD/layers/functions/__init__.py b/spaces/duycse1603/math2tex/ScanSSD/layers/functions/__init__.py deleted file mode 100644 index 79555601e6d891e58393af1464d338d5ed786b72..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/ScanSSD/layers/functions/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .detection import Detect -from .prior_box import PriorBox - - -__all__ = ['Detect', 'PriorBox'] diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/mel_processing.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/ennet/ChatDev/online_log/static/js/main.js b/spaces/ennet/ChatDev/online_log/static/js/main.js deleted file mode 100644 index 3776dae87524b5fea0ca2f2e5d40b2bf3e3cd0ca..0000000000000000000000000000000000000000 --- a/spaces/ennet/ChatDev/online_log/static/js/main.js +++ /dev/null @@ -1,111 +0,0 @@ -function scrollToBottom() { - var scrollContainer = document.getElementById('chat-box'); - scrollContainer.scrollTop = scrollContainer.scrollHeight; -} - -function append_message(role, text, avatarUrl) { - - var message_container = $("
    ").addClass("message-container"); - var avatar_element = $("").addClass("avatar"); - var role_element = $("

    ").addClass("role").text(role); - - if (avatarUrl) { - avatar_element.css("background-image", `url(${avatarUrl})`); - } else { - avatar_element.css("background-color", "green"); - } - - message_container.append(role_element); - message_container.append(avatar_element); - - var parsedText = role === 'System' ? parseSystemMessage(text) : parseCodeBlocks(text, role); - - message_container.append(parsedText); - - $("#chat-box").append(message_container); - scrollToBottom(); -} - -function parseCodeBlocks(text, role) { - var parts = text.split(/(```[\s\S]*?```)/g); - var parsedText = $("
    ").addClass("message-text"); - parts.forEach(part => { - if (part.startsWith("```") && role != "System") { - var trimmedBlock = part.trim(); - var language = trimmedBlock.match(/^```(\w+)/); - if (language) { - language = language[1]; - var codeContent = trimmedBlock.replace(/^```(\w+)/, '').replace(/```$/, ''); - var codeBlockHTML = ` -
    -
    ${role} - ${language}
    -
    ${hljs.highlightAuto(codeContent, [language]).value}
    -
    - `; - parsedText.append(codeBlockHTML); - } - } else { - parsedText.append(marked(_.escape(part), {breaks: true})); - } - }); - return parsedText; -} - - -function get_new_messages() { - - $.getJSON("/get_messages", function (data) { - var lastDisplayedMessageIndex = $("#chat-box .message-container").length; - - for (var i = lastDisplayedMessageIndex; i < data.length; i++) { - var role = data[i].role; - var text = data[i].text; - var avatarUrl = data[i].avatarUrl; - - append_message(role, text, avatarUrl); - - } - }); -} - -function parseSystemMessage(text) { - var message = $("
    ").addClass("message-text").addClass("system-message"); - var firstLine = text.split('\n')[0]; - var collapsed = true; - - var messageContent = $("
    ").html(marked(firstLine, { breaks: true })).addClass("original-markdown"); - var originalMarkdown = $("
    ").html(marked(text, { breaks: true })).addClass("original-markdown"); - - var expandButton = $("") - .addClass("expand-button") - .text("Expand") - .click(function () { - if (collapsed) { - messageContent.hide(); - originalMarkdown.show(); - expandButton.text("Collapse"); - } else { - messageContent.show(); - originalMarkdown.hide(); - expandButton.text("Expand"); - } - collapsed = !collapsed; - }); - - message.append(messageContent); - message.append(originalMarkdown); - message.append(expandButton); - - originalMarkdown.hide(); - - return message; -} - - -$(document).ready(function () { - get_new_messages(); - setInterval(function () { - get_new_messages(); - }, 1000); -}); - diff --git a/spaces/erbanku/gpt-academic/request_llm/README.md b/spaces/erbanku/gpt-academic/request_llm/README.md deleted file mode 100644 index 4a912d10136dc99a3ebbe1e228e98f6ab63ad277..0000000000000000000000000000000000000000 --- a/spaces/erbanku/gpt-academic/request_llm/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# 如何使用其他大语言模型 - -## ChatGLM - -- 安装依赖 `pip install -r request_llm/requirements_chatglm.txt` -- 修改配置,在config.py中将LLM_MODEL的值改为"chatglm" - -``` sh -LLM_MODEL = "chatglm" -``` -- 运行! -``` sh -`python main.py` -``` - - ---- -## Text-Generation-UI (TGUI,调试中,暂不可用) - -### 1. 部署TGUI -``` sh -# 1 下载模型 -git clone https://github.com/oobabooga/text-generation-webui.git -# 2 这个仓库的最新代码有问题,回滚到几周之前 -git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d -# 3 切换路径 -cd text-generation-webui -# 4 安装text-generation的额外依赖 -pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers -# 5 下载模型 -python download-model.py facebook/galactica-1.3b -# 其他可选如 facebook/opt-1.3b -# facebook/galactica-1.3b -# facebook/galactica-6.7b -# facebook/galactica-120b -# facebook/pygmalion-1.3b 等 -# 详情见 https://github.com/oobabooga/text-generation-webui - -# 6 启动text-generation -python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b -``` - -### 2. 修改config.py - -``` sh -# LLM_MODEL格式: tgui:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致 -LLM_MODEL = "tgui:galactica-1.3b@localhost:7860" -``` - -### 3. 运行! -``` sh -cd chatgpt-academic -python main.py -``` diff --git a/spaces/eruuin/something/index.html b/spaces/eruuin/something/index.html deleted file mode 100644 index 7f9ac35b8a58fbfe5495ce39195100c888819dfc..0000000000000000000000000000000000000000 --- a/spaces/eruuin/something/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - Eruuin's Space - - - -
    -

    Welcome to my Space!

    -

    You can modify this app directly by editing index.html in the Files and versions tab.

    -

    - Also don't forget to check the - Spaces documentation. -

    -
    - - diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/to_v2/sort_test.py b/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/to_v2/sort_test.py deleted file mode 100644 index ce55787535c21cb174fd393e532c515f010389da..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/to_v2/sort_test.py +++ /dev/null @@ -1,18 +0,0 @@ - - - -a = { - "aa", 1, -"aaa", 1, -"aaaa", 1, -"aaaaaa", 1, -"aaaaaaa", 1, - - "baa", 3, -"baaa", 2, -"baaaa", 2, -"baaaaaa", 2, -"baaaaaaa", 2, -} - -sorted(a.items(), key=lambda kv:(kv[1], )) \ No newline at end of file diff --git a/spaces/evaluate-metric/ter/ter.py b/spaces/evaluate-metric/ter/ter.py deleted file mode 100644 index 4adb9986cc4c5cd08fe03246d4dc8f54a31e0839..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/ter/ter.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2021 The HuggingFace Evaluate Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" TER metric as available in sacrebleu. """ -import datasets -import sacrebleu as scb -from packaging import version -from sacrebleu import TER - -import evaluate - - -_CITATION = """\ -@inproceedings{snover-etal-2006-study, - title = "A Study of Translation Edit Rate with Targeted Human Annotation", - author = "Snover, Matthew and - Dorr, Bonnie and - Schwartz, Rich and - Micciulla, Linnea and - Makhoul, John", - booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", - month = aug # " 8-12", - year = "2006", - address = "Cambridge, Massachusetts, USA", - publisher = "Association for Machine Translation in the Americas", - url = "https://aclanthology.org/2006.amta-papers.25", - pages = "223--231", -} -@inproceedings{post-2018-call, - title = "A Call for Clarity in Reporting {BLEU} Scores", - author = "Post, Matt", - booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", - month = oct, - year = "2018", - address = "Belgium, Brussels", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/W18-6319", - pages = "186--191", -} -""" - -_DESCRIPTION = """\ -TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a -hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu -(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found -here: https://github.com/jhclark/tercom. - -The implementation here is slightly different from sacrebleu in terms of the required input format. The length of -the references and hypotheses lists need to be the same, so you may need to transpose your references compared to -sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 - -See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
-""" - -_KWARGS_DESCRIPTION = """ -Produces TER scores alongside the number of edits and reference length. - -Args: - predictions (list of str): The system stream (a sequence of segments). - references (list of list of str): A list of one or more reference streams (each a sequence of segments). - normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. - ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. - support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, - as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. - Only applies if `normalized = True`. Defaults to `False`. - case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. - -Returns: - 'score' (float): TER score (num_edits / sum_ref_lengths * 100) - 'num_edits' (int): The cumulative number of edits - 'ref_length' (float): The cumulative average reference length - -Examples: - Example 1: - >>> predictions = ["does this sentence match??", - ... "what about this sentence?", - ... "What did the TER metric user say to the developer?"] - >>> references = [["does this sentence match", "does this sentence match!?!"], - ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], - ... ["Your jokes are...", "...TERrible"]] - >>> ter = evaluate.load("ter") - >>> results = ter.compute(predictions=predictions, - ... references=references, - ... case_sensitive=True) - >>> print(results) - {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0} - - Example 2: - >>> predictions = ["does this sentence match??", - ... "what about this sentence?"] - >>> references = [["does this sentence match", "does this sentence match!?!"], - ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] - >>> ter = evaluate.load("ter") - >>> results = ter.compute(predictions=predictions, - ... references=references, - ... case_sensitive=True) - >>> print(results) - {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0} - - Example 3: - >>> predictions = ["does this sentence match??", - ... "what about this sentence?"] - >>> references = [["does this sentence match", "does this sentence match!?!"], - ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] - >>> ter = evaluate.load("ter") - >>> results = ter.compute(predictions=predictions, - ... references=references, - ... normalized=True, - ... case_sensitive=True) - >>> print(results) - {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5} - - Example 4: - >>> predictions = ["does this sentence match??", - ... "what about this sentence?"] - >>> references = [["does this sentence match", "does this sentence match!?!"], - ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] - >>> ter = evaluate.load("ter") - >>> results = ter.compute(predictions=predictions, - ... references=references, - ... ignore_punct=True, - ... case_sensitive=False) - >>> print(results) - {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0} - - Example 5: - >>> predictions = ["does this sentence match??", - ... "what about this sentence?", - ... "What did the TER metric user say to the developer?"] - >>> references = [["does this sentence match", "does this sentence match!?!"], - ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], - ... 
["Your jokes are...", "...TERrible"]] - >>> ter = evaluate.load("ter") - >>> results = ter.compute(predictions=predictions, - ... references=references, - ... ignore_punct=True, - ... case_sensitive=False) - >>> print(results) - {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0} -""" - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class Ter(evaluate.Metric): - def _info(self): - if version.parse(scb.__version__) < version.parse("1.4.12"): - raise ImportWarning( - "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" - 'You can install it with `pip install "sacrebleu>=1.4.12"`.' - ) - return evaluate.MetricInfo( - description=_DESCRIPTION, - citation=_CITATION, - homepage="http://www.cs.umd.edu/~snover/tercom/", - inputs_description=_KWARGS_DESCRIPTION, - features=[ - datasets.Features( - { - "predictions": datasets.Value("string", id="sequence"), - "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"), - } - ), - datasets.Features( - { - "predictions": datasets.Value("string", id="sequence"), - "references": datasets.Value("string", id="sequence"), - } - ), - ], - codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"], - reference_urls=[ - "https://github.com/jhclark/tercom", - ], - ) - - def _compute( - self, - predictions, - references, - normalized: bool = False, - ignore_punct: bool = False, - support_zh_ja_chars: bool = False, - case_sensitive: bool = False, - ): - # if only one reference is provided make sure we still use list of lists - if isinstance(references[0], str): - references = [[ref] for ref in references] - - references_per_prediction = len(references[0]) - if any(len(refs) != references_per_prediction for refs in references): - raise ValueError("Sacrebleu requires the same number of references for each prediction") - transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)] - - sb_ter = TER( - normalized=normalized, - no_punct=ignore_punct, - asian_support=support_zh_ja_chars, - case_sensitive=case_sensitive, - ) - output = sb_ter.corpus_score(predictions, transformed_references) - - return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length} diff --git a/spaces/falterWliame/Face_Mask_Detection/Oregon Trail 3rd Edition Torrent Download.md b/spaces/falterWliame/Face_Mask_Detection/Oregon Trail 3rd Edition Torrent Download.md deleted file mode 100644 index 07a2e026c9325ba002796c14d4655cd4287fe355..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Oregon Trail 3rd Edition Torrent Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    oregon trail 3rd edition torrent download


    Downloadhttps://urlca.com/2uDcsf



    -
    -This is an educational and simulation game created with themes of ecology/nature, geography, history, hunting, sailing/boating, and westerns. Captures and shots. I want to be able to capture photos, such as landscape, on my bike, but I don't have a good camera or the right camera for it. So I decided to create a game that would allow me to create images using the camera on my mobile device. This tool would be used in apps like Facebook and Instagram, for example. It may have other purposes, but that would be my choice. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Qcom Loader Download BEST.md b/spaces/falterWliame/Face_Mask_Detection/Qcom Loader Download BEST.md deleted file mode 100644 index f05a1b7e311f1027481500311867fd884dedfeca..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Qcom Loader Download BEST.md +++ /dev/null @@ -1,9 +0,0 @@ -

    qcom loader download


    DOWNLOADhttps://urlca.com/2uDcfH



    -
-Use Qualcomm Flash Image Loader (QFIL): how to install the QFIL flash tool and flash firmware with the QPST tools. QFIL and QPST are utilities for flashing firmware to phones built on Qualcomm chipsets. -QFIL is the flashing front end, while QPST (Qualcomm Product Support Tools) is the broader PC-side suite it ships with. -Both run on a computer and flash firmware to a Qualcomm-based smartphone over USB. -They can also back up and restore the phone's firmware. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Asphalt 8 Airborne 3.9.0j Mod APK - Everything You Need to Know Before Downloading.md b/spaces/fatiXbelha/sd/Asphalt 8 Airborne 3.9.0j Mod APK - Everything You Need to Know Before Downloading.md deleted file mode 100644 index d34962d248e5ca8cb1a899209bbbf6090d8ea224..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Asphalt 8 Airborne 3.9.0j Mod APK - Everything You Need to Know Before Downloading.md +++ /dev/null @@ -1,124 +0,0 @@ - -

    Asphalt 8: Airborne 3.9.0j Mod APK - A Racing Game That Defies Gravity

    -

    Introduction

    -

    If you are a fan of racing games, you must have heard of Asphalt 8: Airborne, one of the most popular and advanced racing games on Android. This game offers you the opportunity to drive hundreds of cars and bikes, perform high-speed aerial stunts, race on 75+ tracks, and compete with other players online or offline.

    -

    But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited money and tokens, free shopping and upgrades, and access to all cars and tracks? Well, that's where Asphalt 8: Airborne 3.9.0j Mod APK comes in handy.

    -

    asphalt 8 3.9 0j mod apk


    Download File === https://urllie.com/2uNxBY



    -

    What is Asphalt 8: Airborne?

    -

    Asphalt 8: Airborne is a racing game developed by Gameloft, a leading mobile game developer. It is part of the Asphalt franchise, which has been running since 2004. The game was released in 2013 and has received several updates and improvements since then.

    -

    Asphalt 8: Airborne is an arcade-style racing game that features realistic car models, stunning graphics, physics-based interactions, and various game modes. The game has been acclaimed by players and critics alike, winning several awards and accolades.

    -

    What is Asphalt 8: Airborne 3.9.0j Mod APK?

    -

    Asphalt 8: Airborne 3.9.0j Mod APK is a modified version of the original game that gives you some extra benefits and features that are not available in the official version. The mod APK file is created by third-party developers who modify the game code to unlock some features or add some cheats.

    -

    The mod APK file is not available on the Google Play Store or any other official source. You have to download it from a trusted website or link that provides the mod APK file for free. However, you should be careful when downloading mod APK files, as some of them may contain viruses or malware that can harm your device or steal your data.

    -

    Features of Asphalt 8: Airborne

    -

    300+ Official Speed Machines

    -

    Asphalt 8: Airborne features more than 300 licensed cars and motorbikes from top manufacturers like Ferrari, Lamborghini, McLaren, Bugatti, Mercedes, Audi, Ford, Chevrolet, and more. You can choose from a variety of vehicles, ranging from sports cars to muscle cars, from bikes to SUVs.

    -

    You can also customize and upgrade your rides with over 2,300 decals, paint jobs, rims, spoilers, engines, tires, and more. You can make your vehicles look unique and perform better on the tracks.

    -

    Stunning Graphics and Physics-Based Interactions

    -

Asphalt 8: Airborne boasts impressive graphics that make the game look realistic and immersive. The game uses a new physics engine that allows for dynamic interactions between the vehicles, environments, and tracks.

    -

You can see the details of your car models, the reflections of the sun and the shadows on the road, the sparks and smoke from collisions and crashes, the debris and dust flying around the tracks, and the weather effects like rain, snow, fog, and sandstorms. You can also experience the realistic sound effects of the engines, brakes, horns, and crashes.

    -


    -

    Intense Arcade Gameplay and Multiplayer Experience

    -

Asphalt 8: Airborne offers thrilling, adrenaline-pumping gameplay that will keep you hooked for hours. You can race on 75+ tracks across 18 different locations, such as Venice, Tokyo, Nevada, Iceland, Dubai, and more. You can also perform amazing aerial stunts, such as barrel rolls, flips, jumps, and drifts.

    -

    You can also challenge other players from around the world in various multiplayer modes, such as classic, elimination, infected, tag racing, and more. You can join or create your own racing club, chat with other racers, and compete for the top spot on the leaderboards.

    -

    Massive Content Depth and Customization Options

    -

    Asphalt 8: Airborne has a lot of content to offer you, whether you are a casual or a hardcore racer. You can play through 9 seasons and over 400 events in the career mode, unlock new cars and tracks, and earn rewards and achievements. You can also play in the special events mode, where you can participate in limited-time challenges and win exclusive prizes.

    -

    You can also customize your game experience with various options and settings. You can choose from 5 different control schemes, adjust the graphics quality and sound volume, enable or disable tilt steering and auto-acceleration, and more.

    -

    Benefits of Asphalt 8: Airborne 3.9.0j Mod APK

    -

As described above, Asphalt 8: Airborne 3.9.0j Mod APK gives you extra benefits and features that are not available in the official version. Here are the main benefits of using the mod APK:

    -

    Unlimited Money and Tokens

    -

    Money and tokens are the main currencies in Asphalt 8: Airborne. You need them to buy new cars and bikes, upgrade your vehicles, unlock new tracks and events, and more. However, earning money and tokens in the game can be time-consuming and tedious.

    -

    With the mod APK, you don't have to worry about running out of money or tokens. You will have unlimited amounts of both currencies in your account. You can spend them as much as you want without any limitations or restrictions.

    -

    Free Shopping and Upgrades

    -

    Shopping and upgrading your vehicles in Asphalt 8: Airborne can be expensive and require a lot of money and tokens. You may not be able to afford the car or bike that you want or need to improve your performance on the tracks.

    -

    With the mod APK, you don't have to pay anything for shopping or upgrading your vehicles. You can buy any car or bike that you like for free. You can also upgrade your vehicles to the maximum level for free. You can make your vehicles faster, stronger, and more durable without spending a dime.

    -

    Unlocked All Cars and Tracks

    -

    Cars and tracks are the main attractions of Asphalt 8: Airborne. The game features more than 300 cars and bikes from top manufacturers and 75+ tracks from different locations. However, not all cars and tracks are available from the start. You have to unlock them by playing through the career mode or by paying with money or tokens.

    -

    With the mod APK, you don't have to wait or pay to unlock all cars and tracks. You will have access to all cars and tracks from the beginning of the game. You can choose any car or bike that you want and race on any track that you like.

    -

    Installation Guide for Asphalt 8: Airborne 3.9.0j Mod APK

    -

    If you want to install Asphalt 8: Airborne 3.9.0j Mod APK on your Android device, you need to follow these simple steps:

    -

    Download the Mod APK File from a Trusted Source

    -

The first step is to download the mod APK file from a trusted source that offers it for free. You can search for Asphalt 8: Airborne 3.9.0j Mod APK on Google or any other search engine and find a reliable website or link that offers the download.

    -

    You should be careful when downloading mod APK files from unknown sources, as some of them may contain viruses or malware that can harm your device or steal your data. You should also check the reviews and ratings of the website or link before downloading anything.

    -

    Enable Unknown Sources on Your Device Settings

    -

    The second step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store or any other official source. To do this, you need to go to your device settings, then security, then unknown sources, and then toggle the switch to on.

    -

    You may see a warning message that installing apps from unknown sources may harm your device or data. You can ignore this message and proceed with the installation. However, you should only install apps from trusted sources and scan them with an antivirus app before installing them.

    -

    Install the Mod APK File and Launch the Game

    -

    The third step is to install the mod APK file and launch the game. To do this, you need to locate the mod APK file that you downloaded on your device storage, then tap on it to start the installation process. You may see some prompts asking for permissions or access to your device features. You can grant them and continue with the installation.

    -

    Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You will see the modded version of Asphalt 8: Airborne with all the benefits and features that we mentioned earlier. You can enjoy the game without any limitations or restrictions.

    -

    Conclusion and FAQs

    -

    Asphalt 8: Airborne is one of the best racing games on Android that offers you a thrilling and immersive racing experience. However, if you want to have more fun and freedom in the game, you can try Asphalt 8: Airborne 3.9.0j Mod APK, which gives you unlimited money and tokens, free shopping and upgrades, and unlocked all cars and tracks.

    -

    To install Asphalt 8: Airborne 3.9.0j Mod APK, you need to download the mod APK file from a trusted source, enable unknown sources on your device settings, and install the mod APK file and launch the game. You can then enjoy the game with all the benefits and features of the mod APK.

    -

    If you have any questions or doubts about Asphalt 8: Airborne 3.9.0j Mod APK, you can check out these FAQs:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
| Question | Answer |
| --- | --- |
| Is Asphalt 8: Airborne 3.9.0j Mod APK safe to use? | Asphalt 8: Airborne 3.9.0j Mod APK is safe to use as long as you download it from a trusted source and scan it with an antivirus app before installing it. However, you should be aware that using mod APK files may violate the terms and conditions of the game and may result in your account being banned or suspended. |
| Does Asphalt 8: Airborne 3.9.0j Mod APK require root access? | No, Asphalt 8: Airborne 3.9.0j Mod APK does not require root access to work on your device. You can install it without rooting your device. |
| Can I play online with Asphalt 8: Airborne 3.9.0j Mod APK? | Yes, you can play online with Asphalt 8: Airborne 3.9.0j Mod APK, but you may face some issues or errors while connecting to the servers or other players. You may also encounter some cheaters or hackers who use mod APK files to gain an unfair advantage over other players. |
| Can I update Asphalt 8: Airborne 3.9.0j Mod APK? | No, you cannot update Asphalt 8: Airborne 3.9.0j Mod APK from the Google Play Store or any other official source. If you want to update the game, you need to uninstall the mod APK file and install the official version of the game from the Google Play Store or any other official source. |
| Can I use Asphalt 8: Airborne 3.9.0j Mod APK with other mods or hacks? | No, you cannot use Asphalt 8: Airborne 3.9.0j Mod APK with other mods or hacks, as they may cause conflicts or errors in the game. You should only use one mod or hack at a time. |
    -

    I hope this article has helped you understand what Asphalt 8: Airborne 3.9.0j Mod APK is and how to install it on your Android device.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Car Unlock Sound Effects The Ultimate Guide to Finding and Using Them.md b/spaces/fatiXbelha/sd/Car Unlock Sound Effects The Ultimate Guide to Finding and Using Them.md deleted file mode 100644 index 09a8f7aba5741e69e34d8930a9b9e3f5c2301d59..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Car Unlock Sound Effects The Ultimate Guide to Finding and Using Them.md +++ /dev/null @@ -1,120 +0,0 @@ -
    -

    Car Unlock Sound Download: How to Find and Use the Best Sounds for Your Car

    -

    Do you want to make your car more fun and unique? Do you want to add some flair and personality to your car's features? Do you want to impress your friends and family with your cool car sounds? If you answered yes to any of these questions, then you might be interested in downloading some car unlock sound effects for your car.

    -

    Introduction

    -

    What are car unlock sound effects and why do you need them?

    -

    Car unlock sound effects are audio clips that play when you unlock your car with a remote key or a smartphone app. They can be anything from realistic car sounds, such as beeps, clicks, or honks, to more creative and fun sounds, such as music, voice, or animal noises. Car unlock sound effects can make your car more enjoyable and entertaining, as well as provide some practical benefits, such as security and convenience.

    -

    car unlock sound download


    Download ===== https://urllie.com/2uNBsJ



    -

    How to find the best car unlock sound effects online

    -

    There are many websites that offer free or paid car unlock sound effects for download. Some of the most popular ones are:

    -
      -
    • Pixabay: This website has a large collection of royalty-free unlock sound effects that you can download and use for any purpose. You can find sounds of doors unlocking with keys, padlocks, chains, bolts, and more. You can also filter the results by duration, category, or tags.
    • -
    • Storyblocks: This website has a subscription plan that gives you unlimited access to thousands of royalty-free car lock unlock sound effects. You can find sounds of metal locks, wooden doors, key chains, car trailers, automatic doors, and more. You can also sort the results by relevance, popularity, or newest.
    • -
• Zapsplat: This website has a smaller but still impressive collection of royalty-free car-locking sound effects that you can download. You can find sounds of locking and unlocking doors with keys, inside-car sounds, doors opening and closing, keys locking, and more. You can also filter the results by duration or category.
    • -
    -

    How to download and use car unlock sound effects on your device

    -

    Once you have found the car unlock sound effects that you like, you can download them to your device by following these steps:

    -
      -
    1. Click on the download button or link on the website.
    2. -
    3. Choose the file format and quality that you want. Most websites offer MP3 or WAV formats, which are compatible with most devices.
    4. -
    5. Save the file to your device's storage or cloud service.
    6. -
    7. Transfer the file to your car's device using a USB cable, Bluetooth connection, or wireless network.
    8. -
    9. Select the file as your car's unlock sound effect in the settings menu of your device.
    10. -
    -

    Benefits of using car unlock sound effects

    -

    Enhance your car's security and convenience

    -

    One of the main benefits of using car unlock sound effects is that they can improve your car's security and convenience. By having a distinctive and recognizable sound when you unlock your car, you can easily locate your car in a crowded parking lot or street. You can also deter potential thieves or vandals from targeting your car, as they will know that you have a sound system installed in your car. You can also save time and hassle by unlocking your car with a simple click of a button or a tap of your phone, instead of fumbling with keys or codes.

    -

    Customize your car's personality and style

    -

    Another benefit of using car unlock sound effects is that they can customize your car's personality and style. By choosing a sound effect that matches your taste and mood, you can express yourself and make your car stand out from the crowd. You can also change the sound effect according to the occasion, such as a festive sound for holidays, a romantic sound for dates, or a funny sound for pranks. You can have fun and be creative with your car unlock sound effects, as long as they are not too loud or annoying for others.

    -

    Impress your friends and family with your cool car sounds

    -

    A final benefit of using car unlock sound effects is that they can impress your friends and family with your cool car sounds. By having a unique and interesting sound when you unlock your car, you can attract attention and admiration from others. You can also surprise and delight your passengers with your car unlock sound effects, making them feel special and entertained. You can also share your car unlock sound effects with others, such as by sending them the files or playing them on social media.

    -

    Tips and tricks for using car unlock sound effects

    -

    Choose the right sound format and quality for your device

    -

    One of the tips and tricks for using car unlock sound effects is to choose the right sound format and quality for your device. Different devices may have different requirements and capabilities for playing sound effects, so you need to make sure that the file format and quality are compatible with your device. Generally, MP3 files are smaller and more widely supported, while WAV files are larger and higher quality. You can also check the bitrate and sample rate of the file, which affect the clarity and smoothness of the sound.
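If you are comfortable with a little scripting, here is a minimal sketch that checks a WAV file's properties before you copy it to your car's device. It uses only Python's built-in wave module (so it only works for WAV files, not MP3s), and the file name "unlock_chime.wav" is just a placeholder for whatever clip you downloaded.

```python
import wave

# "unlock_chime.wav" is a placeholder name for the file you downloaded.
with wave.open("unlock_chime.wav", "rb") as clip:
    channels = clip.getnchannels()      # 1 = mono, 2 = stereo
    sample_rate = clip.getframerate()   # e.g. 44100 Hz
    sample_width = clip.getsampwidth()  # bytes per sample (2 = 16-bit audio)
    duration = clip.getnframes() / sample_rate

print(f"{channels} channel(s), {sample_rate} Hz, "
      f"{8 * sample_width}-bit, {duration:.2f} seconds")
```

If the sample rate or bit depth looks unusually low, the clip may sound muffled on your car's speakers, so it can be worth downloading a higher-quality version instead.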

    -

    Adjust the volume and duration of the sound effects to suit your preference

    -

    Another tip and trick for using car unlock sound effects is to adjust the volume and duration of the sound effects to suit your preference. You don't want the sound effect to be too loud or too long, as it may disturb others or annoy yourself. You can use an audio editor software or app to modify the volume and duration of the file, or use the settings menu of your device to adjust them. You can also use a fade in or fade out effect to make the sound more natural and pleasant.
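As an alternative to a full audio editor app, the same kind of trimming, volume change, and fading can be scripted. The sketch below is only an illustration; it assumes the third-party pydub library and ffmpeg are installed on your computer, and both file names are placeholders.

```python
from pydub import AudioSegment

# Load the downloaded effect (pydub relies on ffmpeg for MP3 files).
clip = AudioSegment.from_file("unlock_chime.mp3")

clip = clip[:2000]                       # keep only the first 2000 ms
clip = clip - 6                          # lower the volume by 6 dB
clip = clip.fade_in(100).fade_out(300)   # smooth the start and end

clip.export("unlock_chime_short.mp3", format="mp3")
```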

    -

    Test the sound effects before using them on your car

    -

    A final tip and trick for using car unlock sound effects is to test the sound effects before using them on your car. You don't want to end up with a sound effect that doesn't work properly or sounds bad on your car's device. You can test the sound effect by playing it on your computer or phone first, or by using a headphone or speaker. You can also test it on your car's device by connecting it with a USB cable, Bluetooth connection, or wireless network. You can then listen to how it sounds and make any necessary adjustments.

    -

    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, car unlock sound effects are audio clips that play when you unlock your car with a remote key or a smartphone app. They can be realistic or creative sounds that enhance your car's security and convenience, customize your car's personality and style, and impress your friends and family with your cool car sounds. You can find and download free or paid car unlock sound effects online from various websites, such as Pixabay, Storyblocks, or Zapsplat. You can then transfer them to your car's device using a USB cable, Bluetooth connection, or wireless network. You can also choose the right sound format and quality for your device, adjust the volume and duration of the sound effects to suit your preference, and test the sound effects before using them on your car.

    -

    Call to action and final thoughts

    -

    If you are interested in downloading some car unlock sound effects for your car, you can start by browsing through some of the websites mentioned above. You can also search for other websites that offer similar services, or create your own sound effects using an audio recorder or synthesizer. You can then enjoy having some fun and unique sounds when you unlock your car. However, remember to be respectful and responsible when using car unlock sound effects, as they may affect others around you. Don't use sounds that are too loud, too long, or too inappropriate for public places. Also, don't rely on sounds alone for securing your car; always lock your doors and windows properly.

    -

    FAQs

    -
      -
• Q: What are some examples of car unlock sound effects? -

      A: Some examples of car unlock sound effects are:

      -
        -
      • A car horn honking
      • -
      • A doorbell ringing
      • -
      • A voice saying "Welcome" or "Hello"
      • -
      • A musical tune or jingle
      • -
      • An animal sound, such as a dog barking or a cat meowing
      • -
      -
    • Q: How can I create my own car unlock sound effects? -

      A: You can create your own car unlock sound effects by using an audio recorder or synthesizer. You can record your own voice, use a musical instrument, or make some noises with objects. You can then edit and mix the sound using an audio editor software or app. You can also use online tools, such as SoundBible or Audiotool, to generate and customize sound effects.

    • -
    • Q: How can I change the car unlock sound effect on my device? -

      A: You can change the car unlock sound effect on your device by following these steps:

      -
        -
      1. Go to the settings menu of your device.
      2. -
      3. Find the option for car unlock sound effect, which may be under sound, security, or accessibility.
      4. -
      5. Select the file that you want to use as your car unlock sound effect from your device's storage or cloud service.
      6. -
      7. Save the changes and exit the settings menu.
      8. -
    • -
    • Q: How can I turn off the car unlock sound effect on my device? -

      A: You can turn off the car unlock sound effect on your device by following these steps:

      -
        -
      1. Go to the settings menu of your device.
      2. -
      3. Find the option for car unlock sound effect, which may be under sound, security, or accessibility.
      4. -
      5. Uncheck the box or toggle the switch that enables the car unlock sound effect.
      6. -
      7. Save the changes and exit the settings menu.
      8. -
    • -
    • Q: Where can I find more information about car unlock sound effects? -

      A: You can find more information about car unlock sound effects by visiting some of these websites:

      -
        -
      • Car Sound Effects Library: This website has a comprehensive guide on how to use car sound effects, including car unlock sound effects. It also has a blog that features tips, tricks, and reviews on car sound effects.
      • -
      • Car Unlock Sound Effects Forum: This website has a forum where you can ask questions, share ideas, and get feedback on car unlock sound effects. You can also browse through previous posts and topics to find answers and inspiration.
      • -
      • Car Unlock Sound Effects YouTube Channel: This website has a YouTube channel that showcases various car unlock sound effects. You can watch videos of different cars unlocking with different sounds, and learn how to create and use them.
      • -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Implosion - Never Lose Hope APK and Unlock All Levels with One-Time Purchase.md b/spaces/fatiXbelha/sd/Download Implosion - Never Lose Hope APK and Unlock All Levels with One-Time Purchase.md deleted file mode 100644 index 0e02dbaa278a9a4e0919777815f59b59b4ac1dce..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Implosion - Never Lose Hope APK and Unlock All Levels with One-Time Purchase.md +++ /dev/null @@ -1,157 +0,0 @@ -
      -

      Download Implosion Never Lose Hope APK: A Guide for Android Users

      -

      If you are looking for an action-packed game that will keep you on the edge of your seat, you should try Implosion Never Lose Hope. This is a sci-fi game that will take you to a post-apocalyptic world where you have to fight against a mysterious alien threat. In this article, we will tell you what Implosion Never Lose Hope is, why you should play it, how to download it on your Android device, and some tips and tricks for playing it. Let's get started!

      -

      download implosion never lose hope apk


Download: https://urllie.com/2uNEwU



      -

      What is Implosion Never Lose Hope?

      -

      Implosion Never Lose Hope is a game developed by Rayark International Limited, a company known for creating high-quality games with stunning graphics and soundtracks. Implosion Never Lose Hope is no exception. It is a game that combines elements of hack-and-slash, RPG, and shooter genres. It has a compelling story that takes place 20 years after the fall of Earth, where humanity is on the brink of extinction. You play as a pilot of a War-Mech series III battle suit, which is humanity's last hope against the XADA, a mysterious life form that wants to wipe out all life forms.

      -

      The game features amazing console-quality graphics that will immerse you in the futuristic world of Implosion. The game also has first-class voice acting and Hollywood-grade audio production that will make you feel like you are watching a movie. The game has a full orchestral score composed by John Kurlander, a Grammy Award winner who worked on The Lord of the Rings trilogy. The game has a very intuitive touch user-interface that makes it easy to control your character and unleash devastating attacks.

      -

      Why You Should Play Implosion Never Lose Hope?

      -

      There are many reasons why you should play Implosion Never Lose Hope. Here are some of them:

      -
        -
      • The game has an engaging gameplay that will challenge your skills and reflexes. You will face different types of enemies, from drones and robots to giant bosses. You will have to use your weapons and abilities wisely to survive.
      • -
      • The game has a vast arsenal of super-tech weaponry that you can customize and upgrade using the ARK Kernel system. You can choose from different types of suits, each with its own unique abilities and special moves. You can also collect and equip different badges that will boost your stats and performance.
      • -
      • The game has a captivating story that will keep you hooked until the end. You will discover the secrets behind the XADA invasion, the origin of your suit, and the fate of humanity. You will also meet different characters that will help you or hinder you along the way.
      • -
      • The game has a lot of replay value, as you can play different modes and levels with different difficulty settings. You can also compete with other players online and see how you rank on the leaderboards. You can also unlock achievements and trophies that will reward your efforts.
      • -
      -

      Implosion Never Lose Hope is a game that will not disappoint you. It is a game that will make you feel like a hero in a sci-fi movie. It is a game that will give you hours of fun and excitement.

      -

      How to Download Implosion Never Lose Hope APK?

      -

      If you want to play Implosion Never Lose Hope on your Android device, you will need to download and install the APK file. This is a file that contains the game's data and allows you to install it without using the Google Play Store. However, you need to be careful when downloading APK files, as some of them may contain viruses or malware that can harm your device. Therefore, you need to follow these steps to download and install Implosion Never Lose Hope APK safely and correctly:

      -

      -

      Step 1: Enable Unknown Sources

      -

      Before you can install any APK file on your device, you need to enable the option to allow unknown sources. This means that you can install apps from sources other than the Google Play Store. To do this, follow these steps:

      -
        -
      1. Go to your device's settings and tap on Security or Privacy.
      2. -
      3. Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.
      4. -
      5. A warning message will pop up, telling you that installing apps from unknown sources can be risky. Tap on OK or Allow to confirm.
      6. -
      -

      Now you have enabled unknown sources on your device, and you can proceed to the next step.

      -

      Step 2: Download the APK File

      -

      Now you need to find and download the APK file for Implosion Never Lose Hope. You can do this by using your device's browser and searching for a reliable source that offers the APK file. However, you need to be careful when choosing a source, as some of them may offer fake or corrupted files that can damage your device or steal your data. Therefore, we recommend using this link to download the APK file for Implosion Never Lose Hope. This is a trusted source that has been verified by many users and has positive reviews.

      -

      To download the APK file, follow these steps:

      -
        -
      1. Tap on the link above to go to the download page.
      2. -
      3. Tap on the Download button and wait for the file to be downloaded.
      4. -
      5. You may see a notification that says "This type of file can harm your device". Tap on OK or Keep to continue.
      6. -
      -

      Now you have downloaded the APK file for Implosion Never Lose Hope, and you can proceed to the next step.
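Before installing, you can also check that the file was not corrupted during the download. Below is a minimal sketch using Python's built-in hashlib module; the file path is a placeholder, and the checksum is only meaningful if the site you downloaded from publishes one for you to compare against.

```python
import hashlib

# Placeholder path to the downloaded file.
apk_path = "implosion-never-lose-hope.apk"

sha256 = hashlib.sha256()
with open(apk_path, "rb") as apk_file:
    # Read the file in chunks so large APKs do not need to fit in memory.
    for chunk in iter(lambda: apk_file.read(1024 * 1024), b""):
        sha256.update(chunk)

print("SHA-256:", sha256.hexdigest())
```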

      -

      Step 3: Install the APK File

      -

      Now you need to locate and install the APK file on your device. To do this, follow these steps:

      -
        -
      1. Go to your device's file manager and find the folder where you downloaded the APK file. It is usually in the Downloads folder.
      2. -
      3. Tap on the APK file and a pop-up window will appear, asking you if you want to install this application. Tap on Install or Next to continue.
      4. -
      5. The installation process will begin and may take a few minutes. Wait until it is finished.
      6. -
      -

      Now you have installed Implosion Never Lose Hope on your device, and you can proceed to the final step.
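If you downloaded the file to a computer instead of your phone, one alternative is to install it over USB with Android's adb tool. This is only a sketch, not part of the official instructions: it assumes adb is installed on the computer, USB debugging is enabled on your phone, and the file name is a placeholder.

```python
import subprocess

# Placeholder path to the APK on your computer.
apk_path = "implosion-never-lose-hope.apk"

# "adb install -r" installs the app, replacing an existing copy if present.
subprocess.run(["adb", "install", "-r", apk_path], check=True)
```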

      -

      Step 4: Launch the Game and Enjoy

      -

      Now you are ready to launch the game and enjoy it. To do this, follow these steps:

      -
        -
      1. Go to your device's app drawer and find the icon for Implosion Never Lose Hope. Tap on it to open it.
      2. -
      3. The game will ask for some permissions, such as access to your storage, phone, and microphone. Tap on Allow or Grant to give them.
      4. -
      5. The game will also ask you if you want to download additional data, such as voice packs or subtitles. Tap on Yes or Download if you want them, or No or Skip if you don't.
      6. -
      7. The game will start and you will see the main menu. You can choose to start a new game, continue a previous game, or change the settings.
      8. -
      9. Enjoy playing Implosion Never Lose Hope and experience the thrill of saving humanity from the XADA invasion!
      10. -
      -

      Tips and Tricks for Playing Implosion Never Lose Hope

      -

      Implosion Never Lose Hope is a game that will test your skills and reflexes. It is not an easy game, but it is not impossible either. If you want to improve your performance and have more fun, you can follow these tips and tricks:

      -

      Tip 1: Choose Your Suit Wisely

      -

      In Implosion Never Lose Hope, you can choose from two different suits: the Crimson or the Avalon. Each suit has its own advantages and disadvantages, and you should choose the one that suits your playstyle and preferences. Here are some differences between them:

| Suit | Advantages | Disadvantages |
| --- | --- | --- |
| Crimson | More agile and faster; has a longer combo chain; has a powerful special move that can deal massive damage to enemies | Less durable and more vulnerable to attacks; has a shorter range of attack; has a slower energy recovery rate |
| Avalon | More sturdy and resilient; has a longer range of attack; has a faster energy recovery rate | Less mobile and slower; has a shorter combo chain; has a weaker special move that can only stun enemies |
      -

      You can also unlock a third suit, the Raven, by completing certain achievements. The Raven is a balanced suit that has both offensive and defensive capabilities. You can switch between suits at any time by going to the suit selection screen.

      -

      Tip 2: Upgrade Your Weapons and Abilities

      -

      In Implosion Never Lose Hope, you can upgrade your weapons and abilities using the ARK Kernel system. The ARK Kernel is a currency that you can collect by defeating enemies, completing levels, or finding hidden items. You can use the ARK Kernel to enhance your suit's stats, such as health, attack, defense, speed, and energy. You can also use it to unlock new weapons, such as swords, guns, or shields. You can also use it to unlock new abilities, such as dash, dodge, or parry. You can access the ARK Kernel system by going to the upgrade screen.

      -

      Tip 3: Master the Combat System

      -

      Implosion Never Lose Hope has a very intuitive and responsive combat system that allows you to unleash devastating attacks on your enemies. You can use the touch controls to move, attack, dodge, or use special moves. You can also use the combos and skills to deal more damage and gain more energy. Here are some tips on how to master the combat system:

      -
        -
      • Use the virtual joystick on the left side of the screen to move your character. You can also tap on the screen to make your character move to that location.
      • -
      • Use the attack button on the right side of the screen to perform basic attacks. You can tap it repeatedly to perform a combo chain. You can also swipe it in different directions to perform different attacks.
      • -
      • Use the dodge button on the right side of the screen to evade enemy attacks. You can tap it once to perform a quick dodge. You can also swipe it in different directions to perform a longer dodge.
      • -
      • Use the special button on the right side of the screen to activate your suit's special move. You need to have enough energy to use it. You can gain energy by attacking or dodging enemies.
      • -
      • Use the skill buttons on the right side of the screen to use your suit's skills. You need to have enough energy to use them. You can unlock new skills by upgrading your suit.
      • -
      • Use the pause button on the top right corner of the screen to pause the game and access the menu.
      • -
      -

      Tip 4: Explore the Levels and Collect Items

      -

      Implosion Never Lose Hope has many levels that are full of secrets, bonuses, and collectibles. You should explore them thoroughly and find them all. They will help you in your quest and reward you with extra ARK Kernel, badges, trophies, or achievements. Here are some tips on how to explore the levels and collect items:

      -
        -
      • Look for hidden paths or doors that may lead you to secret areas or rooms.
      • -
      • Look for crates or containers that may contain ARK Kernel or other items.
      • -
      • Look for badges that may be hidden or guarded by enemies. Badges are items that you can equip to boost your suit's stats or abilities.
      • -
      • Look for trophies that may be awarded for completing certain tasks or challenges. Trophies are items that you can collect to unlock achievements or modes.
      • -
      • Look for achievements that may be triggered by performing certain actions or feats. Achievements are goals that you can accomplish to earn ARK Kernel or badges.
      • -
      -

      Tip 5: Challenge Yourself with Different Modes

      -

      Implosion Never Lose Hope has different modes that you can play to test your skills and have more fun. You can unlock these modes by completing certain levels or achievements. Here are some of the modes that you can play:

      -
        -
      • Hard Mode: This is a mode that increases the difficulty of the game by making the enemies stronger, faster, and smarter. You can unlock this mode by completing the Normal Mode.
      • -
      • Trial Mode: This is a mode that challenges you to complete a series of levels with specific objectives and conditions. You can unlock this mode by completing the Hard Mode.
      • -
      • Badland Mode: This is a mode that pits you against waves of enemies in a survival mode. You can unlock this mode by completing the Trial Mode.
      • -
      -

      These modes will give you more variety and challenge in the game. They will also reward you with more ARK Kernel, badges, trophies, or achievements.

      -

      Conclusion

      -

      Implosion Never Lose Hope is a game that you should not miss if you are a fan of action, sci-fi, or RPG games. It is a game that will take you to a post-apocalyptic world where you have to fight against a mysterious alien threat. It is a game that has amazing graphics, soundtracks, and voice acting. It is a game that has an engaging gameplay, a captivating story, and a lot of replay value. It is a game that you can download and install on your Android device using the APK file.

      -

      In this article, we have told you what Implosion Never Lose Hope is, why you should play it, how to download it on your Android device, and some tips and tricks for playing it. We hope that this article has been helpful and informative for you. Now go ahead and download Implosion Never Lose Hope APK and enjoy playing this awesome game!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about Implosion Never Lose Hope:

      -
        -
      1. Is Implosion Never Lose Hope free to play?
        Yes, Implosion Never Lose Hope is free to play. However, it has some in-app purchases that you can buy to enhance your experience or support the developers.
      2. -
      3. Is Implosion Never Lose Hope compatible with my device?
        Implosion Never Lose Hope requires Android 4.0 or higher to run. It also requires at least 1 GB of RAM and 1.5 GB of storage space. You can check your device's specifications by going to your settings and tapping on About Phone or Device.
      4. -
      5. How can I save my progress in Implosion Never Lose Hope?
        Implosion Never Lose Hope has an auto-save feature that saves your progress every time you complete a level or exit the game. You can also manually save your progress by going to the pause menu and tapping on Save Game.
      6. -
      7. How can I restore my progress in Implosion Never Lose Hope?
        If you have lost your progress in Implosion Never Lose Hope due to uninstalling the game or changing your device, you can restore it by using the cloud save feature. To do this, you need to have a Google Play Games account and sync it with the game. You can then go to the main menu and tap on Load Game and choose Cloud Save.
      8. -
      9. How can I contact the developers of Implosion Never Lose Hope?
        If you have any questions, feedback, or issues with Implosion Never Lose Hope, you can contact the developers by going to the main menu and tapping on Support. You can also visit their website, Facebook page, or Twitter account for more information.
      10. -

      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download The Sun Shines Over Us APK Mod 2023 and Live a Happy Life with Menu Money and Tickets.md b/spaces/fatiXbelha/sd/Download The Sun Shines Over Us APK Mod 2023 and Live a Happy Life with Menu Money and Tickets.md deleted file mode 100644 index 598b3dac9e8e13bf6503023bf2d0f02ee96b7d68..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download The Sun Shines Over Us APK Mod 2023 and Live a Happy Life with Menu Money and Tickets.md +++ /dev/null @@ -1,83 +0,0 @@ -
      -

      The Sun Shines Over Us APK Mod: A Simulation Game with a Heartwarming Story

      -

      If you are looking for a simulation game that will make you feel happy and relaxed, you should try The Sun Shines Over Us. This is a game that tells the story of a girl who lost her memory and a boy who wants to help her recover it. Along the way, they will meet different people and experience various events that will shape their lives. The game has beautiful graphics, soothing music, and engaging dialogues that will keep you hooked.

      -

      What is The Sun Shines Over Us?

      -

      The plot and the characters

      -

      The Sun Shines Over Us is set in a small town called Sunnyville, where everyone lives peacefully and happily. The main character is a girl named Sunny, who wakes up one day without any memories of her past. She is found by a boy named Ray, who decides to take care of her and help her find her lost memories. Together, they will explore the town and interact with various characters, such as Lily, the cheerful florist; Leo, the cool musician; Mia, the shy baker; and many more. Each character has their own personality, backstory, and relationship with Sunny and Ray. As you play the game, you will discover more about them and their secrets.

      -

      the sun shines over us apk mod


      DOWNLOADhttps://urllie.com/2uNEPt



      -

      The gameplay and the features

      -

      The Sun Shines Over Us is a simulation game that lets you control Sunny's actions and choices. You can customize her appearance, clothes, accessories, and hairstyle. You can also choose how to spend your time in the town, whether you want to work, study, shop, or hang out with your friends. You can also develop your relationships with different characters by talking to them, giving them gifts, or going on dates with them. Depending on your choices, you will unlock different endings and scenarios for each character.

      -

      The game also has many features that make it more enjoyable and immersive. For example, you can collect various items and decorations for your room. You can also play mini-games such as fishing, cooking, gardening, or painting. You can also listen to the original soundtrack of the game or change it to your own music. You can also take screenshots of your favorite moments and share them with your friends.

      -

      Why download The Sun Shines Over Us APK Mod?

      -

      The benefits of the mod version

      -

      While The Sun Shines Over Us is a free game to download and play, it also has some in-app purchases that require real money. For example, you need to buy tickets to unlock some episodes or scenes in the game. You also need to buy coins to buy some items or gifts for your friends. However, if you download The Sun Shines Over Us APK Mod from [The Sun Shines Over Us 12.0 MOD APK Menu/Unlimited money, tickets](^1^), you can enjoy these benefits:

      -
        -
      • You will get unlimited money and tickets in the game.
      • -
      • You will get access to all episodes and scenes in the game.
      • -
      • You will get access to all items and gifts in the game.
      • -
      • You will get access to all outfits and hairstyles in the game.
      • -
      • You will get access to a menu that lets you modify some aspects of the game.
      • -
      -

      How to download and install the mod apk

      -

      To download and install The Sun Shines Over Us APK Mod on your Android device, you need to follow these steps:

      -
        -
      1. Go to [The Sun Shines Over Us 12.0 MOD APK Menu/Unlimited money, tickets](^1^) and click on the download button.
      2. -
      3. Wait for the download to finish and locate the file on your device.
      4. -
      5. Enable the installation of apps from unknown sources on your device settings.
      6. -
      7. Tap on the file and follow the instructions to install it.
      8. -
      9. Launch the game and enjoy the game.
      10. -
      -

      Conclusion

      -

      The Sun Shines Over Us is a simulation game that will make you feel warm and fuzzy inside. It has a heartwarming story, charming characters, and relaxing gameplay. You can customize your own character, make friends, fall in love, and discover the secrets of Sunnyville. You can also enjoy the mod version of the game that gives you unlimited resources and access to everything in the game. If you are looking for a game that will brighten up your day, you should download The Sun Shines Over Us APK Mod today.

      -

      FAQs

      -

      Here are some frequently asked questions about The Sun Shines Over Us APK Mod:

| Question | Answer |
| --- | --- |
| Is The Sun Shines Over Us APK Mod safe to download and install? | Yes, The Sun Shines Over Us APK Mod is safe to download and install. It does not contain any viruses or malware. However, you should always download it from a trusted source like [The Sun Shines Over Us 12.0 MOD APK Menu/Unlimited money, tickets]. |
| Do I need to root my device to use The Sun Shines Over Us APK Mod? | No, you do not need to root your device to use The Sun Shines Over Us APK Mod. You can install it on any Android device that meets the minimum requirements of the game. |
| Can I play The Sun Shines Over Us APK Mod offline? | Yes, you can play The Sun Shines Over Us APK Mod offline. You do not need an internet connection to play the game. However, some features may not work properly without an internet connection, such as sharing screenshots or listening to your own music. |
| Can I update The Sun Shines Over Us APK Mod? | Yes, you can update The Sun Shines Over Us APK Mod whenever there is a new version available. However, you should always back up your data before updating the game, as some updates may cause compatibility issues or data loss. |
| Can I play The Sun Shines Over Us APK Mod with my friends? | Yes, you can play The Sun Shines Over Us APK Mod with your friends. You can connect with them through social media platforms such as Facebook or Twitter. You can also chat with them in the game and send them gifts or invitations. |

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download __TOP__ Rihanna If It 39s Lovin That You Want Mp3.md b/spaces/fatiXbelha/sd/Download __TOP__ Rihanna If It 39s Lovin That You Want Mp3.md deleted file mode 100644 index 98851a21a03c3ba58e9a4dea62933610ad4f5bb6..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download __TOP__ Rihanna If It 39s Lovin That You Want Mp3.md +++ /dev/null @@ -1,65 +0,0 @@ - -

      How to Download Rihanna's If It's Lovin' That You Want MP3

      -

      Are you a fan of Rihanna and her catchy songs? Do you want to listen to her hit single If It's Lovin' That You Want anytime, anywhere? If so, then you might be interested in downloading the MP3 version of the song. In this article, we will show you how to do that in two easy ways. But first, let's find out more about the song and why you should download it.

      -

      Introduction

      -

      What is If It's Lovin' That You Want?

      -

      If It's Lovin' That You Want is a song by Barbadian singer Rihanna from her debut studio album, Music of the Sun (2005). It was written by Samuel Barnes, Scott La Rock, Makeba Riddick, Jean-Claude Oliver, Lawrence Parker, and produced by Poke & Tone. It was released on August 16, 2005, as the second and final single from the album.

      -

download rihanna if it's lovin that you want mp3


      DOWNLOAD >>> https://urllie.com/2uNxCi



      -

      The song is a flirtatious track that follows Rihanna as she tries to convince a guy to be with her. She promises him that she can give him everything he needs, from love to friendship. The song has a reggae-pop vibe and features a sample from "No Guns, No Murder" by Rayvon. The song received positive reviews from critics, who praised Rihanna's vocals and the catchy chorus. The song also achieved moderate success on the charts, reaching the top ten in several countries, including Australia, Ireland, New Zealand, and the UK.

      -

      Why download If It's Lovin' That You Want MP3?

      -

      If you love this song and want to listen to it on repeat, then downloading the MP3 version is a good idea. Here are some benefits of downloading If It's Lovin' That You Want MP3:

      -
        -
      • You can listen to it offline without any internet connection.
      • -
      • You can save storage space on your device by deleting the video file.
      • -
      • You can transfer it to other devices or share it with your friends.
      • -
      • You can create your own playlist or mixtape with other songs.
      • -
      • You can enjoy the high-quality sound of the song without any ads or interruptions.
      • -
      -

      Now that you know why you should download If It's Lovin' That You Want MP3, let's see how you can do that in two easy ways.

      -

      How to download If It's Lovin' That You Want MP3

      -

      Option 1: YouTube

      -

      One of the easiest ways to download If It's Lovin' That You Want MP3 is to use YouTube. YouTube is a popular video-sharing platform that hosts millions of videos, including music videos. You can find the official music video of If It's Lovin' That You Want on Rihanna's YouTube channel. Here are the steps to download If It's Lovin' That You Want MP3 from YouTube:

      -

      -

      Step 1: Go to YouTube and search for the song

      -

      Open your web browser and go to YouTube.com. In the search box, type "Rihanna If It's Lovin' That You Want" and hit enter. You will see a list of videos related to your search. Look for the official music video of the song, which has a thumbnail of Rihanna in a blue dress and a beach background. It should have the title "Rihanna - If It's Lovin' That You Want (Official Music Video)" and the channel name "Rihanna". Click on the video to open it.

      -

      Step 2: Copy the URL of the video

      -

      Once the video is playing, look at the address bar of your browser. You will see a URL that starts with "https://www.youtube.com/watch?v=". This is the link to the video. Select the entire URL and copy it to your clipboard. You can use the keyboard shortcut Ctrl+C (Windows) or Command+C (Mac) to copy it.

      -

      Step 3: Go to a YouTube to MP3 converter website

      -

      Now that you have the URL of the video, you need to convert it to an MP3 file. To do that, you need to use a YouTube to MP3 converter website. There are many websites that offer this service for free, but some of them may have ads, pop-ups, or malware. To avoid that, we recommend using a reliable and safe website like ytmp3.cc. This website is easy to use and has no annoying ads or viruses. Go to ytmp3.cc in a new tab or window of your browser.

      -

      Step 4: Paste the URL and click convert

      -

      On the homepage of ytmp3.cc, you will see a box that says "Paste your YouTube link here". Paste the URL that you copied in step 2 into this box. You can use the keyboard shortcut Ctrl+V (Windows) or Command+V (Mac) to paste it. Then, click on the button that says "Convert". The website will start processing your video and converting it to an MP3 file.

      -

      Step 5: Download the MP3 file and enjoy

      -

      After a few seconds, you will see a message that says "Conversion finished". Below that, you will see a button that says "Download". Click on this button to download the MP3 file of If It's Lovin' That You Want to your device. You may also see another button that says "Save to Dropbox". If you have a Dropbox account and want to save the file there, you can click on this button instead. Once the download is complete, you can open the file and listen to the song. Enjoy!
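If you prefer not to rely on a converter website, a common alternative is the open-source yt-dlp tool, which can save a video's audio track as an MP3. The sketch below is only an illustration: it assumes the third-party yt-dlp package and ffmpeg are installed on your computer, and the URL is a placeholder that you would replace with the link copied in step 2. The same copyright and terms-of-service caveats discussed in the FAQs below still apply.

```python
from yt_dlp import YoutubeDL

# Placeholder URL - replace it with the link copied from the address bar.
video_url = "https://www.youtube.com/watch?v=VIDEO_ID"

options = {
    "format": "bestaudio/best",              # grab the best audio-only stream
    "outtmpl": "%(title)s.%(ext)s",          # name the file after the video title
    "postprocessors": [{
        "key": "FFmpegExtractAudio",         # convert the audio with ffmpeg
        "preferredcodec": "mp3",
        "preferredquality": "192",           # target bitrate in kbit/s
    }],
}

with YoutubeDL(options) as downloader:
    downloader.download([video_url])
```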

      -

      Option 2: Spotify

      -

      Another way to download If It's Lovin' That You Want MP3 is to use Spotify. Spotify is a popular music streaming service that offers millions of songs, podcasts, and playlists. You can find If It's Lovin' That You Want on Rihanna's Music of the Sun album on Spotify. Here are the steps to download If It's Lovin' That You Want MP3 from Spotify:

      -

      Step 1: Download and install Spotify on your device

      -

      To use Spotify, you need to download and install it on your device. Spotify is available for Windows, Mac, iOS, Android, and other platforms. You can download it from Spotify.com or from your device's app store. Follow the instructions on how to install and launch Spotify on your device.

      -

      Step 2: Sign up or log in to Spotify

      -

      To access Spotify's features, you need to sign up or log in to Spotify. You can sign up for free with your email address or Facebook account. However, if you want to download songs for offline listening, you need to upgrade to Spotify Premium, which costs $9.99 per month (or less if you are eligible for a student or family plan). You can sign up or log in to Spotify from Spotify.com or from the app.

      -

      Step 3: Search for the song and add it to your playlist

      -

      Once you are signed in to Spotify, you can search for If It's Lovin' That You Want by typing it in the search box at the top of the app or website. You will see a list of results related to your search. Look for the song under the Songs section and click on it to open it. You will see the song's details, such as the artist, album, duration, and popularity. To download the song, you need to add it to your playlist. You can create a new playlist or add it to an existing one. To create a new playlist, click on the three dots icon next to the song and select "Add to playlist". Then, click on "New playlist" and give it a name. To add it to an existing playlist, click on the three dots icon next to the song and select "Add to playlist". Then, choose the playlist you want from the list.

      -

      Step 4: Download the playlist for offline listening

      -

      After you have added the song to your playlist, you can download the playlist for offline listening. To do that, go to your library and find the playlist you want. Then, click on the download button next to the playlist name. It looks like a downward arrow with a line under it. The button will turn green when the download is complete. You can also see the progress of the download by looking at the bar under the playlist name.

      -

      Step 5: Enjoy the song anytime, anywhere

      -

      Once you have downloaded the playlist, you can enjoy If It's Lovin' That You Want anytime, anywhere. You can access your downloaded playlists from your library or from the home screen of Spotify. You can also see a green icon next to the songs that are downloaded. To listen to the song offline, you need to switch to offline mode in Spotify. To do that, go to settings and toggle on "Offline mode". This will prevent Spotify from using any internet connection and only play your downloaded songs.

      -

      Conclusion

      -

      Summary of the main points

      -

      In this article, we have shown you how to download Rihanna's If It's Lovin' That You Want MP3 in two easy ways. You can use YouTube or Spotify to get the MP3 version of the song and listen to it offline. Both methods are simple and fast, but they have some differences. YouTube is free but requires a third-party website to convert the video to MP3. Spotify is not free but offers high-quality sound and other features like podcasts and playlists.

      -

      Call to action and final thoughts

      -

      Now that you know how to download If It's Lovin' That You Want MP3, what are you waiting for? Go ahead and try one of these methods and enjoy this catchy song by Rihanna. You can also check out her other songs and albums on YouTube or Spotify and discover more of her music. Rihanna is one of the most popular and talented singers in the world, and her songs are always worth listening to.

      -

      If you liked this article, please share it with your friends and leave us a comment below. We would love to hear your feedback and suggestions for future articles. Thank you for reading!

      -

      Frequently Asked Questions

      -

      Here are some common questions that people have about downloading If It's Lovin' That You Want MP3:

      -
        -
      1. Is it legal to download If It's Lovin' That You Want MP3 from YouTube or Spotify?
      2. -

        It depends on your country's laws and regulations regarding copyright infringement and fair use. Generally speaking, downloading music from YouTube or Spotify for personal use is not illegal, as long as you do not distribute, sell, or profit from it. However, downloading music from these platforms may violate their terms of service, which could result in your account being suspended or terminated. Therefore, we advise you to use these methods at your own risk and discretion.

        -
      3. What are some other ways to download If It's Lovin' That You Want MP3?
      4. -

        There are many other ways to download If It's Lovin' That You Want MP3, such as using other music streaming services, online music stores, torrent sites, or file-sharing apps. However, some of these methods may not be safe, legal, or reliable. They may also have poor sound quality, malware, or viruses. Therefore, we recommend you to use the methods we have shown you in this article, as they are the easiest and safest ways to download If It's Lovin' That You Want MP3.

        -
      5. How can I play If It's Lovin' That You Want MP3 on my device?
      6. -

        Once you have downloaded If It's Lovin' That You Want MP3 to your device, you can play it using any media player that supports MP3 format. For example, you can use Windows Media Player, iTunes, VLC, or Groove Music on your computer. You can also use the default music player on your smartphone or tablet. Alternatively, you can transfer the MP3 file to a USB drive, SD card, or CD and play it on any compatible device.

        -
      7. How can I edit If It's Lovin' That You Want MP3?
      8. -

        If you want to edit If It's Lovin' That You Want MP3, such as cutting, trimming, merging, or adding effects to it, you need to use an audio editing software. There are many audio editing software that you can download for free or for a fee, such as Audacity, WavePad, Adobe Audition, or GarageBand. You can use these software to open the MP3 file and make any changes you want. Then, you can save the edited file as a new MP3 file or in another format.

        -
      9. How can I make a ringtone from If It's Lovin' That You Want MP3?
      10. -

        If you want to make a ringtone from If It's Lovin' That You Want MP3, you need to use a ringtone maker app or website. There are many ringtone maker apps and websites that you can download or access for free or for a fee, such as Zedge, Ringtone Maker, Audiko, or Online Audio Cutter. You can use these apps or websites to upload the MP3 file and select the part of the song that you want as your ringtone. Then, you can download the ringtone file and set it as your phone's ringtone.

        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Enjoy Blockman Go with Unlimited Money and Gcubes MOD APK 2023.md b/spaces/fatiXbelha/sd/Enjoy Blockman Go with Unlimited Money and Gcubes MOD APK 2023.md deleted file mode 100644 index 14ca6b4e5fc69ce342a860ec49906628cd774fc1..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Enjoy Blockman Go with Unlimited Money and Gcubes MOD APK 2023.md +++ /dev/null @@ -1,134 +0,0 @@ - -

        Blockman Go Mod Apk Unlimited Money and Gcubes 2023: A Guide for Gamers

        -

        If you are a fan of block style games, you might have heard of Blockman Go, a free app that offers various minigames, a social platform, and a sandbox game. But did you know that there is a way to get unlimited money and gcubes in Blockman Go? Yes, you heard it right. With Blockman Go Mod Apk, you can enjoy all the features and benefits of the game without spending a dime. In this article, we will tell you everything you need to know about Blockman Go Mod Apk, including what it is, how to download and install it, what are its features and benefits, and what are some tips and tricks to play it. So, without further ado, let's get started.

        -

        blockman go mod apk unlimited money and gcubes 2023


Download File: https://urllie.com/2uNIPe



        -

        What is Blockman Go?

        -

        Blockman Go is a free app that was released by Blockman GO Studio in 2017. It is a game that provides a mobile gaming platform where you can play various block style minigames with other players from all over the world. Some of the popular minigames include Bed Wars, Egg War, Sky War, Survival Games, Build Battle, etc. You can join any game by a simple tap and have fun with your friends or strangers.

        -

        Blockman Go is also a social platform where you can chat and make friends with other players in the game. You can customize your avatar with different clothes and accessories, and show off your unique style. You can also join communities and participate in events. You can earn gold by playing minigames, which you can use to buy more items for your avatar.

        -

        Blockman Go is also a sandbox game where you can create and share your own experiences with other players. You can use blocks to build anything you want, from houses to castles, from islands to cities. You can also explore different virtual worlds created by other players or by the game itself. You can unleash your creativity and imagination in Blockman Go.

        -

        What is Blockman Go Mod Apk?

        -

        Blockman Go Mod Apk is a modified version of the original game that gives you unlimited money and gcubes in Blockman Go. Money and gcubes are the two main currencies in the game that you need to buy items, unlock minigames, upgrade your avatar, etc. However, earning money and gcubes in the game can be time-consuming and tedious. That's why some players look for ways to get them for free.

        -

        -

        With Blockman Go Mod Apk, you don't have to worry about running out of money and gcubes. You can get unlimited amounts of them by using the mod apk file. You can use them to buy anything you want in the game, such as clothes, accessories, weapons, vehicles, pets, etc. You can also unlock all the minigames and virtual worlds in the game, and enjoy them without any restrictions. You can also access the premium features of the game, such as VIP membership, exclusive items, etc.

        -

        Blockman Go Mod Apk is a way to enjoy the game without spending any real money. It is a way to enhance your gaming experience and have more fun with Blockman Go. However, it is also a way to cheat and hack the game, which may not be fair to other players and may violate the terms and conditions of the game. Therefore, you should use Blockman Go Mod Apk at your own risk and responsibility.

        -

        How to Download and Install Blockman Go Mod Apk?

        -

        If you want to try out Blockman Go Mod Apk, you need to download and install it on your device. Here are the steps to do so:

        -
1. First, you need to find a reliable source to download the mod apk file. There are many websites that offer mod apk files for various games, but not all of them are safe and trustworthy. You should do some research and check the reviews and ratings of the websites before downloading anything from them. You can also use a VPN service to protect your privacy and security.
2. Second, you need to enable the unknown sources option on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may also need to disable any antivirus or firewall software that may interfere with the installation process.
3. Third, you need to locate the downloaded mod apk file on your device. You can use a file manager app to find it in your downloads folder or wherever you saved it. Then, tap on the file and follow the instructions to install it on your device. It may take a few minutes for the installation to complete.
4. Fourth, you need to launch the game and enjoy the unlimited money and gcubes in Blockman Go. You can check your balance in the game menu and see how much money and gcubes you have. You can also explore all the features and benefits of Blockman Go Mod Apk.

        Before installing Blockman Go Mod Apk, you should take some precautions to avoid any problems or issues. For example, you should backup your original game data in case something goes wrong or you want to switch back to the original game. You should also uninstall any previous versions of Blockman Go before installing the mod apk file. You should also be careful not to update the game from the Google Play Store or any other sources, as this may overwrite the mod apk file and remove its features.
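If you would rather script that backup and file check than do it by hand, the rough sketch below shows one way to do it with Python and the standard `adb` command-line tool. It is only a sketch under assumptions: `adb` must be installed with USB debugging enabled, the package name `com.sandboxol.blockymods` is a guess at Blockman Go's identifier (confirm the real one with `adb shell pm list packages` on your device), and the expected checksum is a placeholder you would take from the site you downloaded the file from.

```python
import hashlib
import subprocess
from pathlib import Path

APK_PATH = Path("BlockmanGo-mod.apk")                 # hypothetical downloaded file name
EXPECTED_SHA256 = "put-the-published-checksum-here"   # placeholder; copy from the download page if provided
PACKAGE = "com.sandboxol.blockymods"                  # assumed package name -- verify on your device

def sha256_of(path: Path) -> str:
    """Hash the file in chunks so large APKs don't have to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

def backup_game_data(package: str, out_file: str = "blockman_go_backup.ab") -> None:
    """Use `adb backup` to save the app's data before replacing the installation."""
    subprocess.run(["adb", "backup", "-f", out_file, package], check=True)

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    if actual != EXPECTED_SHA256:
        raise SystemExit(f"Checksum mismatch: {actual} -- do not install this file.")
    backup_game_data(PACKAGE)
    print("Checksum OK and backup written; you can now sideload the APK.")
```

This is just one cautious workflow; skipping the checksum comparison when no checksum is published, or backing up with your device's own backup tool instead of `adb`, are equally valid choices.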

        -

        What are the Features and Benefits of Blockman Go Mod Apk?

        -

        Blockman Go Mod Apk has many features and benefits that make it different from the original game. Here are some of them:

        -
- Unlimited money and gcubes: This is the main feature of Blockman Go Mod Apk that allows you to get unlimited amounts of money and gcubes in Blockman Go. You can use them to buy anything you want in the game, such as clothes, accessories, weapons, vehicles, pets, etc. You can also unlock all the minigames and virtual worlds in the game, and enjoy them without any restrictions.
- Access to all minigames and virtual worlds: Blockman Go has many minigames and virtual worlds that you can play with other players from all over the world. Some of them are free to play, while others require money or gcubes to unlock. With Blockman Go Mod Apk, you don't have to worry about that. You can access all the minigames and virtual worlds in the game for free.
- No ads and no root required: Blockman Go Mod Apk does not have any annoying ads that may interrupt your gaming experience. It also does not require root access on your device, which means you don't have to modify your device's system settings or risk damaging it.

        What are some Tips and Tricks to Play Blockman Go Mod Apk?

        -

        Blockman Go Mod Apk is a fun and exciting game that you can play with your friends or strangers online. However, if you want to improve your skills and have more fun with Blockman Go Mod Apk, here are some tips and tricks that you can follow:

        -
- How to improve your PvP skills in Bed Wars, Egg War, Sky War, etc.: These are some of the most popular minigames in Blockman Go that involve player versus player combat. To win these games, you need to have good strategy, teamwork, and combat skills. Here are some tips to help you improve your PvP skills:
  - Choose your weapons and items wisely. Different weapons and items have different advantages and disadvantages. For example, swords are good for close-range combat, bows are good for long-range combat, shields are good for defense, etc. You should also use items that can help you in different situations, such as blocks, ladders, TNT, etc.
  - Use the environment to your advantage. You can use the terrain, structures, and objects in the map to hide, ambush, or escape from your enemies. You can also use them to create traps or obstacles for your enemies. For example, you can place TNT near a bridge or a tower to blow it up when your enemies cross it.
  - Communicate and cooperate with your teammates. You can use the chat feature or voice chat feature in Blockman Go to communicate with your teammates. You can share information, plan strategies, coordinate attacks, or ask for help. You can also use the team color feature to identify your teammates and enemies easily.
- How to customize your avatar and show off your style: Blockman Go allows you to customize your avatar with different clothes and accessories that you can buy with money or gcubes. You can also use the skin editor feature to create your own skin or import a skin from other sources. Here are some tips to help you customize your avatar and show off your style:
  - Choose clothes and accessories that suit your personality and preferences. You can choose from different categories, such as casual, formal, cute, cool, etc. You can also mix and match different items to create your own unique look.
  - Use the color picker feature to change the color of your clothes and accessories. You can choose from different shades and hues of colors, or use the custom color feature to create your own color.
  - Use the preview feature to see how your avatar looks before buying or applying any items. You can also use the zoom feature to see the details of your avatar.
- How to chat and make friends with other players: Blockman Go is not only a game, but also a social platform where you can chat and make friends with other players from all over the world. You can use the chat feature or voice chat feature in Blockman Go to talk to other players in the game. You can also use the friend feature or community feature to add other players as friends or join communities. Here are some tips to help you chat and make friends with other players:
  - Be polite and respectful to other players. Don't use any offensive or inappropriate language or behavior that may hurt or annoy other players. Don't spam or advertise in the chat. Don't cheat or hack in the game.
  - Be friendly and helpful to other players. Compliment other players on their skills or appearance. Offer help or advice to new or struggling players. Share tips or tricks with other players.
  - Be active and sociable in the game. Join different minigames and virtual worlds that interest you. Participate in events and activities that are organized by the game or by the communities. Chat with other players about common topics or interests.

        Conclusion

        -

        Blockman Go is a free app that offers various block style minigames, a social platform, and a sandbox game for mobile gamers. Blockman Go Mod Apk is a modified version of the original game that gives you unlimited money and gcubes in Blockman Go. With Blockman Go Mod Apk, you can enjoy all the features and benefits of the game without spending any real money.

        -

        If you want to try out Blockman Go Mod Apk, you need to download and install it on your device following the steps we mentioned above. However, you should also be aware of the risks and responsibilities of using Blockman Go Mod Apk, as it may not be fair to other players and may violate the terms and conditions of the game.

        -

        We hope this article has helped you learn more about Blockman Go Mod Apk and how to use it. If you are interested in playing Blockman Go with unlimited money and gcubes, you can download and install Blockman Go Mod Apk from the link below. However, remember to use it at your own risk and responsibility, and respect other players and the game itself.

        -

        Thank you for reading this article. We hope you have a great time playing Blockman Go Mod Apk. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.

        -

        FAQs

        -

        Here are some frequently asked questions about Blockman Go Mod Apk that you may find useful:

| Question | Answer |
| --- | --- |
| Is Blockman Go Mod Apk safe to use? | Blockman Go Mod Apk is not an official version of the game, and it may contain viruses or malware that can harm your device or steal your personal information. Therefore, you should only download Blockman Go Mod Apk from a reliable source, and scan it with an antivirus software before installing it. You should also backup your original game data and device data before using Blockman Go Mod Apk. |
| Is Blockman Go Mod Apk legal to use? | Blockman Go Mod Apk is not legal to use, as it violates the terms and conditions of the game and the intellectual property rights of the game developers. Using Blockman Go Mod Apk may result in your account being banned or suspended, or legal actions being taken against you. Therefore, you should use Blockman Go Mod Apk at your own risk and responsibility. |
| Does Blockman Go Mod Apk work on all devices? | Blockman Go Mod Apk may not work on all devices, as it may depend on the device model, operating system, compatibility, etc. Therefore, you should check the requirements and specifications of Blockman Go Mod Apk before downloading and installing it. You should also update your device software and drivers to ensure optimal performance. |
| How to update Blockman Go Mod Apk? | Blockman Go Mod Apk may not be updated automatically, as it is not connected to the Google Play Store or any other sources. Therefore, you should check for updates manually from the website where you downloaded Blockman Go Mod Apk. However, you should be careful not to update the game from the Google Play Store or any other sources, as this may overwrite the mod apk file and remove its features. |
| How to uninstall Blockman Go Mod Apk? | If you want to uninstall Blockman Go Mod Apk, you can do so by following the same steps as uninstalling any other app on your device. You can go to your device settings, then apps, then Blockman Go, then uninstall. You can also delete the mod apk file from your device storage. However, you should make sure to restore your original game data and device data before uninstalling Blockman Go Mod Apk. |

        \ No newline at end of file diff --git a/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/data_objects/speaker_batch.py b/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/data_objects/speaker_batch.py deleted file mode 100644 index 4485605e3ece5b491d1e7d0f223c543b6c91eb96..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/data_objects/speaker_batch.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from typing import List -from speaker_encoder.data_objects.speaker import Speaker - -class SpeakerBatch: - def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): - self.speakers = speakers - self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} - - # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with - # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) - self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/inherits/inherits.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/inherits/inherits.js deleted file mode 100644 index f71f2d93294a67ad5d9300aae07973e259f26068..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/inherits/inherits.js +++ /dev/null @@ -1,9 +0,0 @@ -try { - var util = require('util'); - /* istanbul ignore next */ - if (typeof util.inherits !== 'function') throw ''; - module.exports = util.inherits; -} catch (e) { - /* istanbul ignore next */ - module.exports = require('./inherits_browser.js'); -} diff --git "a/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" "b/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" deleted file mode 100644 index 5cf239e1d44ace85e1a32561513cfd37ee3255d9..0000000000000000000000000000000000000000 --- "a/spaces/fkhuggingme/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" +++ /dev/null @@ -1,59 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # 支持任意数量的llm接口,用&符号分隔 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt=system_prompt, - retry_times_at_unknown_error=0 - ) - - history.append(txt) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - - -@CatchException -def 同时问询_指定模型(txt, 
llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt=system_prompt, - retry_times_at_unknown_error=0 - ) - - history.append(txt) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 \ No newline at end of file diff --git a/spaces/foduucom/CandleStickScan-Stock-trading-yolov8/README.md b/spaces/foduucom/CandleStickScan-Stock-trading-yolov8/README.md deleted file mode 100644 index 2e6f1dde31eea1859b06e4d61d647e42f827327a..0000000000000000000000000000000000000000 --- a/spaces/foduucom/CandleStickScan-Stock-trading-yolov8/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CandleStickScan Stock Trading Yolov8 -emoji: 📈 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/geetu040/video-gen/README.md b/spaces/geetu040/video-gen/README.md deleted file mode 100644 index d28bbb3b0d585bc68cf644397b316c6b351f3442..0000000000000000000000000000000000000000 --- a/spaces/geetu040/video-gen/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Video Gen -emoji: ⚡ -colorFrom: indigo -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/glitch0011/MendoBERT_NER/README.md b/spaces/glitch0011/MendoBERT_NER/README.md deleted file mode 100644 index 8fb33d3dca56caed3379b5b22b947e772922d60a..0000000000000000000000000000000000000000 --- a/spaces/glitch0011/MendoBERT_NER/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Ner App -emoji: 🏢 -colorFrom: indigo -colorTo: pink -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gorkemgoknar/gptChatYourTTS/README.md b/spaces/gorkemgoknar/gptChatYourTTS/README.md deleted file mode 100644 index 5fc5996be64134b55c20050403a33a3486d7029c..0000000000000000000000000000000000000000 --- a/spaces/gorkemgoknar/gptChatYourTTS/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Moviechatbot - GPT chatbot with Coqui YourTTS -emoji: 🐸 -colorFrom: blue -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false -sdk_version: 3.1.1 -duplicated_from: gorkemgoknar/movie_chat_gpt_yourtts ---- - -# Configuration - -`title`: _string_ -Poc with no model first - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for 
Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/gradio/autocomplete/run.py b/spaces/gradio/autocomplete/run.py deleted file mode 100644 index 579d22faaecddc12ed3062c42104b9e16d1e8ce6..0000000000000000000000000000000000000000 --- a/spaces/gradio/autocomplete/run.py +++ /dev/null @@ -1,21 +0,0 @@ -import gradio as gr -import os - -# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting -auth_token = os.getenv("auth_token") - -# load a model from https://hf.co/models as an interface, then use it as an api -# you can remove the api_key parameter if you don't care about rate limiting. -api = gr.load("huggingface/gpt2-xl", hf_token=auth_token) - -def complete_with_gpt(text): - return text[:-50] + api(text[-50:]) - -with gr.Blocks() as demo: - textbox = gr.Textbox(placeholder="Type here...", lines=4) - btn = gr.Button("Autocomplete") - - # define what will run when the button is clicked, here the textbox is used as both an input and an output - btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False) - -demo.launch() \ No newline at end of file diff --git a/spaces/gradio/video_component/README.md b/spaces/gradio/video_component/README.md deleted file mode 100644 index a1253ccf23ffe230ece0e0f0cdb9f40772376a7f..0000000000000000000000000000000000000000 --- a/spaces/gradio/video_component/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: video_component -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/grosenthal/aineid/src/aineid/src/ColorModeSwitcher.tsx b/spaces/grosenthal/aineid/src/aineid/src/ColorModeSwitcher.tsx deleted file mode 100644 index 60bd7fa89f1cd7c36dcccacd86b7da38ed746f85..0000000000000000000000000000000000000000 --- a/spaces/grosenthal/aineid/src/aineid/src/ColorModeSwitcher.tsx +++ /dev/null @@ -1,30 +0,0 @@ -import * as React from "react" -import { - useColorMode, - useColorModeValue, - IconButton, - IconButtonProps, -} from "@chakra-ui/react" -import { FaMoon, FaSun } from "react-icons/fa" - -type ColorModeSwitcherProps = Omit - -export const ColorModeSwitcher: React.FC = (props) => { - const { toggleColorMode } = useColorMode() - const text = useColorModeValue("light", "dark") - const SwitchIcon = useColorModeValue(FaMoon, FaSun) - - return ( - } - aria-label={`Switch to ${text} mode`} - {...props} - /> - ) -} diff --git a/spaces/guney/photo-with-code/app.py b/spaces/guney/photo-with-code/app.py deleted file mode 100644 index 4e54d22bfda2e0cc14323360b9284ceabb58955d..0000000000000000000000000000000000000000 --- a/spaces/guney/photo-with-code/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import gradio as gr -from photowithcode import * - -def type2im(type): - path = f'code_images/{type}.png' - return cv2.imread(path, cv2.IMREAD_COLOR) - -def imgproc_1(photo, type, mirror_code): - print(type) - code = type2im(type) - out = photowithcode_proc(photo, code, True, mirror_code) - return out - -def imgproc_2(photo, code, dim_background, 
mirror_code): - out = photowithcode_proc(photo, code, dim_background, mirror_code) - return out - -with gr.Blocks() as demo: - gr.Markdown('Overlay your code on to you photo with this demo.') - with gr.Tab('Use existing code'): - with gr.Row(): - with gr.Column(): - input_1 = [gr.Image(), gr.Radio(['Rust', 'C++', 'Python'], value='Rust', label='Choose your programming language'), gr.Checkbox(label='Mirror the code?')] - with gr.Column(): - output_1 = gr.Image() - button_1 = gr.Button('Submit') - with gr.Tab('Use your own code'): - with gr.Row(): - with gr.Column(): - input_2 = [gr.Image(), gr.Image(), gr.Checkbox(label='Dim the code background?'), gr.Checkbox(label='Mirror the code?')] - with gr.Column(): - output_2 = gr.Image() - button_2 = gr.Button('Submit') - - button_1.click(imgproc_1, inputs=input_1, outputs=output_1) - button_2.click(imgproc_2, inputs=input_2, outputs=output_2) - -demo.launch() \ No newline at end of file diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/samples/tensorflow/__init__.py b/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/samples/tensorflow/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/hackathon-pln-es/sonnet-poetry-generator-spanish/app.py b/spaces/hackathon-pln-es/sonnet-poetry-generator-spanish/app.py deleted file mode 100644 index 86faab38c7793c8af9786f264cdf5f264b344820..0000000000000000000000000000000000000000 --- a/spaces/hackathon-pln-es/sonnet-poetry-generator-spanish/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import gradio as gr -from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline - -pretrained_model = "datificate/gpt2-small-spanish" -tokenizer = AutoTokenizer.from_pretrained(pretrained_model, use_fast=True) -sonnets_tuned_model = 'hackathon-pln-es/gpt2-small-spanish-disco-poetry' -sonnets_pipe = pipeline('text2text-generation', model=sonnets_tuned_model, tokenizer=tokenizer) - -def make_new_sonnet(prompt, max_lenght): - ouputs = sonnets_pipe(prompt, max_length=max_lenght, - num_beams=5, - early_stopping=True, - repetition_penalty=20.0, - num_return_sequences=1) - return ouputs[0]['generated_text'] - - -description = """ -

This experiment aims to test the ability of an artificial intelligence model to generate text from patterns observed in the sorcery of poetic language in Spanish. It is something fascinating that Borges touches on in a lecture from his Arte Poética: "I use the word 'model' because the metaphors I am going to cite, although they may seem very different to the imagination, would be almost identical for a logician. So we could speak of them as equations."

Contact: Jorge Henao 🇨🇴 Twitter LinkedIn

        -""" -article = """ -

        -

Why sonnets?

This space uses a gpt-2 model trained on a corpus of Spanish sonnets from the 15th to the 20th century (DISCO), which was ported to the Hugging Face Datasets format. The choice of sonnets comes down to one fundamental reason: their specific style and meter, seeking, among other things, consistency in the rhyme and coherence in the generated text.

        -

        -

On the impact of artificial intelligence on art

Poetry, that light, winged, and sacred thing, has accompanied humanity as an artistic expression since time immemorial, from orality to writing. What it may mean for artificial intelligence to become involved in an artistic expression that borders on the spiritual, on reflection and the contemplation of beauty, goes beyond the scope of this experiment. What is the impact of art generated with artificial intelligence? How can that impact be measured? These are some of the questions at hand.
On the other hand, there is the human-machine symbiosis when it comes to co-creating together. The possibilities shown in this experiment also illustrate a scenario in which artificial intelligence augments and/or complements a writer's creativity in the creative act, thereby contributing to there being more beautiful creations in the world that generate well-being in people (SDG 3). The discussion is open, and this space aims to contribute to that reflection.

        - -

        👉 Don't miss this other cool space based on different styles of poetry in spanish: poem-generation-es 👈

        -""" -examples = [ - ['al despertar el dinosaurio todavía estaba ahí', 20 ], - ['La inmensidad de tu imaginación', 140], - ['con ansias de amores inflamada', 140] -] - -iface = gr.Interface(fn=make_new_sonnet, - title= "Generador de poesía basada en sonetos en español", - description = description, - inputs=[ - gr.inputs.Textbox(lines=2, placeholder="Escrbe algo para comenzar", label='Escribe algo para comenzar'), - gr.inputs.Slider(minimum = 20, maximum = 200, default = 140, step = 5, label='Salida máxima de caracteres')], - outputs=[ - gr.outputs.Textbox(label="Tu poema"), - ], - article= article, - examples = examples, - theme = 'peach' - ) -iface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/hackathon-somos-nlp-2023/SalpiBloomZ-1b7-v1/README.md b/spaces/hackathon-somos-nlp-2023/SalpiBloomZ-1b7-v1/README.md deleted file mode 100644 index 84fd4f0a3387d5497ba779ca4872d199d5c53c9c..0000000000000000000000000000000000000000 --- a/spaces/hackathon-somos-nlp-2023/SalpiBloomZ-1b7-v1/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: SalpiBloomZ 1b7 V1 -emoji: 🐢 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -## Motivación - -El presente trabajo se basa en la idea de entrenar modelos de procesamiento natural del lenguaje usando LLMs en base a datasets especializados. - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hands012/gpt-academic/request_llm/edge_gpt.py b/spaces/hands012/gpt-academic/request_llm/edge_gpt.py deleted file mode 100644 index bbf84000d84a42de80d3c051a24f06336af76aaf..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/request_llm/edge_gpt.py +++ /dev/null @@ -1,409 +0,0 @@ -""" -======================================================================== -第一部分:来自EdgeGPT.py -https://github.com/acheong08/EdgeGPT -======================================================================== -""" - -import argparse -import asyncio -import json -import os -import random -import re -import ssl -import sys -import uuid -from enum import Enum -from typing import Generator -from typing import Literal -from typing import Optional -from typing import Union -import websockets.client as websockets - -DELIMITER = "\x1e" - - -# Generate random IP between range 13.104.0.0/14 -FORWARDED_IP = ( - f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" -) - -HEADERS = { - "accept": "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32", - "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx", - "Referrer-Policy": "origin-when-cross-origin", - "x-forwarded-for": FORWARDED_IP, -} - -HEADERS_INIT_CONVER = { - "authority": "edgeservices.bing.com", - 
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", - "accept-language": "en-US,en;q=0.9", - "cache-control": "max-age=0", - "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"110.0.1587.69"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": '""', - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "document", - "sec-fetch-mode": "navigate", - "sec-fetch-site": "none", - "sec-fetch-user": "?1", - "upgrade-insecure-requests": "1", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69", - "x-edge-shopping-flag": "1", - "x-forwarded-for": FORWARDED_IP, -} - -def get_ssl_context(): - import certifi - ssl_context = ssl.create_default_context() - ssl_context.load_verify_locations(certifi.where()) - return ssl_context - - - -class NotAllowedToAccess(Exception): - pass - - -class ConversationStyle(Enum): - creative = "h3imaginative,clgalileo,gencontentv3" - balanced = "galileo" - precise = "h3precise,clgalileo" - - -CONVERSATION_STYLE_TYPE = Optional[ - Union[ConversationStyle, Literal["creative", "balanced", "precise"]] -] - - -def _append_identifier(msg: dict) -> str: - """ - Appends special character to end of message to identify end of message - """ - # Convert dict to json string - return json.dumps(msg) + DELIMITER - - -def _get_ran_hex(length: int = 32) -> str: - """ - Returns random hex string - """ - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -class _ChatHubRequest: - """ - Request object for ChatHub - """ - - def __init__( - self, - conversation_signature: str, - client_id: str, - conversation_id: str, - invocation_id: int = 0, - ) -> None: - self.struct: dict = {} - - self.client_id: str = client_id - self.conversation_id: str = conversation_id - self.conversation_signature: str = conversation_signature - self.invocation_id: int = invocation_id - - def update( - self, - prompt, - conversation_style, - options, - ) -> None: - """ - Updates request object - """ - if options is None: - options = [ - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "enablemm", - ] - if conversation_style: - if not isinstance(conversation_style, ConversationStyle): - conversation_style = getattr(ConversationStyle, conversation_style) - options = [ - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - conversation_style.value, - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - ] - self.struct = { - "arguments": [ - { - "source": "cib", - "optionsSets": options, - "sliceIds": [ - "222dtappid", - "225cricinfo", - "224locals0", - ], - "traceId": _get_ran_hex(32), - "isStartOfSession": self.invocation_id == 0, - "message": { - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - }, - "conversationSignature": self.conversation_signature, - "participant": { - "id": self.client_id, - }, - "conversationId": self.conversation_id, - }, - ], - "invocationId": str(self.invocation_id), - "target": "chat", - "type": 4, - } - self.invocation_id += 1 - - -class _Conversation: - """ - 
Conversation API - """ - - def __init__( - self, - cookies, - proxy, - ) -> None: - self.struct: dict = { - "conversationId": None, - "clientId": None, - "conversationSignature": None, - "result": {"value": "Success", "message": None}, - } - import httpx - self.proxy = proxy - proxy = ( - proxy - or os.environ.get("all_proxy") - or os.environ.get("ALL_PROXY") - or os.environ.get("https_proxy") - or os.environ.get("HTTPS_PROXY") - or None - ) - if proxy is not None and proxy.startswith("socks5h://"): - proxy = "socks5://" + proxy[len("socks5h://") :] - self.session = httpx.Client( - proxies=proxy, - timeout=30, - headers=HEADERS_INIT_CONVER, - ) - for cookie in cookies: - self.session.cookies.set(cookie["name"], cookie["value"]) - - # Send GET request - response = self.session.get( - url=os.environ.get("BING_PROXY_URL") - or "https://edgeservices.bing.com/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - response = self.session.get( - "https://edge.churchless.tech/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - print(f"Status code: {response.status_code}") - print(response.text) - print(response.url) - raise Exception("Authentication failed") - try: - self.struct = response.json() - except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc: - raise Exception( - "Authentication failed. You have not been accepted into the beta.", - ) from exc - if self.struct["result"]["value"] == "UnauthorizedRequest": - raise NotAllowedToAccess(self.struct["result"]["message"]) - - -class _ChatHub: - """ - Chat API - """ - - def __init__(self, conversation) -> None: - self.wss = None - self.request: _ChatHubRequest - self.loop: bool - self.task: asyncio.Task - print(conversation.struct) - self.request = _ChatHubRequest( - conversation_signature=conversation.struct["conversationSignature"], - client_id=conversation.struct["clientId"], - conversation_id=conversation.struct["conversationId"], - ) - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - if self.wss and not self.wss.closed: - await self.wss.close() - # Check if websocket is closed - self.wss = await websockets.connect( - wss_link, - extra_headers=HEADERS, - max_size=None, - ssl=get_ssl_context() - ) - await self._initial_handshake() - # Construct a ChatHub request - self.request.update( - prompt=prompt, - conversation_style=conversation_style, - options=options, - ) - # Send request - await self.wss.send(_append_identifier(self.request.struct)) - final = False - while not final: - objects = str(await self.wss.recv()).split(DELIMITER) - for obj in objects: - if obj is None or not obj: - continue - response = json.loads(obj) - if response.get("type") != 2 and raw: - yield False, response - elif response.get("type") == 1 and response["arguments"][0].get( - "messages", - ): - resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][ - 0 - ]["body"][0].get("text") - yield False, resp_txt - elif response.get("type") == 2: - final = True - yield True, response - - async def _initial_handshake(self) -> None: - await self.wss.send(_append_identifier({"protocol": "json", "version": 1})) - await self.wss.recv() - - async def close(self) -> None: - """ - Close the connection - """ - if self.wss and not self.wss.closed: - await self.wss.close() - - -class NewbingChatbot: - """ - Combines everything to 
make it seamless - """ - - def __init__( - self, - cookies, - proxy - ) -> None: - if cookies is None: - cookies = {} - self.cookies = cookies - self.proxy = proxy - self.chat_hub: _ChatHub = _ChatHub( - _Conversation(self.cookies, self.proxy), - ) - - async def ask( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - options: dict = None, - ) -> dict: - """ - Ask a question to the bot - """ - async for final, response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - options=options, - ): - if final: - return response - await self.chat_hub.wss.close() - return None - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - async for response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - raw=raw, - options=options, - ): - yield response - - async def close(self) -> None: - """ - Close the connection - """ - await self.chat_hub.close() - - async def reset(self) -> None: - """ - Reset the conversation - """ - await self.close() - self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy)) - - diff --git a/spaces/haseeb-heaven/AutoBard-Coder/bash_src/code_runner.sh b/spaces/haseeb-heaven/AutoBard-Coder/bash_src/code_runner.sh deleted file mode 100644 index ebfd35ea209ce7cf80217eb6965ccc46dfd4b8c9..0000000000000000000000000000000000000000 --- a/spaces/haseeb-heaven/AutoBard-Coder/bash_src/code_runner.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/bash - -filename=$1 -debug=0 -cpp_version="c++17" - -if [[ $filename == *.c ]]; then - extension=".c" - compiler="gcc" - language="c" -elif [[ $filename == *.cpp ]]; then - extension=".cpp" - compiler="g++" - language="c++" -elif [[ $filename == *.java ]]; then - extension=".java" - compiler="javac" - language="java" -elif [[ $filename == *.go ]]; then - extension=".go" - compiler="go run" - language="go" -elif [[ $filename == *.cs ]]; then - extension=".cs" - compiler="csc" - language="csharp" -elif [[ $filename == *.swift ]]; then - extension=".swift" - compiler="swift" - language="swift" -# add for python -elif [[ $filename == *.py ]]; then - extension=".py" - compiler="python3" - language="python" -elif [[ $filename == *.js ]]; then - extension=".js" - compiler="node" - language="javascript" -elif [[ $filename == *.rs ]]; then - extension=".rs" - compiler="rustc" - language="rust" -else - echo "Error: Unsupported file type" - exit 1 -fi - -if [ $language == "c++" ]; then - if [[ $3 == c++* ]]; then - version=${3#c++} - if [[ $version == 17 || $version == 14 || $version == 11 || $version == 0x ]]; then - cpp_version="c++$version" - fi - fi -fi - -if [[ $2 == "--debug" ]]; then - debug=1 -fi - -if [ $debug -eq 1 ]; then - if [ $language == "c++" ]; then - echo "Compiling $filename with $compiler (C++ $cpp_version)..." - else - echo "Compiling $filename with $compiler..." 
- fi -fi - -if [ $language == "c" ]; then - $compiler $filename -o ${filename%.*} -elif [ $language == "c++" ]; then - $compiler $filename -std=$cpp_version -o ${filename%.*} -elif [ $language == "java" ]; then - $compiler $filename -elif [ $language == "go" ]; then - $compiler $filename -elif [ $language == "csharp" ]; then - $compiler /out:${filename%.*}.exe $filename -elif [ $language == "swift" ]; then - $compiler $filename -elif [ $language == "python" ]; then - $compiler $filename -elif [ $language == "javascript" ]; then - $compiler $filename -elif [ $language == "rust" ]; then - $compiler $filename -else - echo "Error: Unsupported file type" - exit 1 -fi - -if [ $? -ne 0 ]; then - echo "Compilation failed" - exit 1 -fi - -if [ $debug -eq 1 ]; then - echo "Running ${filename%.*}..." -fi - -if [ $language == "java" ]; then - java ${filename%.*} -elif [ $language == "go" ]; then - $compiler $filename -else - ./${filename%.*} -fi - -if [ $debug -eq 1 ]; then - echo "Finished running ${filename%.*}" -fi - diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py deleted file mode 100644 index 68d0ce5dc442864474bb1086bf04d6e40708c190..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import os -import pkg_resources -import torch - -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg -from detectron2.modeling import build_model - - -class _ModelZooUrls(object): - """ - Mapping from names to officially released Detectron2 pre-trained models. 
- """ - - S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - - # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl - CONFIG_PATH_TO_URL_SUFFIX = { - # COCO Detection with Faster R-CNN - "COCO-Detection/faster_rcnn_R_50_C4_1x.yaml": "137257644/model_final_721ade.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml": "137847829/model_final_51d356.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml": "137257794/model_final_b275ba.pkl", - "COCO-Detection/faster_rcnn_R_50_C4_3x.yaml": "137849393/model_final_f97cb7.pkl", - "COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml": "137849425/model_final_68d202.pkl", - "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml": "137849458/model_final_280758.pkl", - "COCO-Detection/faster_rcnn_R_101_C4_3x.yaml": "138204752/model_final_298dad.pkl", - "COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml": "138204841/model_final_3e0943.pkl", - "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml": "137851257/model_final_f6e8b1.pkl", - "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml": "139173657/model_final_68b088.pkl", - # COCO Detection with RetinaNet - "COCO-Detection/retinanet_R_50_FPN_1x.yaml": "137593951/model_final_b796dc.pkl", - "COCO-Detection/retinanet_R_50_FPN_3x.yaml": "137849486/model_final_4cafe0.pkl", - "COCO-Detection/retinanet_R_101_FPN_3x.yaml": "138363263/model_final_59f53c.pkl", - # COCO Detection with RPN and Fast R-CNN - "COCO-Detection/rpn_R_50_C4_1x.yaml": "137258005/model_final_450694.pkl", - "COCO-Detection/rpn_R_50_FPN_1x.yaml": "137258492/model_final_02ce48.pkl", - "COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml": "137635226/model_final_e5f7ce.pkl", - # COCO Instance Segmentation Baselines with Mask R-CNN - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml": "137259246/model_final_9243eb.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml": "137260150/model_final_4f86c3.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "137260431/model_final_a54504.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml": "137849525/model_final_4ce675.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml": "137849551/model_final_84107b.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml": "137849600/model_final_f10217.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml": "138363239/model_final_a2914c.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml": "138363294/model_final_0464b7.pkl", - "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml": "138205316/model_final_a3ec72.pkl", - "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml": "139653917/model_final_2d9806.pkl", # noqa - # COCO Person Keypoint Detection Baselines with Keypoint R-CNN - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml": "137261548/model_final_04e291.pkl", - "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml": "137849621/model_final_a6e10b.pkl", - "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml": "138363331/model_final_997cc7.pkl", - "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml": "139686956/model_final_5ad38f.pkl", - # COCO Panoptic Segmentation Baselines with Panoptic FPN - "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml": "139514544/model_final_dbfeb4.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml": "139514569/model_final_c10459.pkl", - "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml": "139514519/model_final_cafdb1.pkl", - # LVIS Instance Segmentation Baselines with Mask R-CNN - "LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "144219072/model_final_571f7c.pkl", - 
"LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml": "144219035/model_final_824ab5.pkl", - "LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml": "144219108/model_final_5e3439.pkl", # noqa - # Cityscapes & Pascal VOC Baselines - "Cityscapes/mask_rcnn_R_50_FPN.yaml": "142423278/model_final_af9cf5.pkl", - "PascalVOC-Detection/faster_rcnn_R_50_C4.yaml": "142202221/model_final_b1acc2.pkl", - # Other Settings - "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml": "138602867/model_final_65c703.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml": "144998336/model_final_821d0b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml": "138602847/model_final_e9d89b.pkl", - "Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml": "144998488/model_final_480dd8.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml": "169527823/model_final_3b3c51.pkl", - "Misc/mask_rcnn_R_50_FPN_3x_gn.yaml": "138602888/model_final_dc5d9e.pkl", - "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml": "138602908/model_final_01ca85.pkl", - "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml": "139797668/model_final_be35db.pkl", - "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml": "18131413/model_0039999_e76410.pkl", # noqa - # D1 Comparisons - "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml": "137781054/model_final_7ab50c.pkl", # noqa - "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml": "137781281/model_final_62ca52.pkl", # noqa - "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml": "137781195/model_final_cce136.pkl", - } - - -def get_checkpoint_url(config_path): - """ - Returns the URL to the model trained using the given config - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: a URL to the model - """ - name = config_path.replace(".yaml", "") - if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: - suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path] - return _ModelZooUrls.S3_PREFIX + name + "/" + suffix - raise RuntimeError("{} not available in Model Zoo!".format(name)) - - -def get_config_file(config_path): - """ - Returns path to a builtin config file. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - Returns: - str: the real path to the config file. - """ - cfg_file = pkg_resources.resource_filename( - "detectron2.model_zoo", os.path.join("configs", config_path) - ) - if not os.path.exists(cfg_file): - raise RuntimeError("{} not available in Model Zoo!".format(config_path)) - return cfg_file - - -def get(config_path, trained: bool = False): - """ - Get a model specified by relative path under Detectron2's official ``configs/`` directory. - - Args: - config_path (str): config file name relative to detectron2's "configs/" - directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - trained (bool): If True, will initialize the model with the trained model zoo weights. - If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used - instead; this will typically (though not always) initialize a subset of weights using - an ImageNet pre-trained model, while randomly initializing the other weights. - - Example: - - .. 
code-block:: python - - from detectron2 import model_zoo - model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) - """ - cfg_file = get_config_file(config_path) - - cfg = get_cfg() - cfg.merge_from_file(cfg_file) - if trained: - cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) - if not torch.cuda.is_available(): - cfg.MODEL.DEVICE = "cpu" - - model = build_model(cfg) - DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) - return model diff --git a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Touchfwtools-Apk.md b/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Touchfwtools-Apk.md deleted file mode 100644 index f5cf6f40ab71df776da71f62122072ce7d86a076..0000000000000000000000000000000000000000 --- a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Touchfwtools-Apk.md +++ /dev/null @@ -1,59 +0,0 @@ -## Touchfwtools Apk - - - -**Click Here 🆗 [https://ditzcosupo.blogspot.com/?d=2twsiu](https://ditzcosupo.blogspot.com/?d=2twsiu)** - - - -# What is Touchfwtools Apk and How to Use It? - - - -Touchfwtools Apk is a free application that can help you fix some common problems with your touchscreen. If you have ever experienced issues such as unresponsive touch, inaccurate calibration, or slow performance, you might want to give this app a try. - - - -Touchfwtools Apk has two main features: file checking and installation folder selection. The file checking feature scans every file and its size before copying them to flash, which can prevent errors and corruption. The installation folder selection feature allows you to choose where you want to install the app, which can save space and improve speed. - - - -To use Touchfwtools Apk, you need to download it from a reliable source[^1^] [^2^] [^3^] and install it on your device. Then, you can launch the app and follow the instructions on the screen. You can also customize some settings such as language, theme, and vibration. After the app finishes its work, you can reboot your device and enjoy a smoother touchscreen experience. - - - -Touchfwtools Apk is not the only app that can help you fix touchscreen problems. There are some other apps that you can try if Touchfwtools Apk does not work for you or if you want to improve your touchscreen performance even more. - - - -One of these apps is \*\*Touchscreen Repair\*\*[^3^], which is a lightweight app that calibrates your screen to reduce response times. The app asks you to consecutively tap several sections of the touchscreen. Based on your phone's internals, it then artificially drops the latency as much as possible. - - - -Another app that you can use is \*\*Touchscreen Calibration\*\*[^2^], which is a tool that helps you calibrate the touchscreen of your Android phone. There are many issues due to which the touch screen of the phone could be not working properly. If this problem occurs, there are chances that your phone needs calibration. This app can help you with that by guiding you through some simple steps. - - - -These are some of the best apps that can help you fix touchscreen problems on your Android device. However, if none of these apps work for you, you might need to contact the manufacturer of your device or take it to a repair shop. - - - -If you don't want to use any apps to fix touchscreen problems, you can also try some other ways that might work without installing anything. Here are some of them: - - - -- Restart your device. Sometimes, a simple reboot can solve many issues, including touchscreen problems. 
To restart your device, press and hold the power button until you see a menu with options such as Power off and Restart. Choose Restart and wait for your device to boot up again. - -- Update your device. Updating your device can fix some bugs and glitches that might affect your touchscreen performance. To update your device, go to Settings > System > System update and check for any available updates. If there are any, download and install them. - -- Calibrate your touchscreen. Calibrating your touchscreen can improve its accuracy and responsiveness. To calibrate your touchscreen, go to Settings > Display > Touchscreen calibration and follow the instructions on the screen. - -- Clean your touchscreen. Dirt, dust, or grease can interfere with your touchscreen sensitivity and cause problems. To clean your touchscreen, turn off your device and disconnect the power cable. Then, use a soft cloth or a microfiber cloth to gently wipe the screen. Do not use any harsh chemicals or abrasive materials that might damage the screen. - -- Disable and re-enable your touchscreen. Disabling and re-enabling your touchscreen can reset its settings and clear any errors. To disable and re-enable your touchscreen, go to Settings > Device > Device manager and tap on Human Interface Devices. Then, tap on HID-compliant touch screen and select Disable device. Confirm your choice by tapping Yes. Then, tap on HID-compliant touch screen again and select Enable device. - - - -These are some of the ways to fix touchscreen problems without apps. However, if none of these methods work for you, you might need to contact the manufacturer of your device or take it to a repair shop. - - 1b8d091108 \ No newline at end of file diff --git a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/__init__.py b/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/__init__.py deleted file mode 100644 index a2f5835bbc2e5b03a8b33464008e8183c04307da..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/customized_langchain/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from streamlit_langchain_chat.customized_langchain.docstore.in_memory import InMemoryDocstore -from streamlit_langchain_chat.customized_langchain.vectorstores import FAISS -from streamlit_langchain_chat.customized_langchain.vectorstores import Pinecone - - -__all__ = [ - "FAISS", - "InMemoryDocstore", - "Pinecone", -] diff --git a/spaces/housexu123/bingo-2.0/src/components/chat-attachments.tsx b/spaces/housexu123/bingo-2.0/src/components/chat-attachments.tsx deleted file mode 100644 index ef43d4e262935d263b6099138c56f7daade5299d..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/src/components/chat-attachments.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import Image from 'next/image' -import ClearIcon from '@/assets/images/clear.svg' -import RefreshIcon from '@/assets/images/refresh.svg' -import { FileItem } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' -import { useBing } from '@/lib/hooks/use-bing' - -type ChatAttachmentsProps = Pick, 'attachmentList' | 'setAttachmentList' | 'uploadImage'> - -export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) { - return attachmentList.length ? ( -
-    <div className="attachment-list">
-      {attachmentList.map(file => (
-        <div className="attachment-item" key={file.url}>
-          {file.status === 'loading' && (
-            <div className="loading"><div className="bar" /></div>
-          )}
-          {file.status !== 'error' && (
-            <div className="thumbnail"><img src={file.url} alt="attachment" /></div>
-          )}
-          {file.status === 'error' && (
-            <div className="error">
-              <Image alt="refresh" src={RefreshIcon} onClick={() => uploadImage(file.url)} />
-            </div>
-          )}
-          <Image alt="clear" src={ClearIcon} onClick={() => setAttachmentList(attachmentList.filter(item => item.url !== file.url))} />
-        </div>
-      ))}
-    </div>
        - ) : null -} diff --git a/spaces/huggingface-projects/wordalle/frontend/src/app.css b/spaces/huggingface-projects/wordalle/frontend/src/app.css deleted file mode 100644 index 7ae0ebc8e68dc5f307e5f952284673a916da9b70..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/wordalle/frontend/src/app.css +++ /dev/null @@ -1,25 +0,0 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; - -:root { - --cols: 19; -} - -.correct, -.present, -.absent { - color: #fff !important; -} - -.correct { - background-color: #6aaa64 !important; -} - -.present { - background-color: #c9b458 !important; -} - -.absent { - background-color: #787c7e !important; -} diff --git a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/tests/test_glossaries.py b/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/tests/test_glossaries.py deleted file mode 100644 index 2ff7da19fb00a8b8c9e7d33a67d6db4f0c72ef6c..0000000000000000000000000000000000000000 --- a/spaces/hussain-shk/IndiSent/subword-nmt/subword_nmt/tests/test_glossaries.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import unittest -import mock - -import os,sys,inspect -currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) -parentdir = os.path.dirname(currentdir) -sys.path.insert(0,parentdir) - -from apply_bpe import isolate_glossary, BPE - -class TestIsolateGlossaryFunction(unittest.TestCase): - - def setUp(self): - self.glossary = 'like' - - def _run_test_case(self, test_case): - orig, expected = test_case - out = isolate_glossary(orig, self.glossary) - self.assertEqual(out, expected) - - def test_empty_string(self): - orig = '' - exp = [''] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_no_glossary(self): - orig = 'word' - exp = ['word'] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_isolated_glossary(self): - orig = 'like' - exp = ['like'] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_word_one_side(self): - orig = 'likeword' - exp = ['like', 'word'] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_words_both_sides(self): - orig = 'wordlikeword' - exp = ['word', 'like', 'word'] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_back_to_back_glossary(self): - orig = 'likelike' - exp = ['like', 'like'] - test_case = (orig, exp) - self._run_test_case(test_case) - - def test_multiple_glossaries(self): - orig = 'wordlikewordlike' - exp = ['word', 'like', 'word', 'like'] - test_case = (orig, exp) - self._run_test_case(test_case) - -class TestBPEIsolateGlossariesMethod(unittest.TestCase): - - def setUp(self): - - amock = mock.MagicMock() - amock.readline.return_value = 'something' - glossaries = ['like', 'Manuel', 'USA'] - self.bpe = BPE(amock, glossaries=glossaries) - - def _run_test_case(self, test_case): - orig, expected = test_case - out = self.bpe._isolate_glossaries(orig) - self.assertEqual(out, expected) - - def test_multiple_glossaries(self): - orig = 'wordlikeUSAwordManuelManuelwordUSA' - exp = ['word', 'like', 'USA', 'word', 'Manuel', 'Manuel', 'word', 'USA'] - test_case = (orig, exp) - self._run_test_case(test_case) - -class TestRegexIsolateGlossaries(unittest.TestCase): - - def setUp(self): - - amock = mock.MagicMock() - amock.readline.return_value = 'something' - glossaries = ["\w*", "\w*", "\d+"] - self.bpe = BPE(amock, glossaries=glossaries) - - def _run_test_case(self, test_case): - orig, expected = test_case - out = 
self.bpe._isolate_glossaries(orig) - self.assertEqual(out, expected) - - def test_regex_glossaries(self): - orig = 'wordlikeUSAword10001wordManuelwordUSA' - exp = ['wordlike', 'USA', 'word', '10001', 'word', 'Manuel', 'word', 'USA'] - test_case = (orig, exp) - self._run_test_case(test_case) - -def encode_mock(segment, x2, x3, x4, x5, x6, x7, glosses, dropout): - if glosses.match(segment): - return (segment,) - else: - l = len(segment) - return (segment[:l//2], segment[l//2:]) - -class TestBPESegmentMethod(unittest.TestCase): - - def setUp(self): - - amock = mock.MagicMock() - amock.readline.return_value = 'something' - glossaries = ['like', 'Manuel', 'USA'] - self.bpe = BPE(amock, glossaries=glossaries) - - @mock.patch('apply_bpe.encode', side_effect=encode_mock) - def _run_test_case(self, test_case, encode_function): - - orig, expected = test_case - out = self.bpe.segment(orig) - - self.assertEqual(out, expected) - - def test_multiple_glossaries(self): - orig = 'wordlikeword likeManuelword' - exp = 'wo@@ rd@@ like@@ wo@@ rd like@@ Manuel@@ wo@@ rd' - test_case = (orig, exp) - self._run_test_case(test_case) - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/replicate.py b/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. 
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/ilmhona/api/ml.py b/spaces/ilmhona/api/ml.py deleted file mode 100644 index 50c6147e74f6c120a5ed6aa06f6ec583a6944681..0000000000000000000000000000000000000000 --- a/spaces/ilmhona/api/ml.py +++ /dev/null @@ -1,265 +0,0 @@ -import openai -import os -import logging -from langchain.document_loaders.csv_loader import CSVLoader -from langchain.embeddings import OpenAIEmbeddings -from langchain.vectorstores import FAISS -from langchain.text_splitter import RecursiveCharacterTextSplitter -import chardet -import time -from deep_translator import GoogleTranslator -from config import USER_PROMPT, CITE_PROMPT, SYSTEM_MESSAGE -from openai.error import OpenAIError -from deep_translator.exceptions import LanguageNotSupportedException -import json -import sqlite3 - -# Initialize logging -logging.basicConfig(level=logging.INFO) - - -class DocumentLoader: - """ - DocumentLoader class is responsible for loading and processing documents. - """ - - def __init__(self, openai_key: str, csv_file_path: str): - """ - Initialize DocumentLoader with OpenAI key, CSV file path and filename. - """ - self.openai_key = openai_key - openai.api_key = openai_key - try: - self.embedding = OpenAIEmbeddings(openai_api_key=openai_key) - except OpenAIError as e: - raise ValueError(f"Invalid OpenAI API Key: {e}") - self.csv_file_path = csv_file_path - self.vectordb = self.initialize_vector_db() - - def get_text_chunk(self, documents: list) -> list: - """ - Split the documents into chunks. - """ - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1500, - chunk_overlap=150, - ) - try: - splits = text_splitter.split_documents(documents) - except Exception as e: - raise ValueError( - f"RecursiveCharacterTextSplitter failed to split documents: {e}" - ) - return splits - - def get_csv_text(self, csv_file_path: str) -> dict: - """ - Load and return the text from the CSV file. 
- """ - if not os.path.exists(csv_file_path): - raise FileNotFoundError(f"CSV file not found at {csv_file_path}") - encoding = self.detect_encoding(csv_file_path) - - try: - loader = CSVLoader( - file_path=csv_file_path, - source_column="file_name", - encoding=encoding, - ) - data = loader.load() - except Exception as e: - raise ValueError(f"CSVLoader failed to load data: {e}") - return data - - def detect_encoding(self, file_path: str) -> str: - """ - Detect and return the encoding of the file. - """ - with open(file_path, "rb") as f: - result = chardet.detect(f.read()) - return result["encoding"] - - def initialize_vector_db(self) -> FAISS: - """ - Initialize and return the vector database. - """ - documents = self.get_csv_text(self.csv_file_path) - splits = self.get_text_chunk(documents) - vectordb = FAISS.from_documents( - documents=splits, - embedding=self.embedding, - ) - return vectordb - - -class AIEducator: - """ - AIEducator class is responsible for generating responses. - """ - - MAX_HISTORY_SIZE = 12 # Define the constant here - - def __init__( - self, - document_loader: DocumentLoader, - db_path: str, - user_prompt: str = USER_PROMPT, - cite_prompt: str = CITE_PROMPT, - system_message: str = SYSTEM_MESSAGE, - ): - """ - Initialize AIEducator with a DocumentLoader and prompts. - """ - self.document_loader = document_loader - self.user_prompt = user_prompt - self.cite_prompt = cite_prompt - self.system_message = system_message - self.relevance_status = None - self.docs = None - self.all_metadata = None - self.db_path = db_path - - def get_chat_history(self, student_id: int) -> list: - """Fetch the chat history for the given student ID from the database.""" - conn = sqlite3.connect(self.db_path) - cur = conn.cursor() - cur.execute( - "SELECT chat_history FROM chat_histories WHERE student_id = ?", - (student_id,), - ) - row = cur.fetchone() - conn.close() - - if not row: - return [] - - full_chat_history = json.loads(row[0]) - original_chat_history = [] - for message in full_chat_history: - if message["role"] == "assistant": - message["content"] = message.get("original_content", message["content"]) - message.pop("original_content", None) - original_chat_history.append(message) - - return original_chat_history - - def update_chat_history(self, student_id: int, chat_history: list): - """Update the chat history for the given student ID in the database.""" - conn = sqlite3.connect(self.db_path) - cur = conn.cursor() - cur.execute( - "INSERT OR REPLACE INTO chat_histories (student_id, chat_history) VALUES (?, ?)", - (student_id, json.dumps(chat_history)), - ) - conn.commit() - conn.close() - - def predict( - self, - student_id: int, - message: str, - student_profile: dict, - target_language: str = None, - retries: int = 0, - ): - """ - Generate and return a response. 
- """ - - # Fetch the existing chat history from the database - chat_history = self.get_chat_history(student_id) - truncated_history = chat_history[-self.MAX_HISTORY_SIZE :] - history_str = "\n".join( - [f"{item['role']}: {item['content']}" for item in truncated_history] - ) - - translator = ( - GoogleTranslator(source="auto", target=target_language) - if target_language - else None - ) - - if message: - start_time = time.time() - try: - docs = self.document_loader.vectordb.similarity_search_with_score( - message - ) - except Exception as e: - raise ValueError(f"Chroma failed to retrieve answer: {e}") - end_time = time.time() - - logging.info("Retrieved documents: %s", docs) - logging.info("Time taken to retrieve docs: %s", end_time - start_time) - - all_metadata = [document.metadata for document, score in docs] - - if docs[0][1] < 0.25: - self.relevance_status = "REQUIRED" - logging.info( - f"Retrieved docs will be {self.relevance_status} to generate answer, because the similarity score of the first document is {docs[0][1]}" - ) - else: - self.relevance_status = "OPTIONAL" - logging.info( - f"Retrieved docs will be {self.relevance_status} to generate answer, because the similarity score of the first document is {docs[0][1]}" - ) - - logging.info(f"Relevance status: {self.relevance_status}") - - try: - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo-16k", - messages=[ - { - "role": "system", - "content": self.system_message, - }, - { - "role": "user", - "content": self.user_prompt.format( - history=history_str, - student_profile=student_profile, - message=message, - ), - }, - { - "role": "user", - "content": self.cite_prompt.format( - relevance_status=self.relevance_status, - docs=docs, - all_metadata=all_metadata, - ), - }, - ], - temperature=0, - ) - response = response.choices[0].message["content"] - - if translator: - try: - translated_response = translator.translate(response) - except LanguageNotSupportedException as e: - logging.error(f"Translation failed: {e}") - translated_response = response - - chat_history.append( - {"role": "user", "content": message, "language": "original"} - ) - chat_history.append( - { - "role": "assistant", - "content": translated_response, - "original_content": response, - "language": target_language if target_language else "original", - } - ) - - self.update_chat_history(student_id, chat_history) - - return { - "original_response": response, - "translated_response": translated_response, - } - except OpenAIError as e: - logging.error(f"OpenAI ChatCompletion failed to generate response: {e}") diff --git a/spaces/imdebamrita/whatsapp_chat_analysis/app.py b/spaces/imdebamrita/whatsapp_chat_analysis/app.py deleted file mode 100644 index 0e0af9d977ed2ceaac485df5f768d3011eaa1491..0000000000000000000000000000000000000000 --- a/spaces/imdebamrita/whatsapp_chat_analysis/app.py +++ /dev/null @@ -1,215 +0,0 @@ -import streamlit as st -import matplotlib.pyplot as plt -import plotly.express as px -import preprocessor -import helper - - -color_2 = 'rgb(237, 59, 59)' -f = open('number_of_files_upload.txt', 'r+') -num_file_upload = int(f.read()) - -st.set_page_config(page_title="Whatsapp Chat Data Analysis App", - initial_sidebar_state='expanded', - page_icon="📊", - menu_items={ - 'Get Help': 'https://www.linkedin.com/in/imdebamritapaul/', - 'Report a bug': "mailto:imdebamrita@1gmail.com", - 'About': "Introducing the WhatsApp Chat Analysis App! Analyze your chat history, uncover trends, and gain insights into your messaging habits. 
< ___Made by : Debamrita Paul___ > Connect in LinkedIn: https://www.linkedin.com/in/imdebamritapaul/ssssss 📲🚀 #WhatsAppChatAnalysis #DataInsights" - }) - -st.sidebar.title("Whatsapp Chat Analyzer") - - -uploaded_file = st.sidebar.file_uploader("Choose a file", type=['txt']) - -if uploaded_file is not None: - bytes_data = uploaded_file.getvalue() - data = bytes_data.decode("utf-8") - check = preprocessor.checker(data) - - if check: - num_file_upload += 1 - f.seek(0) - f.write(str(num_file_upload)) - - df = preprocessor.preprocess(data) - - # Get unique users - - user_list = df['user'].unique().tolist() - if 'group_notification' in user_list: - user_list.remove('group_notification') - user_list.sort() - user_list.insert(0, 'Overall') - - selected_user = st.sidebar.selectbox("Show analysis wrt", user_list) - - if st.sidebar.button("Show Analysis"): - - # States Area - num_messages, words, num_media_messages, num_links = helper.fetch_states( - selected_user, df) - st.title("Top Statistics") - - col1, col2, col3, col4 = st.columns(4) - with col1: - st.header("Total Messages") - st.title(num_messages) - with col2: - st.header("Total Words") - st.title(words) - with col3: - st.header("Media Shared") - st.title(num_media_messages) - with col4: - st.header("Links Shared") - st.title(num_links) - - # Timeline Data - st.title("Timeline") - col1, col2 = st.columns(2) - # Monthly Timeline - with col1: - monthly_timeline = helper.monthly_timeline(selected_user, df) - fig = px.line(monthly_timeline, x='Timeline', y='Message', - title='Monthly Timeline') - st.plotly_chart(fig, use_container_width=True) - - # Daily Timeline - with col2: - daily_timeline = helper.daily_timeline(selected_user, df) - fig = px.line(daily_timeline, x='Date', y='Message', - title='Daily Timeline') - fig.update_traces(line_color=color_2) - st.plotly_chart(fig, use_container_width=True) - - # Activity Map - st.title("Activity Map") - col1, col2 = st.columns(2) - - # Weekly Activity - with col1: - week_activity = helper.week_activity_map(selected_user, df) - fig = px.bar(week_activity, x='Day', y='Message', - title='Weekly Activity') - fig.update_traces(marker_color=color_2) - st.plotly_chart(fig, use_container_width=True) - - # Monthly Activity - with col2: - month_activity = helper.month_activity_map(selected_user, df) - fig = px.bar(month_activity, x='Month', y='Message', - title='Monthly Activity') - st.plotly_chart(fig, use_container_width=True) - - # Heat Map - user_heatmap = helper.activity_heatmap(selected_user, df) - fig = px.imshow(user_heatmap, title='Activity Heatmap') - st.plotly_chart(fig) - - # Finding the most active user in the Group - if selected_user == 'Overall': - st.title("Most Active Users") - x, per = helper.most_active_user(df) - - col1, col2 = st.columns(2) - - with col1: - fig = px.bar(x, x='User', y='Count') - fig.update_traces(marker_color=color_2) - st.plotly_chart(fig, use_container_width=True) - # st.bar_chart(x) - with col2: - st.dataframe(per) - - # WordCloud - st.title("Word Cloud") - df_wc = helper.create_wordcloud(selected_user, df) - fig, ax = plt.subplots() - ax.imshow(df_wc) - ax.set_axis_off() - st.pyplot(fig) - - # Most common words - most_common_df = helper.most_common_words(selected_user, df) - - st.title("Most Common Words") - - fig = px.bar(most_common_df, - x='Count', y='Message', orientation='h') - st.write(fig) - - # Emoji Analysis - emoji_df = helper.emoji_data(selected_user, df) - st.title("Emoji Analysis") - - col1, col2 = st.columns(2) - - if emoji_df.empty: - with col1: - 
st.dataframe(emoji_df) - else: - - with col1: - st.dataframe(emoji_df) - with col2: - fig = px.pie(emoji_df.head(10), values='Count', - names='Emoji', title='Top 10 Emojis') - st.plotly_chart(fig, use_container_width=True) - - # Data Time Freame - st.title("Data Timeframe") - - timeframe = helper.data_timeframe(df) - - st.write(timeframe) - else: - st.write("In sidebar click on ___Show Analysis___ button...") - else: - st.write("Uploaded file doesn't match with the default format") - st.write("!!! Upload the correct file.") - -else: - st.text("Go to Sidebar and Upload the file") - st.text("How to export Whatsapp chat:") - st.write("- Open the indivitual or group chat.") - st.write("- At the top-right Info (⋮) -> More -> Export chat (Without Media).") - st.write("- Save that .txt file and upload it.") - - -st.sidebar.text("Made by Debamrita Paul") -st.sidebar.write("[Connect ⤴](https://www.linkedin.com/in/imdebamritapaul/)") -st.sidebar.text("Number of Analysis Done: " + str(num_file_upload)) - - -hide_default_format = """ - - """ -st.markdown(hide_default_format, unsafe_allow_html=True) - -custom_footer_style = """ - -""" - -def custom_footer(): - st.markdown('', - unsafe_allow_html=True) - - -st.markdown(custom_footer_style, unsafe_allow_html=True) - -custom_footer() diff --git a/spaces/innnky/nene-emotion/transforms.py b/spaces/innnky/nene-emotion/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/innnky/nene-emotion/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - 
outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + 
((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Descargar Gratis Win8activate.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Descargar Gratis Win8activate.md deleted file mode 100644 index 8aea1c6b1a6950378a9658d05ee58f0c5f0db938..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Descargar Gratis Win8activate.md +++ /dev/null @@ -1,15 +0,0 @@ -

        Descargar Gratis Win8activate


        Download Filehttps://urlin.us/2uEySr



        - -cha khan wagamama recipe book museum of, into science and play. ... /2465242-download-mp3-paw-mp3-new-song-download-2-29-mb-mp3-free-download-joalege ... Download the song joe joe - drizzle for free in mp3 and listen online. -Song lyrics ... -All the songs by Joe Joe. -Drizzle (New Song) Listen / Download. -Download Joe songs - listen to music online for free, Download ... -Joe songs to listen online and download for free. -Listen and download mp3 songs and music you can listen online and download for free Joe Joe. -Drizzle (New Song). -Drizzle (New Song), (C) 2006 J.A.Y.I. - The Best Of Joe Joe - Drizzle (New Song) ... -Joe Joe - Drizzle (New Song) - (C.) 8a78ff9644
        -
        -
        -

        diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/FastPictureViewer Professional Edition V1.9 Build 358 Free Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/FastPictureViewer Professional Edition V1.9 Build 358 Free Download.md deleted file mode 100644 index 06c7f09c2597e205be906bdd10cfc22374077d82..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/FastPictureViewer Professional Edition V1.9 Build 358 Free Download.md +++ /dev/null @@ -1,9 +0,0 @@ - -

        sirvid g4x bb mp3 player crack 3.9.1 1.rar 001.rar fender modified dvxr wms 200 factory accessories bb.rar
        suits of the king 3.0 torrent
        5bd35b6a26 hr2 songz theme 3.0.2 crack serial keygen cd key.rar 11
        torchwood: the new world season 1 part 2 english in hindi torrent
        nostos (2011) trailer hindi download
        tulka superstar version 3.2 (latest) download full toh live ranveer stars full mp3 songs free download
        secret superstar tamil dubbed movie free download

        -

        FastPictureViewer Professional Edition v1.9 build 358 free download


        Downloadhttps://urlin.us/2uEyvH



        -

        4f8c9c8613 15-9-2013 15.01.12 crack serial keygen cd key.rar 11
        nostos (2011) trailer hindi download
        tulka superstar version 3.2 (latest) download full toh live ranveer stars full mp3 songs free download
        secret superstar tamil dubbed movie free download

        -

        5bd35b6a26 best of the best 2019 unlimited full version crack 70 audio cd version [iso] linux amazon player full version torrent code 6 5 x64 win 7 x64 win 8 32bit win 10 32bit 64bit wine [activator] watch movies for free here download full version movie the forgotten evil free download full version torrents 360-2017 вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђ“ вђпигададавай вам пяжакаалаааа вме

        -

        tanugae f91c64177c remake in hindi 720p is mlm.exe gandirii pozitive carte pdf free boxset utorrent full version nulled latest rar 64bit sadykell pro 10.0.2 crack 2020 full build ~upd~ name and password for www naughtyamerica com engineering full crack 30 pro professional free nulled 32bit residential 4.

        -

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Macosx Incl.keygen __TOP__-r211 Allok Virtuales Qued.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Macosx Incl.keygen __TOP__-r211 Allok Virtuales Qued.md deleted file mode 100644 index aa2d126b7f1b0e45b4dcd6aec0016e7839755320..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Macosx Incl.keygen __TOP__-r211 Allok Virtuales Qued.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Macosx Incl.keygen-r211 allok virtuales qued


        Download ⚹⚹⚹ https://urlin.us/2uEvTF



        -
        -Macosx Incl.keygen-r211 allok virtuales qued ... AutoKrypt 11.18 MACOSX Incl Keygen-AMPED.zip . ... CompeGPS land 7.3.1 crack.67. Crack.CompeGPS. 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/inreVtussa/clothingai/Examples/Betternet VPN 5.1.0 Crack Premium Latest 2019 Download HOT.md b/spaces/inreVtussa/clothingai/Examples/Betternet VPN 5.1.0 Crack Premium Latest 2019 Download HOT.md deleted file mode 100644 index da04776bb5dce7706f3a3b2036ef1e9d2ea64b77..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Betternet VPN 5.1.0 Crack Premium Latest 2019 Download HOT.md +++ /dev/null @@ -1,11 +0,0 @@ -
        -

        betternet vpn has provided a simple vpn application. you can use it to connect to different internet services such as facebook, youtube, google, dailymotion, twitch, and many more. all you have to do is enter the specific url of the site you want to visit, and you are connected.

        -

        Betternet VPN 5.1.0 Crack Premium Latest 2019 Download


        Download >>>>> https://tiurll.com/2uClPS



        -

        betternet vpn 5.1.0 crack premium latest 2019 download is a versatile vpn application that enables its users to unblock restricted websites in the web browser. this application uses all-in-one proxy and protects your privacy. it allows you to surf the internet anonymously and safely. furthermore, it has powerful firewall technology, which helps you to protect your device from hackers.

        -

        betternet vpn premium crack 2019 keeps your data private and secure while you are connected to the internet. with this vpn app you can stay anonymous while surfing the web. furthermore, it has been designed with exclusive security and features that enable its users to access the web anonymously.

        -

        the betternet vpn premium crack 2019 provides you a secure and reliable connection to the internet. with this vpn app, you can be anonymous while browsing the internet. you can also access geo-restricted content in the web browser.

        -

        -

        the betternet vpn premium crack 2019 is a versatile vpn application that enables its users to unblock websites in the web browser. it provides you with an advanced vpn application that helps you to access geo-restricted content in the web browser. moreover, it has powerful firewall technology, which helps you to protect your device from hackers.

        -

        betternet vpn premium crack 2019 provides you a secure and reliable connection to the internet. with this vpn app, you can be anonymous while browsing the internet. you can also access geo-restricted content in the web browser.

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Cims Drug Book India Free Download Pdf BEST.md b/spaces/inreVtussa/clothingai/Examples/Cims Drug Book India Free Download Pdf BEST.md deleted file mode 100644 index 5704162c5d180e2b2b2c66ca7fd2c960450cbab2..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Cims Drug Book India Free Download Pdf BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Cims Drug Book India Free Download Pdf


        DOWNLOADhttps://tiurll.com/2uCm8P



        - -Books Clothing & Accessories Electronics Gift Cards Grocery Handmade Health ... Play Flash Games free online Games on the Best Flash Games site, ... Merge PDF, split PDF, compress PDF, office to PDF, PDF to JPG and more! ... Scan the QR code to download the WEBTOON app on the App Store or Google Play. 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/ismot/1702t1/models/other/criterion.py b/spaces/ismot/1702t1/models/other/criterion.py deleted file mode 100644 index 04d0db3913b5dc36afb91798d3d1a33fde63dcb1..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/models/other/criterion.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -@date: 2021/7/19 -@description: -""" -import torch -import loss - -from utils.misc import tensor2np - - -def build_criterion(config, logger): - criterion = {} - device = config.TRAIN.DEVICE - - for k in config.TRAIN.CRITERION.keys(): - sc = config.TRAIN.CRITERION[k] - if sc.WEIGHT is None or float(sc.WEIGHT) == 0: - continue - criterion[sc.NAME] = { - 'loss': getattr(loss, sc.LOSS)(), - 'weight': float(sc.WEIGHT), - 'sub_weights': sc.WEIGHTS, - 'need_all': sc.NEED_ALL - } - - criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].to(device) - if config.AMP_OPT_LEVEL != "O0" and 'cuda' in device: - criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].type(torch.float16) - - # logger.info(f"Build criterion:{sc.WEIGHT}_{sc.NAME}_{sc.LOSS}_{sc.WEIGHTS}") - return criterion - - -def calc_criterion(criterion, gt, dt, epoch_loss_d): - loss = None - postfix_d = {} - for k in criterion.keys(): - if criterion[k]['need_all']: - single_loss = criterion[k]['loss'](gt, dt) - ws_loss = None - for i, sub_weight in enumerate(criterion[k]['sub_weights']): - if sub_weight == 0: - continue - if ws_loss is None: - ws_loss = single_loss[i] * sub_weight - else: - ws_loss = ws_loss + single_loss[i] * sub_weight - single_loss = ws_loss if ws_loss is not None else single_loss - else: - assert k in gt.keys(), "ground label is None:" + k - assert k in dt.keys(), "detection key is None:" + k - if k == 'ratio' and gt[k].shape[-1] != dt[k].shape[-1]: - gt[k] = gt[k].repeat(1, dt[k].shape[-1]) - single_loss = criterion[k]['loss'](gt[k], dt[k]) - - postfix_d[k] = tensor2np(single_loss) - if k not in epoch_loss_d.keys(): - epoch_loss_d[k] = [] - epoch_loss_d[k].append(postfix_d[k]) - - single_loss = single_loss * criterion[k]['weight'] - if loss is None: - loss = single_loss - else: - loss = loss + single_loss - - k = 'loss' - postfix_d[k] = tensor2np(loss) - if k not in epoch_loss_d.keys(): - epoch_loss_d[k] = [] - epoch_loss_d[k].append(postfix_d[k]) - return loss, postfix_d, epoch_loss_d diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/fuse_modules.py deleted file mode 100644 index 2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/fuse_modules.py +++ /dev/null @@ -1,297 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import DropPath - - -class FeatureResizer(nn.Module): - """ - This class takes as input a set of embeddings of dimension C1 and outputs a set of - embedding of dimension C2, after a linear transformation, dropout and normalization (LN). 
- """ - - def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): - super().__init__() - self.do_ln = do_ln - # Object feature encoding - self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) - self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) - self.dropout = nn.Dropout(dropout) - - def forward(self, encoder_features): - x = self.fc(encoder_features) - if self.do_ln: - x = self.layer_norm(x) - output = self.dropout(x) - return output - - -def l1norm(X, dim, eps=1e-8): - """L1-normalize columns of X""" - norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps - X = torch.div(X, norm) - return X - - -def l2norm(X, dim, eps=1e-8): - """L2-normalize columns of X""" - norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps - X = torch.div(X, norm) - return X - - -def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): - """ - query: (n_context, queryL, d) - context: (n_context, sourceL, d) - """ - batch_size_q, queryL = query.size(0), query.size(1) - batch_size, sourceL = context.size(0), context.size(1) - - # Get attention - # --> (batch, d, queryL) - queryT = torch.transpose(query, 1, 2) - - # (batch, sourceL, d)(batch, d, queryL) - # --> (batch, sourceL, queryL) - attn = torch.bmm(context, queryT) - if raw_feature_norm == "softmax": - # --> (batch*sourceL, queryL) - attn = attn.view(batch_size * sourceL, queryL) - attn = nn.Softmax()(attn) - # --> (batch, sourceL, queryL) - attn = attn.view(batch_size, sourceL, queryL) - elif raw_feature_norm == "l2norm": - attn = l2norm(attn, 2) - elif raw_feature_norm == "clipped_l2norm": - attn = nn.LeakyReLU(0.1)(attn) - attn = l2norm(attn, 2) - else: - raise ValueError("unknown first norm type:", raw_feature_norm) - # --> (batch, queryL, sourceL) - attn = torch.transpose(attn, 1, 2).contiguous() - # --> (batch*queryL, sourceL) - attn = attn.view(batch_size * queryL, sourceL) - attn = nn.Softmax()(attn * smooth) - # --> (batch, queryL, sourceL) - attn = attn.view(batch_size, queryL, sourceL) - # --> (batch, sourceL, queryL) - attnT = torch.transpose(attn, 1, 2).contiguous() - - # --> (batch, d, sourceL) - contextT = torch.transpose(context, 1, 2) - # (batch x d x sourceL)(batch x sourceL x queryL) - # --> (batch, d, queryL) - weightedContext = torch.bmm(contextT, attnT) - # --> (batch, queryL, d) - weightedContext = torch.transpose(weightedContext, 1, 2) - - return weightedContext, attnT - - -class BiMultiHeadAttention(nn.Module): - def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): - super(BiMultiHeadAttention, self).__init__() - - self.embed_dim = embed_dim - self.num_heads = num_heads - self.head_dim = embed_dim // num_heads - self.v_dim = v_dim - self.l_dim = l_dim - - assert ( - self.head_dim * self.num_heads == self.embed_dim - ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
- self.scale = self.head_dim ** (-0.5) - self.dropout = dropout - - self.v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.l_proj = nn.Linear(self.l_dim, self.embed_dim) - self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) - - self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) - self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) - - self.stable_softmax_2d = True - self.clamp_min_for_underflow = True - self.clamp_max_for_overflow = True - - self._reset_parameters() - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.v_proj.weight) - self.v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.l_proj.weight) - self.l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_v_proj.weight) - self.values_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_l_proj.weight) - self.values_l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_v_proj.weight) - self.out_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_l_proj.weight) - self.out_l_proj.bias.data.fill_(0) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - """_summary_ - - Args: - v (_type_): bs, n_img, dim - l (_type_): bs, n_text, dim - attention_mask_v (_type_, optional): _description_. bs, n_img - attention_mask_l (_type_, optional): _description_. bs, n_text - - Returns: - _type_: _description_ - """ - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - bsz, tgt_len, _ = v.size() - - query_states = self.v_proj(v) * self.scale - key_states = self._shape(self.l_proj(l), -1, bsz) - value_v_states = self._shape(self.values_v_proj(v), -1, bsz) - value_l_states = self._shape(self.values_l_proj(l), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_v_states = value_v_states.view(*proj_shape) - value_l_states = value_l_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" - ) - - if self.stable_softmax_2d: - attn_weights = attn_weights - attn_weights.max() - - if self.clamp_min_for_underflow: - attn_weights = torch.clamp( - attn_weights, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights = torch.clamp( - attn_weights, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - attn_weights_T = attn_weights.transpose(1, 2) - attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] - if self.clamp_min_for_underflow: - attn_weights_l = torch.clamp( - attn_weights_l, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights_l = torch.clamp( - attn_weights_l, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - # mask vison for language - if attention_mask_v is not None: - attention_mask_v = ( - attention_mask_v[:, None, None, 
:].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) - - attn_weights_l = attn_weights_l.softmax(dim=-1) - - # mask language for vision - if attention_mask_l is not None: - attention_mask_l = ( - attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights.masked_fill_(attention_mask_l, float("-inf")) - attn_weights_v = attn_weights.softmax(dim=-1) - - attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) - attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) - - attn_output_v = torch.bmm(attn_probs_v, value_l_states) - attn_output_l = torch.bmm(attn_probs_l, value_v_states) - - if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" - ) - - if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): - raise ValueError( - f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" - ) - - attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output_v = attn_output_v.transpose(1, 2) - attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) - - attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) - attn_output_l = attn_output_l.transpose(1, 2) - attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim) - - attn_output_v = self.out_v_proj(attn_output_v) - attn_output_l = self.out_l_proj(attn_output_l) - - return attn_output_v, attn_output_l - - -# Bi-Direction MHA (text->image, image->text) -class BiAttentionBlock(nn.Module): - def __init__( - self, - v_dim, - l_dim, - embed_dim, - num_heads, - dropout=0.1, - drop_path=0.0, - init_values=1e-4, - cfg=None, - ): - """ - Inputs: - embed_dim - Dimensionality of input and attention feature vectors - hidden_dim - Dimensionality of hidden layer in feed-forward network - (usually 2-4x larger than embed_dim) - num_heads - Number of heads to use in the Multi-Head Attention block - dropout - Amount of dropout to apply in the feed-forward network - """ - super(BiAttentionBlock, self).__init__() - - # pre layer norm - self.layer_norm_v = nn.LayerNorm(v_dim) - self.layer_norm_l = nn.LayerNorm(l_dim) - self.attn = BiMultiHeadAttention( - v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout - ) - - # add layer scale for training stability - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True) - self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - v = self.layer_norm_v(v) - l = self.layer_norm_l(l) - delta_v, delta_l = self.attn( - v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l - ) - # v, l = v + delta_v, l + delta_l - v = v + self.drop_path(self.gamma_v * delta_v) - l = l + self.drop_path(self.gamma_l * delta_l) - return v, l - - # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None) diff --git a/spaces/jackyccl/segment-anything/segment_anything/__init__.py b/spaces/jackyccl/segment-anything/segment_anything/__init__.py deleted file mode 100644 index 
34383d83f5e76bc801f31b20e5651e383be348b6..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/segment_anything/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from .build_sam import ( - build_sam, - build_sam_vit_h, - build_sam_vit_l, - build_sam_vit_b, - sam_model_registry, -) -from .predictor import SamPredictor -from .automatic_mask_generator import SamAutomaticMaskGenerator diff --git a/spaces/jbetker/tortoise/README.md b/spaces/jbetker/tortoise/README.md deleted file mode 100644 index f3cac2cf867d31013c6c408c55e602bda1eeae45..0000000000000000000000000000000000000000 --- a/spaces/jbetker/tortoise/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: TorToiSe -emoji: 🐢 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -models: jbetker/tortoise-tts-v2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/jbilcke-hf/MusicGen/audiocraft/data/zip.py b/spaces/jbilcke-hf/MusicGen/audiocraft/data/zip.py deleted file mode 100644 index 1f1154231da321dd38d151ff285dbcff5e38a6e0..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/MusicGen/audiocraft/data/zip.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing -import zipfile - -from dataclasses import dataclass -from functools import lru_cache -from typing_extensions import Literal - - -DEFAULT_SIZE = 32 -MODE = Literal['r', 'w', 'x', 'a'] - - -@dataclass(order=True) -class PathInZip: - """Class for holding a path of file within a zip file. - - Args: - path: The convention is : - Let's assume there is a zip file /some/location/foo.zip - and inside of it is a json file located at /data/file1.json, - Then we expect path = "/some/location/foo.zip:/data/file1.json" - """ - - INFO_PATH_SEP = ':' - zip_path: str - file_path: str - - def __init__(self, path: str) -> None: - split_path = path.split(self.INFO_PATH_SEP) - assert len(split_path) == 2 - self.zip_path, self.file_path = split_path - - @classmethod - def from_paths(cls, zip_path: str, file_path: str): - return cls(zip_path + cls.INFO_PATH_SEP + file_path) - - def __str__(self) -> str: - return self.zip_path + self.INFO_PATH_SEP + self.file_path - - -def _open_zip(path: str, mode: MODE = 'r'): - return zipfile.ZipFile(path, mode) - - -_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip) - - -def set_zip_cache_size(max_size: int): - """Sets the maximal LRU caching for zip file opening. - - Args: - max_size: the maximal LRU cache. - """ - global _cached_open_zip - _cached_open_zip = lru_cache(max_size)(_open_zip) - - -def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO: - """Opens a file stored inside a zip and returns a file-like object. - - Args: - path_in_zip: A PathInZip object representing the file to return a file-like object of. - mode: The mode in which to open the file with. - Returns: - A file-like object for PathInZip. 
- """ - zf = _cached_open_zip(path_in_zip.zip_path) - return zf.open(path_in_zip.file_path) diff --git a/spaces/jbilcke-hf/hotshot-xl-server-1/Dockerfile b/spaces/jbilcke-hf/hotshot-xl-server-1/Dockerfile deleted file mode 100644 index 41d58cc40c5a53a6182a0b39b4cb102827fbbb59..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/hotshot-xl-server-1/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -# Use an official PyTorch image with CUDA support as the base image -FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime - -# Install Git and system libraries required for OpenGL without interactive prompts -ENV DEBIAN_FRONTEND=noninteractive - -# Install Git and OpenGL libraries, and libglib2.0 -RUN apt-get update && apt-get install -y git libgl1-mesa-glx libglib2.0-0 - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONPATH=$HOME/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - GRADIO_SHARE=False \ - SYSTEM=spaces - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Clone your repository or add your code to the container -RUN git clone -b main https://github.com/jbilcke-hf/Hotshot-XL-Gradio-API $HOME/app - -# Install dependencies -RUN pip install --no-cache-dir -r requirements.txt gradio accelerate - -RUN find $HOME/app - -# Set the environment variable to specify the GPU device -ENV CUDA_DEVICE_ORDER=PCI_BUS_ID -ENV CUDA_VISIBLE_DEVICES=0 - -# Run your app.py script -CMD ["python", "app_w_lora.py"] \ No newline at end of file diff --git a/spaces/jbilcke-hf/webapp-factory-llama2/README.md b/spaces/jbilcke-hf/webapp-factory-llama2/README.md deleted file mode 100644 index 073a7054fea55fb069231b2dc759a5d1cb7a1a6e..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/webapp-factory-llama2/README.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Webapp Factory Llama -emoji: 🏭🦙 -colorFrom: yellow -colorTo: red -sdk: docker -pinned: false -app_port: 7860 ---- - -A minimalist Docker project to generate web apps on demand using Llama2. - -Note: this is for demonstration only: this endpoint isn't supposed to be duplicated, as it uses a private Hugging Face Inference Endpoint. - -# Examples - -## Local prompt examples - -``` -http://localhost:7860/?prompt=A%20simple%20page%20to%20compute%20the%20BMI%20(use%20SI%20units) -``` - -# Installation -## Building and run without Docker - -```bash -nvm use -npm i -HF_API_TOKEN=******* HF_END_POINT_URL=https://*******.endpoints.huggingface.cloud npm run start -``` - -## Building and running with Docker - -```bash -npm run docker -``` - -This script is a shortcut executing the following commands: - -```bash -docker build -t webapp-factory-llama2 . 
-docker run -it -p 7860:7860 webapp-factory-llama2 -``` \ No newline at end of file diff --git a/spaces/jgurzoni/image_background_swapper/inpainter.py b/spaces/jgurzoni/image_background_swapper/inpainter.py deleted file mode 100644 index b44e0875fae96c508425bf6bc0cabbaee57f24c1..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/inpainter.py +++ /dev/null @@ -1,110 +0,0 @@ -import os -import cv2 -import numpy as np -import torch -import tqdm -import yaml -from omegaconf import OmegaConf -from PIL import Image -from torch.utils.data._utils.collate import default_collate -from saicinpainting.training.trainers import load_checkpoint -from saicinpainting.evaluation.utils import move_to_device, load_image, prepare_image, pad_img_to_modulo, scale_image -from saicinpainting.evaluation.refinement import refine_predict - -refiner_config = { - 'gpu_ids': '0,', - 'modulo': 8, - 'n_iters': 15, - 'lr': 0.002, - 'min_side': 512, - 'max_scales': 3, - 'px_budget': 1800000 -} - -class Inpainter(): - def __init__(self, config): - self.model = None - self.config = config - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.scale_factor = config['scale_factor'] - self.pad_out_to_modulo = config['pad_out_to_modulo'] - self.predict_config = config['predict'] - self.predict_config['model_path'] = 'big-lama' - self.predict_config['model_checkpoint'] = 'best.ckpt' - self.refiner_config = refiner_config - - def load_model_from_checkpoint(self, model_path, checkpoint): - train_config_path = os.path.join(model_path, 'config.yaml') - with open(train_config_path, 'r') as f: - train_config = OmegaConf.create(yaml.safe_load(f)) - - train_config.training_model.predict_only = True - train_config.visualizer.kind = 'noop' - - checkpoint_path = os.path.join(model_path, - 'models', - checkpoint) - self.model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu') - - - def load_batch_data(self, img_, mask_): - """Loads the image and mask from the given filenames. - """ - image = prepare_image(img_, mode='RGB') - mask = prepare_image(mask_, mode='L') - - result = dict(image=image, mask=mask[None, ...]) - - if self.scale_factor is not None: - result['image'] = scale_image(result['image'], self.scale_factor) - result['mask'] = scale_image(result['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST) - - if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: - result['unpad_to_size'] = result['image'].shape[1:] - result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo) - result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo) - - return result - - def inpaint_img(self, original_img, mask_img, refine=False) -> Image: - """ Inpaints the image region defined by the given mask. - White pixels are to be masked and black pixels kept. - args: - refine: if True, uses the refinement model to enhance the inpainting result, at the cost of speed. 
- - returns: the inpainted image - """ - # in case we are given filenames instead of images - if isinstance(original_img, str): - original_img = load_image(original_img, mode='RGB') - mask_img = load_image(mask_img, mode='L') - - self.model.eval() - if not refine: - self.model.to(self.device) - # load the image and mask - batch = default_collate([self.load_batch_data(original_img, mask_img)]) - - if refine: - assert 'unpad_to_size' in batch, "Unpadded size is required for the refinement" - # image unpadding is taken care of in the refiner, so that output image - # is same size as the input image - cur_res = refine_predict(batch, self.model, **self.refiner_config) - cur_res = cur_res[0].permute(1,2,0).detach().cpu().numpy() - else: - with torch.no_grad(): - batch = move_to_device(batch, self.device) - batch['mask'] = (batch['mask'] > 0) * 1 - batch = self.model(batch) - cur_res = batch[self.predict_config['out_key']][0].permute(1, 2, 0).detach().cpu().numpy() - unpad_to_size = batch.get('unpad_to_size', None) - if unpad_to_size is not None: - orig_height, orig_width = unpad_to_size - cur_res = cur_res[:orig_height, :orig_width] - - cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') - rslt_image = Image.fromarray(cur_res, 'RGB') - #cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) - - return rslt_image - diff --git a/spaces/jhparmar/Blip-image-captioning-base/README.md b/spaces/jhparmar/Blip-image-captioning-base/README.md deleted file mode 100644 index e6ff7cfea58aaa84e4ffd4488a932347029f6315..0000000000000000000000000000000000000000 --- a/spaces/jhparmar/Blip-image-captioning-base/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Blip Image Captioning Base -emoji: 📈 -colorFrom: pinkred -colorTo: indigo -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/jhwen/bingo/src/app/layout.tsx b/spaces/jhwen/bingo/src/app/layout.tsx deleted file mode 100644 index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/src/app/layout.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { Metadata } from 'next' -import { Toaster } from 'react-hot-toast' -import { TailwindIndicator } from '@/components/tailwind-indicator' -import { Providers } from '@/components/providers' -import { Header } from '@/components/header' - -import '@/app/globals.scss' - - -export const metadata: Metadata = { - title: { - default: 'Bing AI Chatbot', - template: `%s - Bing AI Chatbot` - }, - description: 'Bing AI Chatbot Web App.', - themeColor: [ - { media: '(prefers-color-scheme: light)', color: 'white' }, - { media: '(prefers-color-scheme: dark)', color: 'dark' } - ], - icons: { - icon: '/favicon.ico', - shortcut: '../assets/images/logo.svg', - apple: '../assets/images/logo.svg' - } -} - -interface RootLayoutProps { - children: React.ReactNode -} - -export default function RootLayout({ children }: RootLayoutProps) { - return ( - - - - -
-          {/* @ts-ignore */}
-          <Header />
-          <main>{children}</main>
-          <TailwindIndicator />
-        </Providers>
        - - - ) -} diff --git a/spaces/jiaqingj/ConZIC/README.md b/spaces/jiaqingj/ConZIC/README.md deleted file mode 100644 index 620d83093951545778845c1c66bb18c94377d170..0000000000000000000000000000000000000000 --- a/spaces/jiaqingj/ConZIC/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ConZIC -emoji: 📚 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jiejiejie0420/bingo/src/components/button-scroll-to-bottom.tsx b/spaces/jiejiejie0420/bingo/src/components/button-scroll-to-bottom.tsx deleted file mode 100644 index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000 --- a/spaces/jiejiejie0420/bingo/src/components/button-scroll-to-bottom.tsx +++ /dev/null @@ -1,34 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' -import { useAtBottom } from '@/lib/hooks/use-at-bottom' -import { Button, type ButtonProps } from '@/components/ui/button' -import { IconArrowDown } from '@/components/ui/icons' - -export function ButtonScrollToBottom({ className, ...props }: ButtonProps) { - const isAtBottom = useAtBottom() - - return ( - - ) -} diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA3_224.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA3_224.py deleted file mode 100644 index f92147a734c66db38a01bfcae015619c6ecc147b..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Hash/test_SHA3_224.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/Hash/test_SHA3_224.py: Self-test for the SHA-3/224 hash function -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# =================================================================== - -"""Self-test suite for Crypto.Hash.SHA3_224""" - -import unittest -from binascii import hexlify - -from Crypto.SelfTest.loader import load_test_vectors -from Crypto.SelfTest.st_common import list_test_cases -from Crypto.Hash import SHA3_224 as SHA3 -from Crypto.Util.py3compat import b - - -class APITest(unittest.TestCase): - - def test_update_after_digest(self): - msg=b("rrrrttt") - - # Normally, update() cannot be done after digest() - h = SHA3.new(data=msg[:4]) - dig1 = h.digest() - self.assertRaises(TypeError, h.update, msg[4:]) - dig2 = SHA3.new(data=msg).digest() - - # With the proper flag, it is allowed - h = SHA3.new(data=msg[:4], update_after_digest=True) - self.assertEqual(h.digest(), dig1) - # ... and the subsequent digest applies to the entire message - # up to that point - h.update(msg[4:]) - self.assertEqual(h.digest(), dig2) - - -def get_tests(config={}): - from .common import make_hash_tests - - tests = [] - - test_vectors = load_test_vectors(("Hash", "SHA3"), - "ShortMsgKAT_SHA3-224.txt", - "KAT SHA-3 224", - { "len" : lambda x: int(x) } ) or [] - - test_data = [] - for tv in test_vectors: - if tv.len == 0: - tv.msg = b("") - test_data.append((hexlify(tv.md), tv.msg, tv.desc)) - - tests += make_hash_tests(SHA3, "SHA3_224", test_data, - digest_size=SHA3.digest_size, - oid="2.16.840.1.101.3.4.2.7") - tests += list_test_cases(APITest) - return tests - -if __name__ == '__main__': - import unittest - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/jone/GFPGAN/tests/test_ffhq_degradation_dataset.py b/spaces/jone/GFPGAN/tests/test_ffhq_degradation_dataset.py deleted file mode 100644 index fa56c03fb8e23df26aa6ed8442a86b3c676eec78..0000000000000000000000000000000000000000 --- a/spaces/jone/GFPGAN/tests/test_ffhq_degradation_dataset.py +++ /dev/null @@ -1,96 +0,0 @@ -import pytest -import yaml - -from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset - - -def test_ffhq_degradation_dataset(): - - with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 1 - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == 'tests/data/gt/00000000.png' - - # ------------------ test with probability = 0 -------------------- # - opt['color_jitter_prob'] = 0 - opt['color_jitter_pt_prob'] = 0 - opt['gray_prob'] = 0 - opt['io_backend'] = dict(type='disk') - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 0 - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert 
set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == 'tests/data/gt/00000000.png' - - # ------------------ test lmdb backend -------------------- # - opt['dataroot_gt'] = 'tests/data/ffhq_gt.lmdb' - opt['io_backend'] = dict(type='lmdb') - - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'lmdb' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 0 - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == '00000000' - - # ------------------ test with crop_components -------------------- # - opt['crop_components'] = True - opt['component_path'] = 'tests/data/test_eye_mouth_landmarks.pth' - opt['eye_enlarge_ratio'] = 1.4 - opt['gt_gray'] = True - opt['io_backend'] = dict(type='lmdb') - - dataset = FFHQDegradationDataset(opt) - assert dataset.crop_components is True - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'loc_left_eye', 'loc_right_eye', 'loc_mouth'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == '00000000' - assert result['loc_left_eye'].shape == (4, ) - assert result['loc_right_eye'].shape == (4, ) - assert result['loc_mouth'].shape == (4, ) - - # ------------------ lmdb backend should have paths ends with lmdb -------------------- # - with pytest.raises(ValueError): - opt['dataroot_gt'] = 'tests/data/gt' - opt['io_backend'] = dict(type='lmdb') - dataset = FFHQDegradationDataset(opt) diff --git a/spaces/jordonpeter01/AWS-CHATBOOT-SUPER/app.py b/spaces/jordonpeter01/AWS-CHATBOOT-SUPER/app.py deleted file mode 100644 index f2756788e4f590ab5b5ed2e05f8281a4fdffc997..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/AWS-CHATBOOT-SUPER/app.py +++ /dev/null @@ -1,604 +0,0 @@ -import concurrent -import functools -import logging -import os -import random -import re -import traceback -import uuid -import datetime -from collections import deque -import itertools - -from collections import defaultdict -from time import sleep -from typing import Generator, Tuple, List, Dict - -import boto3 -import gradio as gr -import requests -from datasets import load_dataset - -logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO")) -logging.getLogger("httpx").setLevel(logging.WARNING) - -# Create a DynamoDB client -dynamodb = boto3.resource('dynamodb', region_name='us-east-1') -# Get a reference to the table -table = dynamodb.Table('oaaic_chatbot_arena') - - -def prompt_human_instruct(system_msg, history): - return system_msg.strip() + "\n" + \ - "\n".join(["\n".join(["###Human: "+item[0], "###Assistant: "+item[1]]) - for item in history]) - - -def prompt_instruct(system_msg, history): - return system_msg.strip() + "\n" + \ - "\n".join(["\n".join(["### Instruction: "+item[0], "### Response: "+item[1]]) - for item in history]) - - -def 
prompt_chat(system_msg, history): - return system_msg.strip() + "\n" + \ - "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]]) - for item in history]) - - -def prompt_roleplay(system_msg, history): - return "<|system|>" + system_msg.strip() + "\n" + \ - "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]]) - for item in history]) - - -class Pipeline: - prefer_async = True - - def __init__(self, endpoint_id, name, prompt_fn, stop_tokens=None): - self.endpoint_id = endpoint_id - self.name = name - self.prompt_fn = prompt_fn - stop_tokens = stop_tokens or [] - self.generation_config = { - "max_new_tokens": 1024, - "top_k": 40, - "top_p": 0.90, - "temperature": 0.72, - "repetition_penalty": 1.22, - "last_n_tokens": 64, - "seed": -1, - "batch_size": 8, - "threads": -1, - "stop": ["
        ", "USER:", "### Instruction:"] + stop_tokens, - } - - def get_generation_config(self): - return self.generation_config.copy() - - def __call__(self, prompt, config=None) -> Generator[List[Dict[str, str]], None, None]: - input = config if config else self.generation_config.copy() - input["prompt"] = prompt - - if self.prefer_async: - url = f"https://api.runpod.ai/v2/{self.endpoint_id}/run" - else: - url = f"https://api.runpod.ai/v2/{self.endpoint_id}/runsync" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - response = requests.post(url, headers=headers, json={"input": input}) - - if response.status_code == 200: - data = response.json() - task_id = data.get('id') - return self.stream_output(task_id) - - def stream_output(self,task_id) -> Generator[List[Dict[str, str]], None, None]: - url = f"https://api.runpod.ai/v2/{self.endpoint_id}/stream/{task_id}" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - - while True: - try: - response = requests.get(url, headers=headers) - if response.status_code == 200: - data = response.json() - yield [{"generated_text": "".join([s["output"] for s in data["stream"]])}] - if data.get('status') == 'COMPLETED': - return - elif response.status_code >= 400: - logging.error(response.json()) - except ConnectionError: - pass - - def poll_for_status(self, task_id): - url = f"https://api.runpod.ai/v2/{self.endpoint_id}/status/{task_id}" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - - while True: - response = requests.get(url, headers=headers) - if response.status_code == 200: - data = response.json() - if data.get('status') == 'COMPLETED': - return [{"generated_text": data["output"]}] - elif response.status_code >= 400: - logging.error(response.json()) - # Sleep for 3 seconds between each request - sleep(3) - - def transform_prompt(self, system_msg, history): - return self.prompt_fn(system_msg, history) - - -AVAILABLE_MODELS = { - "hermes-13b": ("p0zqb2gkcwp0ww", prompt_instruct), - "manticore-13b-chat": ("u6tv84bpomhfei", prompt_chat), - "airoboros-13b": ("rglzxnk80660ja", prompt_chat), - "wizard-vicuna-13b": ("9vvpikt4ttyqos", prompt_chat), - "lmsys-vicuna-13b": ("2nlb32ydkaz6yd", prompt_chat), - "supercot-13b": ("0be7865dwxpwqk", prompt_instruct, ["Instruction:"]), - "mpt-7b-instruct": ("jpqbvnyluj18b0", prompt_instruct), - "guanaco-13b": ("yxl8w98z017mw2", prompt_instruct), - # "minotaur-13b": ("6f1baphxjpjk7b", prompt_chat), - "minotaur-13b-fixed": ("sjnkstd3e40ojj", prompt_roleplay), - "wizardlm-13b": ("k0chcxsgukov8x", prompt_instruct), - "selfee-13b": ("50rnvxln9bmf4c", prompt_instruct), - "robin-v2-13b": ("4cw4vwzzhsl5pq", prompt_human_instruct, ["###Human"]), - "minotaur-15b-8k": ("zdk804d2txtt68", prompt_chat), -} - -OAAIC_MODELS = [ - "minotaur-15b-8k", - "minotaur-13b-fixed", - "manticore-13b-chat", - # "minotaur-mpt-7b", -] -OAAIC_MODELS_ROLEPLAY = { - "manticore-13b-chat-roleplay": ("u6tv84bpomhfei", prompt_roleplay), - "minotaur-13b-roleplay": ("6f1baphxjpjk7b", prompt_roleplay), - "minotaur-13b-fixed-roleplay": ("sjnkstd3e40ojj", prompt_roleplay), - "minotaur-15b-8k-roleplay": ("zdk804d2txtt68", prompt_roleplay), - # "minotaur-mpt-7b": ("vm1wcsje126x1x", prompt_chat), -} - -_memoized_models = defaultdict() - - -def get_model_pipeline(model_name): - if not _memoized_models.get(model_name): - kwargs = {} - if model_name in AVAILABLE_MODELS: - if len(AVAILABLE_MODELS[model_name]) >= 3: - kwargs["stop_tokens"] = AVAILABLE_MODELS[model_name][2] 
- _memoized_models[model_name] = Pipeline(AVAILABLE_MODELS[model_name][0], model_name, AVAILABLE_MODELS[model_name][1], **kwargs) - elif model_name in OAAIC_MODELS_ROLEPLAY: - _memoized_models[model_name] = Pipeline(OAAIC_MODELS_ROLEPLAY[model_name][0], model_name, OAAIC_MODELS_ROLEPLAY[model_name][1], **kwargs) - return _memoized_models.get(model_name) - -start_message = """Below is a dialogue between a USER and an ASSISTANT. The USER may ask questions, request information, or provide instructions for a task, often supplementing with additional context. The ASSISTANT responds accurately and effectively, offering insights, answering questions, or executing tasks to the best of its ability based on the given information. -""" - - -def user(message, nudge_msg, history1, history2): - history1 = history1 or [] - history2 = history2 or [] - # Append the user's message to the conversation history - history1.append([message, nudge_msg]) - history2.append([message, nudge_msg]) - - return "", nudge_msg, history1, history2 - - -def token_generator(generator1, generator2, mapping_fn=None, fillvalue=None): - if not fillvalue: - fillvalue = '' - if not mapping_fn: - mapping_fn = lambda x: x - for output1, output2 in itertools.zip_longest(generator1, generator2, fillvalue=fillvalue): - tokens1 = re.findall(r'(.*?)(\s|$)', mapping_fn(output1)) - tokens2 = re.findall(r'(.*?)(\s|$)', mapping_fn(output2)) - - for token1, token2 in itertools.zip_longest(tokens1, tokens2, fillvalue=''): - yield "".join(token1), "".join(token2) - - -def chat(history1, history2, system_msg, state): - history1 = history1 or [] - history2 = history2 or [] - - arena_bots = None - if state and "models" in state and state['models']: - arena_bots = state['models'] - if not arena_bots: - arena_bots = list(AVAILABLE_MODELS.keys()) - random.shuffle(arena_bots) - # bootstrap a new bot into the arena more often - if "minotaur-15b-8k" not in arena_bots[0:2] and random.choice([True, False, False]): - arena_bots.insert(random.choice([0,1]), "minotaur-15b-8k") - - battle = arena_bots[0:2] - model1 = get_model_pipeline(battle[0]) - model2 = get_model_pipeline(battle[1]) - - messages1 = model1.transform_prompt(system_msg, history1) - messages2 = model2.transform_prompt(system_msg, history2) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages1 = messages1.rstrip() - messages2 = messages2.rstrip() - - model1_res = model1(messages1) # type: Generator[str, None, None] - model2_res = model2(messages2) # type: Generator[str, None, None] - res = token_generator(model1_res, model2_res, lambda x: x[0]['generated_text'], fillvalue=[{'generated_text': ''}]) # type: Generator[Tuple[str, str], None, None] - logging.info({"models": [model1.name, model2.name]}) - for t1, t2 in res: - if t1 is not None: - history1[-1][1] += t1 - if t2 is not None: - history2[-1][1] += t2 - # stream the response - # [arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state] - yield history1, history2, "", gr.update(value=battle[0]), gr.update(value=battle[1]), {"models": [model1.name, model2.name]} - sleep(0.05) - - -def chosen_one(label, choice1_history, choice2_history, system_msg, nudge_msg, rlhf_persona, state): - if not state: - logging.error("missing state!!!") - # Generate a uuid for each submission - arena_battle_id = str(uuid.uuid4()) - - # Get the current timestamp - timestamp = datetime.datetime.now().isoformat() - - # Put the item in the table - table.put_item( - Item={ - 'arena_battle_id': 
arena_battle_id, - 'timestamp': timestamp, - 'system_msg': system_msg, - 'nudge_prefix': nudge_msg, - 'choice1_name': state["models"][0], - 'choice1': choice1_history, - 'choice2_name': state["models"][1], - 'choice2': choice2_history, - 'label': label, - 'rlhf_persona': rlhf_persona, - } - ) - -chosen_one_first = functools.partial(chosen_one, 1) -chosen_one_second = functools.partial(chosen_one, 2) -chosen_one_tie = functools.partial(chosen_one, 0) -chosen_one_suck = functools.partial(chosen_one, 1) - -leaderboard_intro = """### TBD -- This is very much a work-in-progress, if you'd like to help build this out, join us on [Discord](https://discord.gg/QYF8QrtEUm) - -""" -elo_scores = load_dataset("openaccess-ai-collective/chatbot-arena-elo-scores") -elo_scores = elo_scores["train"].sort("elo_score", reverse=True) - - -def refresh_md(): - return leaderboard_intro + "\n" + dataset_to_markdown() - - -def fetch_elo_scores(): - elo_scores = load_dataset("openaccess-ai-collective/chatbot-arena-elo-scores") - elo_scores = elo_scores["train"].sort("elo_score", reverse=True) - return elo_scores - - -def dataset_to_markdown(): - dataset = fetch_elo_scores() - # Get column names (dataset features) - columns = list(dataset.features.keys()) - # Start markdown string with table headers - markdown_string = "| " + " | ".join(columns) + " |\n" - # Add markdown table row separator for headers - markdown_string += "| " + " | ".join("---" for _ in columns) + " |\n" - - # Add each row from dataset to the markdown string - for i in range(len(dataset)): - row = dataset[i] - markdown_string += "| " + " | ".join(str(row[column]) for column in columns) + " |\n" - - return markdown_string - - -""" -OpenAccess AI Chatbots chat -""" - -def open_clear_chat(chat_history_state, chat_message, nudge_msg): - chat_history_state = [] - chat_message = '' - nudge_msg = '' - return chat_history_state, chat_message, nudge_msg - - -def open_user(message, nudge_msg, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, nudge_msg]) - return "", nudge_msg, history - - -def open_chat(model_name, history, system_msg, max_new_tokens, temperature, top_p, top_k, repetition_penalty): - history = history or [] - - model = get_model_pipeline(model_name) - config = model.get_generation_config() - config["max_new_tokens"] = max_new_tokens - config["temperature"] = temperature - config["temperature"] = temperature - config["top_p"] = top_p - config["top_k"] = top_k - config["repetition_penalty"] = repetition_penalty - - messages = model.transform_prompt(system_msg, history) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages.rstrip() - - model_res = model(messages, config=config) # type: Generator[List[Dict[str, str]], None, None] - for res in model_res: - # tokens = re.findall(r'\s*\S+\s*', res[0]['generated_text']) - tokens = re.findall(r'(.*?)(\s|$)', res[0]['generated_text']) - for subtoken in tokens: - subtoken = "".join(subtoken) - history[-1][1] += subtoken - # stream the response - yield history, history, "" - sleep(0.01) - - -def open_rp_chat(model_name, history, system_msg, max_new_tokens, temperature, top_p, top_k, repetition_penalty): - history = history or [] - - model = get_model_pipeline(f"{model_name}-roleplay") - config = model.get_generation_config() - config["max_new_tokens"] = max_new_tokens - config["temperature"] = temperature - config["temperature"] = temperature - config["top_p"] = top_p - 
config["top_k"] = top_k - config["repetition_penalty"] = repetition_penalty - - messages = model.transform_prompt(system_msg, history) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages.rstrip() - - model_res = model(messages, config=config) # type: Generator[List[Dict[str, str]], None, None] - for res in model_res: - tokens = re.findall(r'(.*?)(\s|$)', res[0]['generated_text']) - # tokens = re.findall(r'\s*\S+\s*', res[0]['generated_text']) - for subtoken in tokens: - subtoken = "".join(subtoken) - history[-1][1] += subtoken - # stream the response - yield history, history, "" - sleep(0.01) - - -with gr.Blocks() as arena: - with gr.Row(): - with gr.Column(): - gr.Markdown(f""" - ### brought to you by OpenAccess AI Collective - - Checkout out [our writeup on how this was built.](https://medium.com/@winglian/inference-any-llm-with-serverless-in-15-minutes-69eeb548a41d) - - This Space runs on CPU only, and uses GGML with GPU support via Runpod Serverless. - - Responses may not stream immediately due to cold starts on Serverless. - - Some responses WILL take AT LEAST 20 seconds to respond - - The Chatbot Arena (for now), is single turn only. Responses will be cleared after submission. - - Responses from the Arena will be used for building reward models. These reward models can be bucketed by Personas. - - [💵 Consider Donating on our Patreon](http://patreon.com/OpenAccessAICollective) or become a [GitHub Sponsor](https://github.com/sponsors/OpenAccess-AI-Collective) - - Join us on [Discord](https://discord.gg/PugNNHAF5r) - """) - with gr.Tab("Chatbot Arena"): - with gr.Row(): - with gr.Column(): - arena_chatbot1 = gr.Chatbot(label="Chatbot A") - with gr.Column(): - arena_chatbot2 = gr.Chatbot(label="Chatbot B") - with gr.Row(): - choose1 = gr.Button(value="👈 Prefer left (A)", variant="secondary", visible=False).style(full_width=True) - choose2 = gr.Button(value="👉 Prefer right (B)", variant="secondary", visible=False).style(full_width=True) - choose3 = gr.Button(value="🤝 Tie", variant="secondary", visible=False).style(full_width=True) - choose4 = gr.Button(value="🤮 Both are bad", variant="secondary", visible=False).style(full_width=True) - with gr.Row(): - reveal1 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True) - reveal2 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True) - with gr.Row(): - dismiss_reveal = gr.Button(value="Dismiss & Continue", variant="secondary", visible=False).style(full_width=True) - with gr.Row(): - with gr.Column(): - arena_message = gr.Textbox( - label="What do you want to ask?", - placeholder="Ask me anything.", - lines=3, - ) - with gr.Column(): - arena_rlhf_persona = gr.Textbox( - "", label="Persona Tags", interactive=True, visible=True, placeholder="Tell us about how you are judging the quality. 
ex: #CoT #SFW #NSFW #helpful #ethical #creativity", lines=2) - arena_system_msg = gr.Textbox( - start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt", lines=8) - - arena_nudge_msg = gr.Textbox( - "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=2) - with gr.Row(): - arena_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - arena_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - # arena_regenerate = gr.Button(value="Regenerate", variant="secondary").style(full_width=False) - arena_state = gr.State({}) - - arena_clear.click(lambda: None, None, arena_chatbot1, queue=False) - arena_clear.click(lambda: None, None, arena_chatbot2, queue=False) - arena_clear.click(lambda: None, None, arena_message, queue=False) - arena_clear.click(lambda: None, None, arena_nudge_msg, queue=False) - arena_clear.click(lambda: None, None, arena_state, queue=False) - - submit_click_event = arena_submit.click( - lambda *args: ( - gr.update(visible=False, interactive=False), - gr.update(visible=False), - gr.update(visible=False), - ), - inputs=[], outputs=[arena_message, arena_clear, arena_submit], queue=True - ).then( - fn=user, inputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], outputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], queue=True - ).then( - fn=chat, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_state], outputs=[arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state], queue=True - ).then( - lambda *args: ( - gr.update(visible=False, interactive=False), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=False), - gr.update(visible=False), - ), - inputs=[arena_message, arena_nudge_msg, arena_system_msg], outputs=[arena_message, choose1, choose2, choose3, choose4, arena_clear, arena_submit], queue=True - ) - - choose1_click_event = choose1.click( - fn=chosen_one_first, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True - ).then( - lambda *args: ( - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=True), - ), - inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True - ) - - choose2_click_event = choose2.click( - fn=chosen_one_second, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True - ).then( - lambda *args: ( - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=True), - ), - inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True - ) - - choose3_click_event = choose3.click( - fn=chosen_one_tie, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True - ).then( - lambda *args: ( - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=True), - gr.update(visible=True), - 
gr.update(visible=True), - ), - inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True - ) - - choose4_click_event = choose4.click( - fn=chosen_one_suck, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_nudge_msg, arena_rlhf_persona, arena_state], outputs=[], queue=True - ).then( - lambda *args: ( - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=False), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=True), - ), - inputs=[], outputs=[choose1, choose2, choose3, choose4, dismiss_reveal, reveal1, reveal2], queue=True - ) - - dismiss_click_event = dismiss_reveal.click( - lambda *args: ( - gr.update(visible=True, interactive=True), - gr.update(visible=False), - gr.update(visible=True), - gr.update(visible=True), - gr.update(visible=False), - gr.update(visible=False), - None, - None, - None, - ), - inputs=[], outputs=[ - arena_message, - dismiss_reveal, - arena_clear, arena_submit, - reveal1, reveal2, - arena_chatbot1, arena_chatbot2, - arena_state, - ], queue=True - ) - with gr.Tab("Leaderboard"): - with gr.Column(): - leaderboard_markdown = gr.Markdown(f"""{leaderboard_intro} -{dataset_to_markdown()} -""") - leaderboad_refresh = gr.Button(value="Refresh Leaderboard", variant="secondary").style(full_width=True) - leaderboad_refresh.click(fn=refresh_md, inputs=[], outputs=[leaderboard_markdown]) - with gr.Tab("OAAIC Chatbots"): - gr.Markdown("# GGML Spaces Chatbot Demo") - open_model_choice = gr.Dropdown(label="Model", choices=OAAIC_MODELS, value=OAAIC_MODELS[0]) - open_chatbot = gr.Chatbot().style(height=400) - with gr.Row(): - open_message = gr.Textbox( - label="What do you want to chat about?", - placeholder="Ask me anything.", - lines=3, - ) - with gr.Row(): - open_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - open_roleplay = gr.Button(value="Roleplay", variant="secondary").style(full_width=True) - open_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - open_stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - with gr.Row(): - with gr.Column(): - open_max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300) - open_temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8) - open_top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - open_top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - open_repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - open_system_msg = gr.Textbox( - start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5) - - open_nudge_msg = gr.Textbox( - "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=1) - - open_chat_history_state = gr.State() - open_clear.click(open_clear_chat, inputs=[open_chat_history_state, open_message, open_nudge_msg], outputs=[open_chat_history_state, open_message, open_nudge_msg], queue=False) - open_clear.click(lambda: None, None, open_chatbot, queue=False) - - open_submit_click_event = open_submit.click( - fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True - ).then( - fn=open_chat, inputs=[open_model_choice, open_chat_history_state, 
open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True - ) - open_roleplay_click_event = open_roleplay.click( - fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True - ).then( - fn=open_rp_chat, inputs=[open_model_choice, open_chat_history_state, open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True - ) - open_stop.click(fn=None, inputs=None, outputs=None, cancels=[open_submit_click_event, open_roleplay_click_event], queue=False) - -arena.queue(concurrency_count=5, max_size=16).launch(debug=True, server_name="0.0.0.0", server_port=7860) \ No newline at end of file diff --git a/spaces/jordyvl/ece/app.py b/spaces/jordyvl/ece/app.py deleted file mode 100644 index 0f23e40295610529c43fa28c8ec5f8f14c1541cc..0000000000000000000000000000000000000000 --- a/spaces/jordyvl/ece/app.py +++ /dev/null @@ -1,167 +0,0 @@ -import evaluate -import json -import sys -from pathlib import Path -import gradio as gr - -import numpy as np -import pandas as pd -import ast -import matplotlib.pyplot as plt -import matplotlib.patches as mpatches - -plt.rcParams["figure.dpi"] = 300 -plt.switch_backend( - "agg" -) # ; https://stackoverflow.com/questions/14694408/runtimeerror-main-thread-is-not-in-main-loop - - -def default_plot(): - fig = plt.figure() - ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) - ax2 = plt.subplot2grid((3, 1), (2, 0)) - ranged = np.linspace(0, 1, 10) - ax1.plot( - ranged, - ranged, - color="darkgreen", - ls="dotted", - label="Perfect", - ) - - # Bin differences - ax1.set_ylabel("Conditional Expectation") - ax1.set_ylim([0, 1.05]) - ax1.set_title("Reliability Diagram") - ax1.set_xlim([-0.05, 1.05]) # respective to bin range - - # Bin frequencies - ax2.set_xlabel("Confidence") - ax2.set_ylabel("Count") - ax2.set_xlim([-0.05, 1.05]) # respective to bin range - - return fig, ax1, ax2 - - -def reliability_plot(results): - # DEV: might still need to write tests in case of equal mass binning - # DEV: nicer would be to plot like a polygon - # see: https://github.com/markus93/fit-on-the-test/blob/main/Experiments_Synthetic/binnings.py - - fig, ax1, ax2 = default_plot() - - # Bin differences - bins_with_left_edge = np.insert(results["y_bar"], 0, 0, axis=0) - bins_with_right_edge = np.insert(results["y_bar"], -1, 1.0, axis=0) - bins_with_leftright_edge = np.insert(bins_with_left_edge, -1, 1.0, axis=0) - weights = np.nan_to_num(results["p_bar"], copy=True, nan=0) - - # NOTE: the histogram API is strange - _, _, patches = ax1.hist( - bins_with_left_edge, - weights=weights, - bins=bins_with_leftright_edge, - ) - for b in range(len(patches)): - perfect = bins_with_right_edge[b] # if b != n_bins else - empirical = weights[b] # patches[b]._height - bin_color = ( - "limegreen" - if perfect == empirical - else "dodgerblue" - if empirical < perfect - else "orangered" - ) - patches[b].set_facecolor(bin_color) # color based on over/underconfidence - - ax1handles = [ - mpatches.Patch(color="orangered", label="Overconfident"), - mpatches.Patch(color="limegreen", label="Perfect", linestyle="dotted"), - mpatches.Patch(color="dodgerblue", label="Underconfident"), - ] - - # Bin frequencies - anindices = np.where(~np.isnan(results["p_bar"]))[0] - bin_freqs = 
np.zeros(len(results["p_bar"])) - bin_freqs[anindices] = results["bin_freq"] - ax2.hist( - bins_with_left_edge, weights=bin_freqs, color="midnightblue", bins=bins_with_leftright_edge - ) - - acc_plt = ax2.axvline(x=results["accuracy"], ls="solid", lw=3, c="black", label="Accuracy") - conf_plt = ax2.axvline( - x=results["p_bar_cont"], ls="dotted", lw=3, c="#444", label="Avg. confidence" - ) - - ax1.legend(loc="lower right", handles=ax1handles) - ax2.legend(handles=[acc_plt, conf_plt]) - ax1.set_xticks(bins_with_left_edge) - ax2.set_xticks(bins_with_left_edge) - plt.tight_layout() - return fig - - -def compute_and_plot(data, n_bins, bin_range, scheme, proxy, p): - # DEV: check on invalid datatypes with better warnings - - if isinstance(data, pd.DataFrame): - data.dropna(inplace=True) - - predictions = [ - ast.literal_eval(prediction) if not isinstance(prediction, list) else prediction - for prediction in data["predictions"] - ] - references = [reference for reference in data["references"]] - - results = metric._compute( - predictions, - references, - n_bins=n_bins, - scheme=scheme, - proxy=proxy, - p=p, - detail=True, - ) - plot = reliability_plot(results) - return results["ECE"], plot - - -sliders = [ - gr.Slider(0, 100, value=10, label="n_bins"), - gr.Slider( - 0, 100, value=None, label="bin_range", visible=False - ), # DEV: need to have a double slider - gr.Dropdown(choices=["equal-range", "equal-mass"], value="equal-range", label="scheme"), - gr.Dropdown(choices=["upper-edge", "center"], value="upper-edge", label="proxy"), - gr.Dropdown(choices=[1, 2, np.inf], value=1, label="p"), -] - -slider_defaults = [slider.value for slider in sliders] - -# example data -component = gr.inputs.Dataframe( - headers=["predictions", "references"], col_count=2, datatype="number", type="pandas" -) - -component.value = [ - [[0.6, 0.2, 0.2], 0], - [[0.7, 0.1, 0.2], 2], - [[0, 0.95, 0.05], 1], -] -sample_data = [[component] + slider_defaults] - -local_path = Path(sys.path[0]) -metric = evaluate.load("jordyvl/ece") -outputs = [gr.outputs.Textbox(label="ECE"), gr.Plot(label="Reliability diagram")] -# outputs[1].value = default_plot().__dict__ #DEV: Does not work in gradio; needs to be JSON encoded - - -iface = gr.Interface( - fn=compute_and_plot, - inputs=[component] + sliders, - outputs=outputs, - description=metric.info.description, - article=evaluate.utils.parse_readme(local_path / "README.md"), - title=f"Metric: {metric.name}", - # examples=sample_data; #DEV: ValueError: Examples argument must either be a directory or a nested list, where each sublist represents a set of inputs. 
-).launch() diff --git a/spaces/jordyvl/ece/tests.py b/spaces/jordyvl/ece/tests.py deleted file mode 100644 index d560c177414388b88dc27e3caf5ff04146dfd47b..0000000000000000000000000000000000000000 --- a/spaces/jordyvl/ece/tests.py +++ /dev/null @@ -1,17 +0,0 @@ -test_cases = [ - {"predictions": [[0, 1], [1, 0]], "references": [1, 0], "result": {"ECE": 0}}, - {"predictions": [[0, 1], [1, 0]], "references": [0, 1], "result": {"ECE": 1}}, - { - "predictions": [[0.6, 0.2, 0.2], [0, 0.95, 0.05], [0.75, 0.05 ,0.2]], - "references": [0, 1, 2], - "result": {"ECE": ((abs((0==0)-0.7) + abs((1==1)-1) + abs((2==0)-0.8))/3)}, - #all predictions in separate bins - }, - { - "predictions": [[0.6, 0.2, 0.2], [0, 0.95, 0.05], [0.7, 0.1 ,0.2]], - "references": [0, 1, 2], - "result": {"ECE": abs((0==0)-0.7 + (2==0)-0.7)/3 + abs((1==1)-1)/3}, - #some predictions in same bin - }, - -# DEV: make more advanced tests including differing kwargs \ No newline at end of file diff --git a/spaces/joshipunitram/crowd-counting-p2p/util/misc.py b/spaces/joshipunitram/crowd-counting-p2p/util/misc.py deleted file mode 100644 index 7cfe7d73bbf4b51a1a30c2c9e88f423409e84095..0000000000000000000000000000000000000000 --- a/spaces/joshipunitram/crowd-counting-p2p/util/misc.py +++ /dev/null @@ -1,518 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import os -import subprocess -import time -from collections import defaultdict, deque -import datetime -import pickle -from typing import Optional, List - -import torch -import torch.distributed as dist -from torch import Tensor - -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -# if float(torchvision.__version__[:3]) < 0.7: -# from torchvision.ops import _new_empty_tensor -# from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - if torch.cuda.is_available(): - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}', - 'max mem: {memory:.0f}' - ]) - else: - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ]) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() - sha = 'N/A' - diff = "clean" - branch = 'N/A' - try: - sha = _run(['git', 'rev-parse', 'HEAD']) - subprocess.check_output(['git', 'diff'], cwd=cwd) - diff = _run(['git', 'diff-index', 'HEAD']) - diff = "has uncommited changes" if diff else "clean" - branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - batch = 
list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - -def collate_fn_crowd(batch): - # re-organize the batch - batch_new = [] - for b in batch: - imgs, points = b - if imgs.ndim == 3: - imgs = imgs.unsqueeze(0) - for i in range(len(imgs)): - batch_new.append((imgs[i, :, :, :], points[i])) - batch = batch_new - batch = list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - -def _max_by_axis_pad(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - - block = 128 - - for i in range(2): - maxes[i+1] = ((maxes[i+1] - 1) // block + 1) * block - return maxes - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - - # TODO make it support different-sized images - max_size = _max_by_axis_pad([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - for img, pad_img in zip(tensor_list, tensor): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - else: - raise ValueError('not supported') - return tensor - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - else: - print('Not using distributed mode') - args.distributed = False - return - - args.distributed = True - - 
torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}): {}'.format( - args.rank, args.dist_url), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. - """ - if float(torchvision.__version__[:3]) < 0.7: - if input.numel() > 0: - return torch.nn.functional.interpolate( - input, size, scale_factor, mode, align_corners - ) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) - - -class FocalLoss(nn.Module): - r""" - This criterion is a implemenation of Focal Loss, which is proposed in - Focal Loss for Dense Object Detection. - - Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class]) - - The losses are averaged across observations for each minibatch. - - Args: - alpha(1D Tensor, Variable) : the scalar factor for this criterion - gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5), - putting more focus on hard, misclassified examples - size_average(bool): By default, the losses are averaged over observations for each minibatch. - However, if the field size_average is set to False, the losses are - instead summed for each minibatch. - - - """ - def __init__(self, class_num, alpha=None, gamma=2, size_average=True): - super(FocalLoss, self).__init__() - if alpha is None: - self.alpha = Variable(torch.ones(class_num, 1)) - else: - if isinstance(alpha, Variable): - self.alpha = alpha - else: - self.alpha = Variable(alpha) - self.gamma = gamma - self.class_num = class_num - self.size_average = size_average - - def forward(self, inputs, targets): - N = inputs.size(0) - C = inputs.size(1) - P = F.softmax(inputs) - - class_mask = inputs.data.new(N, C).fill_(0) - class_mask = Variable(class_mask) - ids = targets.view(-1, 1) - class_mask.scatter_(1, ids.data, 1.) 
- - if inputs.is_cuda and not self.alpha.is_cuda: - self.alpha = self.alpha.cuda() - alpha = self.alpha[ids.data.view(-1)] - - probs = (P*class_mask).sum(1).view(-1,1) - - log_p = probs.log() - batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p - - if self.size_average: - loss = batch_loss.mean() - else: - loss = batch_loss.sum() - return loss \ No newline at end of file diff --git a/spaces/juancopi81/multitrack-midi-music-generator/model.py b/spaces/juancopi81/multitrack-midi-music-generator/model.py deleted file mode 100644 index 03bd5bb515ea9134245b7e720b78d10790dca056..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/multitrack-midi-music-generator/model.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -from typing import Tuple -from transformers import AutoTokenizer, AutoModelForCausalLM - -# Initialize the model and tokenizer variables as None -tokenizer = None -model = None - - -def get_model_and_tokenizer() -> Tuple[AutoModelForCausalLM, AutoTokenizer]: - """ - Returns the preloaded model and tokenizer. If they haven't been loaded before, loads them. - - Returns: - tuple: A tuple containing the preloaded model and tokenizer. - """ - global model, tokenizer - if model is None or tokenizer is None: - # Set device - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - # Load the tokenizer and the model - tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer") - model = AutoModelForCausalLM.from_pretrained( - "juancopi81/lmd-8bars-2048-epochs40_v4" - ) - - # Move model to device - model = model.to(device) - - return model, tokenizer diff --git a/spaces/jungwoo9/foodvision_big/README.md b/spaces/jungwoo9/foodvision_big/README.md deleted file mode 100644 index 17eebdc88c21da9412ae5db26af69b69182d8fe3..0000000000000000000000000000000000000000 --- a/spaces/jungwoo9/foodvision_big/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Foodvision Big -emoji: 📈 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kcagle/AutoGPT/README.md b/spaces/kcagle/AutoGPT/README.md deleted file mode 100644 index 5bf09b995f04f7af05d1314906b1b1ff39c20ddc..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AutoGPT -emoji: 🦾 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.27.0 -app_file: ui/app.py -pinned: false -license: mit -duplicated_from: aliabid94/AutoGPT ---- - diff --git a/spaces/keras-dreambooth/dreambooth-pug-ace/app.py b/spaces/keras-dreambooth/dreambooth-pug-ace/app.py deleted file mode 100644 index bb6846ba66efd8ebc50afca100c21dcbd9ccd28c..0000000000000000000000000000000000000000 --- a/spaces/keras-dreambooth/dreambooth-pug-ace/app.py +++ /dev/null @@ -1,209 +0,0 @@ -from huggingface_hub import from_pretrained_keras -from keras_cv import models -import gradio as gr - -from tensorflow import keras - -from diffusers import StableDiffusionPipeline - -keras.mixed_precision.set_global_policy("mixed_float16") - -# prepare model -resolution = 512 -# sd_dreambooth_model = models.StableDiffusion( -# img_width=resolution, img_height=resolution -# ) -# db_diffusion_model = from_pretrained_keras("keras-dreambooth/dreambooth_diffusion_model") -# sd_dreambooth_model._diffusion_model = db_diffusion_model - -# checkpoint of the converted Stable Diffusion from KerasCV 
-model_ckpt = "nielsgl/dreambooth-keras-pug-ace-sd2.1" -pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt) -pipeline = pipeline.to("cuda") - -unique_id = "puggieace" -class_label = "dog" -prompt = f"A photo of {unique_id} {class_label} on the beach" -image = pipeline(prompt, num_inference_steps=50).images[0] - -# generate images -def infer(prompt, negative_prompt, guidance_scale=10, num_inference_steps=50): - neg = negative_prompt if negative_prompt else None - imgs = [] - while len(imgs) != 4: - next_prompt = pipeline(prompt, negative_prompt=neg, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, num_images_per_prompt=5) - for img, is_neg in zip(next_prompt.images, next_prompt.nsfw_content_detected): - if not is_neg: - imgs.append(img) - if len(imgs) == 4: - break - - return imgs - -output = gr.Gallery(label="Outputs").style(grid=(1,2)) - -# customize interface -title = "KerasCV Stable Diffusion Demo on images of Ace." -description = "This is a dreambooth model fine-tuned on images of my pug named Ace. To try it, input the concept with `puggieace dog`." -examples=[ - ["Portrait photo of puggieace dog on a beachtowel wearing sunglasses on the beach, sunset in background, golden hour", "", 12, 50], - ["A photo of a cute puggieace dog getting a haircut in a barbershop, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha", "", 12, 75], - ["Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha", "", 15, 75], - ["Photo of cute puggieace dog as an astronaut, space and planet in background, ultra realistic, concept art, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. trending on artstation", "", 15, 75], - ["Photo of cute puggieace dog as super hero, futuristic city in background, cinematic light, high dynamic range, insane intricate details, stunning cinema effects, aesthetic, masterpiece, trending on artstation, cartoon art", "", 12, 75], -] - -base_14 = "https://huggingface.co/nielsgl/dreambooth-pug-ace-sd1.4/resolve/main/" -base_21 = "https://huggingface.co/nielsgl/dreambooth-keras-pug-ace-sd2.1/resolve/main/" - -model_card_1 = f""" -# KerasCV Stable Diffusion in Diffusers 🧨🤗 - -DreamBooth model for the `puggieace` concept trained by nielsgl on the [nielsgl/dreambooth-ace](https://huggingface.co/datasets/nielsgl/dreambooth-ace) dataset. -It can be used by modifying the `instance_prompt`: **a photo of puggieace**. - -The examples are from 2 different Keras CV models (`StableDiffusion` and `StableDiffusionV2`, corresponding to Stable Diffusion V1.4 and V2.1, respectively) trained on the same dataset (`nielsgl/dreambooth-ace`). - -## Description - -The Stable Diffusion V2 pipeline contained in the corresponding repository (`nielsgl/dreambooth-keras-pug-ace-sd2.1`) was created using a modified version of [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) for StableDiffusionV2 from KerasCV. The purpose is to convert the KerasCV Stable Diffusion weights in a way that is compatible with [Diffusers](https://github.com/huggingface/diffusers). 
This allows users to fine-tune using KerasCV and use the fine-tuned weights in Diffusers taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.). -This model was created as part of the Keras DreamBooth Sprint 🔥. Visit the [organisation page](https://huggingface.co/keras-dreambooth) for instructions on how to take part! - -## Demo - -""" - -model_card_2 = f""" -## Examples - -### Stable Diffusion V1.4 - -> Portrait of puggieace dog as a Roman Emperor, city in background - -![Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_14}examples/emperor-1.4.jpeg) - -> Photo of puggieace dog wearing sunglasses on the beach, sunset in background, golden hour - -![Photo of puggieace dog wearing sunglasses on the beach, sunset in background, golden hour]({base_14}examples/beach-1.4.jpg) - -> Photo of cute puggieace dog as an astronaut, planet and spaceship in background - -![Photo of cute puggieace dog as an astronaut, planet and spaceship in background, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. trending on artstation]({base_14}examples/astronaut-1.4.jpg) - -### Stable Diffusion V2.1 - -> Portrait painting of a cute puggieace dog as a samurai - -![Portrait painting of a cute puggieace dog as a samurai, ultra realistic, concept art, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/samurai-2.1.jpg) - -> Photo of cute puggieace dog as an astronaut, space and planet in background - -![Photo of cute puggieace dog as an astronaut, space and planet in background, ultra realistic, concept art, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater, trending on artstation]({base_21}examples/astronaut-2.1.jpg) - -> A photo of a cute puggieace dog getting a haircut in a barbershop - -![A photo of a cute puggieace dog getting a haircut in a barbershop, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/haircut-2.1.jpg) - -> Portrait photo of puggieace dog in New York - -![Portrait photo of puggieace dog in New York, city and skyscrapers in background, highly detailed, photorealistic, hdr, 4k]({base_21}examples/ny-2.1.jpg) - -> Portrait of puggieace dog as a Roman Emperor, city in background - -![Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. 
art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/emperor-2.1.jpg) - - -## Usage with Stable Diffusion V1.4 - -```python -from huggingface_hub import from_pretrained_keras -import keras_cv -import matplotlib.pyplot as plt - - -model = keras_cv.models.StableDiffusion(img_width=512, img_height=512, jit_compile=True) -model._diffusion_model = from_pretrained_keras("nielsgl/dreambooth-pug-ace") -model._text_encoder = from_pretrained_keras("nielsgl/dreambooth-pug-ace-text-encoder") - -images = model.text_to_image("a photo of puggieace dog on the beach", batch_size=3) -plt.imshow(image[0]) -``` - -## Usage with Stable Diffusion V2.1 - -```python -from diffusers import StableDiffusionPipeline - -pipeline = StableDiffusionPipeline.from_pretrained('nielsgl/dreambooth-keras-pug-ace-sd2.1') -image = pipeline().images[0] -image -``` - -### Training hyperparameters - -The following hyperparameters were used during training for Stable Diffusion v1.4: - -| Hyperparameters | Value | -| :-- | :-- | -| name | RMSprop | -| weight_decay | None | -| clipnorm | None | -| global_clipnorm | None | -| clipvalue | None | -| use_ema | False | -| ema_momentum | 0.99 | -| ema_overwrite_frequency | 100 | -| jit_compile | True | -| is_legacy_optimizer | False | -| learning_rate | 0.0010000000474974513 | -| rho | 0.9 | -| momentum | 0.0 | -| epsilon | 1e-07 | -| centered | False | -| training_precision | float32 | -""" - -with gr.Blocks() as demo: - with gr.Row(): - gr.Markdown(model_card_1) - with gr.Row(): - with gr.Column(): - prompt_pos = gr.Textbox(label="Positive Prompt", value="a photo of puggieace dog getting a haircut") - prompt_neg = gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry") - # gr.Slider(label='Number of gen image', minimum=1, maximum=4, value=2, step=1), - prompt_gs = gr.Number(label='Guidance scale', value=12) - prompt_steps = gr.Slider(label="Inference Steps",value=50) - prompt_btn = gr.Button("Generate") - with gr.Column(): - output = gr.Gallery(label="Outputs").style(grid=(1,2)) - prompt_btn.click(infer, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=[output]) - with gr.Row(): - gr.Examples(examples, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=output, fn=infer, cache_examples=True) - # gr.Interface(infer, inputs=[gr.Textbox(label="Positive Prompt", value="a photo of puggieace dog getting a haircut"), - # gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry"), - # # gr.Slider(label='Number of gen image', minimum=1, maximum=4, value=2, step=1), - # gr.Number(label='Guidance scale', value=12), - # gr.Slider(label="Inference Steps",value=50), - # ], outputs=[output], title=title, description=description, examples=examples).queue() - with gr.Row(): - with gr.Column(): - gr.Markdown(model_card_2) - with gr.Column(): - gr.Markdown(" ") - -demo.queue().launch() - - - -# with gr.Blocks() as card_interface: -# gr.Markdown(model_card) - -# demo_interface = gr.Interface(infer, inputs=[gr.Textbox(label="Positive Prompt", value="a photo of puggieace dog getting a haircut"), -# gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry"), -# # gr.Slider(label='Number of gen image', minimum=1, maximum=4, value=2, step=1), -# gr.Number(label='Guidance scale', value=12), -# gr.Slider(label="Inference Steps",value=50), -# ], outputs=[output], title=title, description=description, examples=examples).queue() - - -# gr.TabbedInterface([card_interface, demo_interface], ["Model Card", 
"Demo 🤗"]).launch() diff --git a/spaces/keras-dreambooth/lowpoly-world-demo/README.md b/spaces/keras-dreambooth/lowpoly-world-demo/README.md deleted file mode 100644 index 3d9d4a9afdf4fb8b286beaa9538fac6bb003d156..0000000000000000000000000000000000000000 --- a/spaces/keras-dreambooth/lowpoly-world-demo/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Lowpoly World Demo -emoji: 👀 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: apache-2.0 -tags: - - keras-dreambooth - - wildcard ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keras-io/TF-GB-Forest/README.md b/spaces/keras-io/TF-GB-Forest/README.md deleted file mode 100644 index e87c799a6d286248d1136c9563139fcc96c3858f..0000000000000000000000000000000000000000 --- a/spaces/keras-io/TF-GB-Forest/README.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: TFClassificationForest -emoji: 👁 -colorFrom: purple -colorTo: black -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 - -tags: -- classification -- gradient boosted trees -- keras -- TensorFlow - -libraries: TensorBoard - -metrics: -- accuracy - ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
- - -Author: Khalid Salama -Adapted implementation: Tannia Dubon diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/modules.py b/spaces/kevinwang676/ChatGLM2-SadTalker/modules.py deleted file mode 100644 index 52ee14e41a5b6d67d875d1b694aecd2a51244897..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/modules.py +++ /dev/null @@ -1,342 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class 
WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = 
F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x diff --git 
a/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/uix/__init__.py b/spaces/kidcoconut/spcstm_omdenasaudi_liverhccxai/uix/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/inverted_residual.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/inverted_residual.py deleted file mode 100644 index 53b8fcd41f71d814738f1ac3f5acd3c3d701bf96..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/inverted_residual.py +++ /dev/null @@ -1,208 +0,0 @@ -from annotator.uniformer.mmcv.cnn import ConvModule -from torch import nn -from torch.utils import checkpoint as cp - -from .se_layer import SELayer - - -class InvertedResidual(nn.Module): - """InvertedResidual block for MobileNetV2. - - Args: - in_channels (int): The input channels of the InvertedResidual block. - out_channels (int): The output channels of the InvertedResidual block. - stride (int): Stride of the middle (first) 3x3 convolution. - expand_ratio (int): Adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - dilation (int): Dilation rate of depthwise conv. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - stride, - expand_ratio, - dilation=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - with_cp=False): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' - self.with_cp = with_cp - self.use_res_connect = self.stride == 1 and in_channels == out_channels - hidden_dim = int(round(in_channels * expand_ratio)) - - layers = [] - if expand_ratio != 1: - layers.append( - ConvModule( - in_channels=in_channels, - out_channels=hidden_dim, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - layers.extend([ - ConvModule( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - groups=hidden_dim, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - in_channels=hidden_dim, - out_channels=out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - - def _inner_forward(x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class InvertedResidualV3(nn.Module): - """Inverted Residual Block for MobileNetV3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. 
- stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False): - super(InvertedResidualV3, self).__init__() - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2] - self.with_cp = with_cp - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=dict( - type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + out - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py deleted file mode 100644 index 50948ec423ac595025289dd44af9b92ba2744167..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py +++ /dev/null @@ -1,76 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# FITS stub adapter -# -# Copyright (c) 1998-2003 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -from . import FitsImagePlugin, Image, ImageFile -from ._deprecate import deprecate - -_handler = None - - -def register_handler(handler): - """ - Install application-specific FITS image handler. - - :param handler: Handler object. 
- """ - global _handler - _handler = handler - - deprecate( - "FitsStubImagePlugin", - 10, - action="FITS images can now be read without " - "a handler through FitsImagePlugin instead", - ) - - # Override FitsImagePlugin with this handler - # for backwards compatibility - try: - Image.ID.remove(FITSStubImageFile.format) - except ValueError: - pass - - Image.register_open( - FITSStubImageFile.format, FITSStubImageFile, FitsImagePlugin._accept - ) - - -class FITSStubImageFile(ImageFile.StubImageFile): - format = FitsImagePlugin.FitsImageFile.format - format_description = FitsImagePlugin.FitsImageFile.format_description - - def _open(self): - offset = self.fp.tell() - - im = FitsImagePlugin.FitsImageFile(self.fp) - self._size = im.size - self.mode = im.mode - self.tile = [] - - self.fp.seek(offset) - - loader = self._load() - if loader: - loader.open(self) - - def _load(self): - return _handler - - -def _save(im, fp, filename): - msg = "FITS save handler not installed" - raise OSError(msg) - - -# -------------------------------------------------------------------- -# Registry - -Image.register_save(FITSStubImageFile.format, _save) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/charset_normalizer/models.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/charset_normalizer/models.py deleted file mode 100644 index 7f8ca389050cd4bac7fd23d84e399a242d35d309..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/charset_normalizer/models.py +++ /dev/null @@ -1,337 +0,0 @@ -from encodings.aliases import aliases -from hashlib import sha256 -from json import dumps -from typing import Any, Dict, Iterator, List, Optional, Tuple, Union - -from .constant import TOO_BIG_SEQUENCE -from .utils import iana_name, is_multi_byte_encoding, unicode_range - - -class CharsetMatch: - def __init__( - self, - payload: bytes, - guessed_encoding: str, - mean_mess_ratio: float, - has_sig_or_bom: bool, - languages: "CoherenceMatches", - decoded_payload: Optional[str] = None, - ): - self._payload: bytes = payload - - self._encoding: str = guessed_encoding - self._mean_mess_ratio: float = mean_mess_ratio - self._languages: CoherenceMatches = languages - self._has_sig_or_bom: bool = has_sig_or_bom - self._unicode_ranges: Optional[List[str]] = None - - self._leaves: List[CharsetMatch] = [] - self._mean_coherence_ratio: float = 0.0 - - self._output_payload: Optional[bytes] = None - self._output_encoding: Optional[str] = None - - self._string: Optional[str] = decoded_payload - - def __eq__(self, other: object) -> bool: - if not isinstance(other, CharsetMatch): - raise TypeError( - "__eq__ cannot be invoked on {} and {}.".format( - str(other.__class__), str(self.__class__) - ) - ) - return self.encoding == other.encoding and self.fingerprint == other.fingerprint - - def __lt__(self, other: object) -> bool: - """ - Implemented to make sorted available upon CharsetMatches items. - """ - if not isinstance(other, CharsetMatch): - raise ValueError - - chaos_difference: float = abs(self.chaos - other.chaos) - coherence_difference: float = abs(self.coherence - other.coherence) - - # Below 1% difference --> Use Coherence - if chaos_difference < 0.01 and coherence_difference > 0.02: - # When having a tough decision, use the result that decoded as many multi-byte as possible. 
- if chaos_difference == 0.0 and self.coherence == other.coherence: - return self.multi_byte_usage > other.multi_byte_usage - return self.coherence > other.coherence - - return self.chaos < other.chaos - - @property - def multi_byte_usage(self) -> float: - return 1.0 - len(str(self)) / len(self.raw) - - def __str__(self) -> str: - # Lazy Str Loading - if self._string is None: - self._string = str(self._payload, self._encoding, "strict") - return self._string - - def __repr__(self) -> str: - return "".format(self.encoding, self.fingerprint) - - def add_submatch(self, other: "CharsetMatch") -> None: - if not isinstance(other, CharsetMatch) or other == self: - raise ValueError( - "Unable to add instance <{}> as a submatch of a CharsetMatch".format( - other.__class__ - ) - ) - - other._string = None # Unload RAM usage; dirty trick. - self._leaves.append(other) - - @property - def encoding(self) -> str: - return self._encoding - - @property - def encoding_aliases(self) -> List[str]: - """ - Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855. - """ - also_known_as: List[str] = [] - for u, p in aliases.items(): - if self.encoding == u: - also_known_as.append(p) - elif self.encoding == p: - also_known_as.append(u) - return also_known_as - - @property - def bom(self) -> bool: - return self._has_sig_or_bom - - @property - def byte_order_mark(self) -> bool: - return self._has_sig_or_bom - - @property - def languages(self) -> List[str]: - """ - Return the complete list of possible languages found in decoded sequence. - Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'. - """ - return [e[0] for e in self._languages] - - @property - def language(self) -> str: - """ - Most probable language found in decoded sequence. If none were detected or inferred, the property will return - "Unknown". - """ - if not self._languages: - # Trying to infer the language based on the given encoding - # Its either English or we should not pronounce ourselves in certain cases. - if "ascii" in self.could_be_from_charset: - return "English" - - # doing it there to avoid circular import - from charset_normalizer.cd import encoding_languages, mb_encoding_languages - - languages = ( - mb_encoding_languages(self.encoding) - if is_multi_byte_encoding(self.encoding) - else encoding_languages(self.encoding) - ) - - if len(languages) == 0 or "Latin Based" in languages: - return "Unknown" - - return languages[0] - - return self._languages[0][0] - - @property - def chaos(self) -> float: - return self._mean_mess_ratio - - @property - def coherence(self) -> float: - if not self._languages: - return 0.0 - return self._languages[0][1] - - @property - def percent_chaos(self) -> float: - return round(self.chaos * 100, ndigits=3) - - @property - def percent_coherence(self) -> float: - return round(self.coherence * 100, ndigits=3) - - @property - def raw(self) -> bytes: - """ - Original untouched bytes. 
- """ - return self._payload - - @property - def submatch(self) -> List["CharsetMatch"]: - return self._leaves - - @property - def has_submatch(self) -> bool: - return len(self._leaves) > 0 - - @property - def alphabets(self) -> List[str]: - if self._unicode_ranges is not None: - return self._unicode_ranges - # list detected ranges - detected_ranges: List[Optional[str]] = [ - unicode_range(char) for char in str(self) - ] - # filter and sort - self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) - return self._unicode_ranges - - @property - def could_be_from_charset(self) -> List[str]: - """ - The complete list of encoding that output the exact SAME str result and therefore could be the originating - encoding. - This list does include the encoding available in property 'encoding'. - """ - return [self._encoding] + [m.encoding for m in self._leaves] - - def output(self, encoding: str = "utf_8") -> bytes: - """ - Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. - Any errors will be simply ignored by the encoder NOT replaced. - """ - if self._output_encoding is None or self._output_encoding != encoding: - self._output_encoding = encoding - self._output_payload = str(self).encode(encoding, "replace") - - return self._output_payload # type: ignore - - @property - def fingerprint(self) -> str: - """ - Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. - """ - return sha256(self.output()).hexdigest() - - -class CharsetMatches: - """ - Container with every CharsetMatch items ordered by default from most probable to the less one. - Act like a list(iterable) but does not implements all related methods. - """ - - def __init__(self, results: Optional[List[CharsetMatch]] = None): - self._results: List[CharsetMatch] = sorted(results) if results else [] - - def __iter__(self) -> Iterator[CharsetMatch]: - yield from self._results - - def __getitem__(self, item: Union[int, str]) -> CharsetMatch: - """ - Retrieve a single item either by its position or encoding name (alias may be used here). - Raise KeyError upon invalid index or encoding not present in results. - """ - if isinstance(item, int): - return self._results[item] - if isinstance(item, str): - item = iana_name(item, False) - for result in self._results: - if item in result.could_be_from_charset: - return result - raise KeyError - - def __len__(self) -> int: - return len(self._results) - - def __bool__(self) -> bool: - return len(self._results) > 0 - - def append(self, item: CharsetMatch) -> None: - """ - Insert a single match. Will be inserted accordingly to preserve sort. - Can be inserted as a submatch. - """ - if not isinstance(item, CharsetMatch): - raise ValueError( - "Cannot append instance '{}' to CharsetMatches".format( - str(item.__class__) - ) - ) - # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) - if len(item.raw) <= TOO_BIG_SEQUENCE: - for match in self._results: - if match.fingerprint == item.fingerprint and match.chaos == item.chaos: - match.add_submatch(item) - return - self._results.append(item) - self._results = sorted(self._results) - - def best(self) -> Optional["CharsetMatch"]: - """ - Simply return the first match. Strict equivalent to matches[0]. - """ - if not self._results: - return None - return self._results[0] - - def first(self) -> Optional["CharsetMatch"]: - """ - Redundant method, call the method best(). Kept for BC reasons. 
- """ - return self.best() - - -CoherenceMatch = Tuple[str, float] -CoherenceMatches = List[CoherenceMatch] - - -class CliDetectionResult: - def __init__( - self, - path: str, - encoding: Optional[str], - encoding_aliases: List[str], - alternative_encodings: List[str], - language: str, - alphabets: List[str], - has_sig_or_bom: bool, - chaos: float, - coherence: float, - unicode_path: Optional[str], - is_preferred: bool, - ): - self.path: str = path - self.unicode_path: Optional[str] = unicode_path - self.encoding: Optional[str] = encoding - self.encoding_aliases: List[str] = encoding_aliases - self.alternative_encodings: List[str] = alternative_encodings - self.language: str = language - self.alphabets: List[str] = alphabets - self.has_sig_or_bom: bool = has_sig_or_bom - self.chaos: float = chaos - self.coherence: float = coherence - self.is_preferred: bool = is_preferred - - @property - def __dict__(self) -> Dict[str, Any]: # type: ignore - return { - "path": self.path, - "encoding": self.encoding, - "encoding_aliases": self.encoding_aliases, - "alternative_encodings": self.alternative_encodings, - "language": self.language, - "alphabets": self.alphabets, - "has_sig_or_bom": self.has_sig_or_bom, - "chaos": self.chaos, - "coherence": self.coherence, - "unicode_path": self.unicode_path, - "is_preferred": self.is_preferred, - } - - def to_json(self) -> str: - return dumps(self.__dict__, ensure_ascii=True, indent=4) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/fbm.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/fbm.css deleted file mode 100644 index ce35d99a5e64c2e42a368aa61cd41ddb563d9ddd..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/fbm.css +++ /dev/null @@ -1,97 +0,0 @@ - -/* Flexible box model classes */ -/* Taken from Alex Russell https://infrequently.org/2009/08/css-3-progress/ */ - -.hbox { - display: -webkit-box; - -webkit-box-orient: horizontal; - -webkit-box-align: stretch; - - display: -moz-box; - -moz-box-orient: horizontal; - -moz-box-align: stretch; - - display: box; - box-orient: horizontal; - box-align: stretch; -} - -.hbox > * { - -webkit-box-flex: 0; - -moz-box-flex: 0; - box-flex: 0; -} - -.vbox { - display: -webkit-box; - -webkit-box-orient: vertical; - -webkit-box-align: stretch; - - display: -moz-box; - -moz-box-orient: vertical; - -moz-box-align: stretch; - - display: box; - box-orient: vertical; - box-align: stretch; -} - -.vbox > * { - -webkit-box-flex: 0; - -moz-box-flex: 0; - box-flex: 0; -} - -.reverse { - -webkit-box-direction: reverse; - -moz-box-direction: reverse; - box-direction: reverse; -} - -.box-flex0 { - -webkit-box-flex: 0; - -moz-box-flex: 0; - box-flex: 0; -} - -.box-flex1, .box-flex { - -webkit-box-flex: 1; - -moz-box-flex: 1; - box-flex: 1; -} - -.box-flex2 { - -webkit-box-flex: 2; - -moz-box-flex: 2; - box-flex: 2; -} - -.box-group1 { - -webkit-box-flex-group: 1; - -moz-box-flex-group: 1; - box-flex-group: 1; -} - -.box-group2 { - -webkit-box-flex-group: 2; - -moz-box-flex-group: 2; - box-flex-group: 2; -} - -.start { - -webkit-box-pack: start; - -moz-box-pack: start; - box-pack: start; -} - -.end { - -webkit-box-pack: end; - -moz-box-pack: end; - box-pack: end; -} - -.center { - -webkit-box-pack: center; - -moz-box-pack: center; - box-pack: center; -} diff --git 
a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/README.md b/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/README.md deleted file mode 100644 index 7cbeea21a7f9e1e2c0cc478d9e137d95ffbe5d69..0000000000000000000000000000000000000000 --- a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/README.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Real-Time LCM ControlNet Lora SD1.5 -emoji: 🖼️🖼️ -colorFrom: gray -colorTo: indigo -sdk: docker -pinned: false -suggested_hardware: a10g-small ---- - -# Real-Time Latent Consistency Model - -This demo showcases [Latent Consistency Model (LCM)](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) using [Diffusers](https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline) with a MJPEG stream server. - -You need a webcam to run this demo. 🤗 - -## Running Locally - -You need CUDA and Python 3.10, Mac with an M1/M2/M3 chip or Intel Arc GPU - -`TIMEOUT`: limit user session timeout -`SAFETY_CHECKER`: disabled if you want NSFW filter off -`MAX_QUEUE_SIZE`: limit number of users on current app instance -`TORCH_COMPILE`: enable if you want to use torch compile for faster inference - -### image to image - -```bash -python -m venv venv -source venv/bin/activate -pip3 install -r requirements.txt -uvicorn "app-img2img:app" --host 0.0.0.0 --port 7860 --reload -``` - -### image to image ControlNet Canny - -Based pipeline from [taabata](https://github.com/taabata/LCM_Inpaint_Outpaint_Comfy) - -```bash -python -m venv venv -source venv/bin/activate -pip3 install -r requirements.txt -uvicorn "app-controlnet:app" --host 0.0.0.0 --port 7860 --reload -``` - - -### text to image - -```bash -python -m venv venv -source venv/bin/activate -pip3 install -r requirements.txt -uvicorn "app-txt2img:app" --host 0.0.0.0 --port 7860 --reload -``` - -or with environment variables - -```bash -TIMEOUT=120 SAFETY_CHECKER=True MAX_QUEUE_SIZE=4 uvicorn "app-img2img:app" --host 0.0.0.0 --port 7860 --reload -``` - -If you're running locally and want to test it on Mobile Safari, the webserver needs to be served over HTTPS. - -```bash -openssl req -newkey rsa:4096 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem -uvicorn "app-img2img:app" --host 0.0.0.0 --port 7860 --reload --log-level info --ssl-certfile=certificate.pem --ssl-keyfile=key.pem -``` - -## Docker - -You need NVIDIA Container Toolkit for Docker - -```bash -docker build -t lcm-live . -docker run -ti -p 7860:7860 --gpus all lcm-live -``` - -or with environment variables - -```bash -docker run -ti -e TIMEOUT=0 -e SAFETY_CHECKER=False -p 7860:7860 --gpus all lcm-live -``` - -# Demo on Hugging Face - -https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model - -https://github.com/radames/Real-Time-Latent-Consistency-Model/assets/102277/c4003ac5-e7ff-44c0-97d3-464bb659de70 diff --git a/spaces/lc202301/ChuanhuChatGPT/run_Windows.bat b/spaces/lc202301/ChuanhuChatGPT/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/lc202301/ChuanhuChatGPT/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... 
- -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/leogabraneth/text-generation-webui-main/css/html_4chan_style.css b/spaces/leogabraneth/text-generation-webui-main/css/html_4chan_style.css deleted file mode 100644 index afbfb537c17f85c403265e4afee1a3a6ca7758a6..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/css/html_4chan_style.css +++ /dev/null @@ -1,73 +0,0 @@ -#parent #container { - background-color: #eef2ff; - padding: 17px; -} - -#parent #container .reply { - background-color: rgb(214 218 240); - border-bottom: 1px solid rgb(183 197 217); - border-image: none 100% 1 0 stretch; - border-left: 0 none rgb(0 0 0); - border-right: 1px solid rgb(183 197 217); - color: rgb(0 0 0); - display: table; - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; - margin: 4px 0; - overflow: hidden hidden; - padding: 4px 2px; -} - -#parent #container .number { - color: rgb(0 0 0); - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; - width: 342.65px; - margin-right: 7px; -} - -#parent #container .op { - color: rgb(0 0 0); - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; - margin: 4px 0 8px; - overflow: hidden hidden; -} - -#parent #container .op blockquote { - margin-left: 0 !important; -} - -#parent #container .name { - color: rgb(17 119 67); - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; - font-weight: 700; - margin-left: 7px; -} - -#parent #container .quote { - color: rgb(221 0 0); - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; - text-decoration: underline solid rgb(221 0 0); - text-decoration-thickness: auto; -} - -#parent #container .greentext { - color: rgb(120 153 34); - font-family: arial, helvetica, sans-serif; - font-size: 13.3333px; -} - -#parent #container blockquote { - margin: 0 !important; - margin-block: 1em 1em; - margin-inline: 40px 40px; - margin: 13.33px 40px !important; -} - -#parent #container .message_4chan { - color: black; - border: none; -} \ No newline at end of file diff --git a/spaces/leogabraneth/text-generation-webui-main/one_click.py b/spaces/leogabraneth/text-generation-webui-main/one_click.py deleted file mode 100644 index 2f3dc1729760f9dd895f000a06b5d8d9bdb3b212..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/one_click.py +++ /dev/null @@ -1,400 +0,0 @@ -import argparse -import glob -import hashlib -import os -import platform -import re -import site -import subprocess -import sys - -script_dir = os.getcwd() -conda_env_path = os.path.join(script_dir, "installer_files", "env") - -# Remove the '# ' from the following lines as needed for your AMD GPU on Linux -# os.environ["ROCM_PATH"] = '/opt/rocm' -# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0' -# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030' - -# Command-line flags -cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt") -if os.path.exists(cmd_flags_path): - with open(cmd_flags_path, 'r') as f: - CMD_FLAGS = ' '.join(line.strip() for line in f if line.strip() and not line.strip().startswith('#')) -else: - CMD_FLAGS = '' - -flags = f"{' '.join([flag for flag in sys.argv[1:] if flag != '--update'])} {CMD_FLAGS}" - - -def is_linux(): - return sys.platform.startswith("linux") - - -def is_windows(): - return sys.platform.startswith("win") - - -def is_macos(): - return sys.platform.startswith("darwin") - - -def is_x86_64(): - return platform.machine() == "x86_64" - 
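# Aside (illustrative only, not part of the original one_click.py): a worked example of
# how the `flags` string above is assembled. Assuming CMD_FLAGS.txt contains the single
# line "--listen --api" and the launcher is invoked as `python one_click.py --update --verbose`,
# then sys.argv[1:] == ['--update', '--verbose'], '--update' is dropped because it only
# steers this launcher, and the result is:
#     flags == "--verbose --listen --api"
# which launch_webui() later forwards verbatim via `python server.py {flags}`.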
- -def cpu_has_avx2(): - try: - import cpuinfo - - info = cpuinfo.get_cpu_info() - if 'avx2' in info['flags']: - return True - else: - return False - except: - return True - - -def cpu_has_amx(): - try: - import cpuinfo - - info = cpuinfo.get_cpu_info() - if 'amx' in info['flags']: - return True - else: - return False - except: - return True - - -def torch_version(): - site_packages_path = None - for sitedir in site.getsitepackages(): - if "site-packages" in sitedir and conda_env_path in sitedir: - site_packages_path = sitedir - break - - if site_packages_path: - torch_version_file = open(os.path.join(site_packages_path, 'torch', 'version.py')).read().splitlines() - torver = [line for line in torch_version_file if '__version__' in line][0].split('__version__ = ')[1].strip("'") - else: - from torch import __version__ as torver - return torver - - -def is_installed(): - site_packages_path = None - for sitedir in site.getsitepackages(): - if "site-packages" in sitedir and conda_env_path in sitedir: - site_packages_path = sitedir - break - - if site_packages_path: - return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py')) - else: - return os.path.isdir(conda_env_path) - - -def check_env(): - # If we have access to conda, we are probably in an environment - conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0 - if not conda_exist: - print("Conda is not installed. Exiting...") - sys.exit(1) - - # Ensure this is a new environment and not the base environment - if os.environ["CONDA_DEFAULT_ENV"] == "base": - print("Create an environment for this project and activate it. Exiting...") - sys.exit(1) - - -def clear_cache(): - run_cmd("conda clean -a -y", environment=True) - run_cmd("python -m pip cache purge", environment=True) - - -def print_big_message(message): - message = message.strip() - lines = message.split('\n') - print("\n\n*******************************************************************") - for line in lines: - if line.strip() != '': - print("*", line) - - print("*******************************************************************\n\n") - - -def calculate_file_hash(file_path): - p = os.path.join(script_dir, file_path) - if os.path.isfile(p): - with open(p, 'rb') as f: - return hashlib.sha256(f.read()).hexdigest() - else: - return '' - - -def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None): - # Use the conda environment - if environment: - if is_windows(): - conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat") - cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd - else: - conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh") - cmd = ". 
\"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd - - # Run shell commands - result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env) - - # Assert the command ran successfully - if assert_success and result.returncode != 0: - print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'.\n\nExiting now.\nTry running the start/update script again.") - sys.exit(1) - - return result - - -def install_webui(): - # Select your GPU, or choose to run in CPU mode - if "GPU_CHOICE" in os.environ: - choice = os.environ["GPU_CHOICE"].upper() - print_big_message(f"Selected GPU choice \"{choice}\" based on the GPU_CHOICE environment variable.") - else: - print() - print("What is your GPU?") - print() - print("A) NVIDIA") - print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.6 on Linux)") - print("C) Apple M Series") - print("D) Intel Arc (IPEX)") - print("N) None (I want to run models in CPU mode)") - print() - - choice = input("Input> ").upper() - while choice not in 'ABCDN': - print("Invalid choice. Please try again.") - choice = input("Input> ").upper() - - if choice == "N": - print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.") - - # Find the proper Pytorch installation command - install_git = "conda install -y -k ninja git" - install_pytorch = "python -m pip install torch torchvision torchaudio" - - use_cuda118 = "N" - if any((is_windows(), is_linux())) and choice == "A": - if "USE_CUDA118" in os.environ: - use_cuda118 = "Y" if os.environ.get("USE_CUDA118", "").lower() in ("yes", "y", "true", "1", "t", "on") else "N" - else: - # Ask for CUDA version if using NVIDIA - print("\nWould you like to use CUDA 11.8 instead of 12.1? This is only necessary for older GPUs like Kepler.\nIf unsure, say \"N\".\n") - use_cuda118 = input("Input (Y/N)> ").upper().strip('"\'').strip() - while use_cuda118 not in 'YN': - print("Invalid choice. Please try again.") - use_cuda118 = input("Input> ").upper().strip('"\'').strip() - if use_cuda118 == 'Y': - print("CUDA: 11.8") - else: - print("CUDA: 12.1") - - install_pytorch = f"python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/{'cu121' if use_cuda118 == 'N' else 'cu118'}" - elif not is_macos() and choice == "B": - if is_linux(): - install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6" - else: - print("AMD GPUs are only supported on Linux. Exiting...") - sys.exit(1) - elif is_linux() and (choice == "C" or choice == "N"): - install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu" - elif choice == "D": - install_pytorch = "python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel_extension_for_pytorch==2.0.110+xpu -f https://developer.intel.com/ipex-whl-stable-xpu" - - # Install Git and then Pytorch - run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True) - - # Install CUDA libraries (this wasn't necessary for Pytorch before...) 
- if choice == "A": - run_cmd(f"conda install -y -c \"nvidia/label/{'cuda-12.1.1' if use_cuda118 == 'N' else 'cuda-11.8.0'}\" cuda-runtime", assert_success=True, environment=True) - - # Install the webui requirements - update_requirements(initial_installation=True) - - -def update_requirements(initial_installation=False): - # Create .git directory if missing - if not os.path.isdir(os.path.join(script_dir, ".git")): - git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main && git reset --hard origin/main && git branch --set-upstream-to=origin/main' - run_cmd(git_creation_cmd, environment=True, assert_success=True) - - files_to_check = [ - 'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat', - 'update_linux.sh', 'update_macos.sh', 'update_windows.bat', 'update_wsl.bat', - 'one_click.py' - ] - - before_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check} - run_cmd("git pull --autostash", assert_success=True, environment=True) - after_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check} - - # Check for differences in installation file hashes - for file_name in files_to_check: - if before_pull_hashes[file_name] != after_pull_hashes[file_name]: - print(f"File '{file_name}' was updated during 'git pull'. Please run the script again.") - exit(1) - - # Extensions requirements are installed only during the initial install by default. - # That can be changed with the INSTALL_EXTENSIONS environment variable. - install = initial_installation - if "INSTALL_EXTENSIONS" in os.environ: - install = os.environ["INSTALL_EXTENSIONS"].lower() in ("yes", "y", "true", "1", "t", "on") - - if install: - print_big_message("Installing extensions requirements.") - extensions = next(os.walk("extensions"))[1] - for extension in extensions: - if extension in ['superbooga', 'superboogav2']: # No wheels available for requirements - continue - - extension_req_path = os.path.join("extensions", extension, "requirements.txt") - if os.path.exists(extension_req_path): - run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True) - elif initial_installation: - print_big_message("Will not install extensions due to INSTALL_EXTENSIONS environment variable.") - - # Detect the Python and PyTorch versions - torver = torch_version() - print(f"TORCH: {torver}") - is_cuda = '+cu' in torver - is_cuda118 = '+cu118' in torver # 2.1.0+cu118 - is_cuda117 = '+cu117' in torver # 2.0.1+cu117 - is_rocm = '+rocm' in torver # 2.0.1+rocm5.4.2 - is_intel = '+cxx11' in torver # 2.0.1a0+cxx11.abi - is_cpu = '+cpu' in torver # 2.0.1+cpu - - if is_rocm: - if cpu_has_avx2(): - requirements_file = "requirements_amd.txt" - else: - requirements_file = "requirements_amd_noavx2.txt" - elif is_cpu: - if cpu_has_avx2(): - requirements_file = "requirements_cpu_only.txt" - else: - requirements_file = "requirements_cpu_only_noavx2.txt" - elif is_macos(): - if is_x86_64(): - requirements_file = "requirements_apple_intel.txt" - else: - requirements_file = "requirements_apple_silicon.txt" - else: - if cpu_has_avx2(): - requirements_file = "requirements.txt" - else: - requirements_file = "requirements_noavx2.txt" - - # Prepare the requirements file - print_big_message(f"Installing webui requirements from file: {requirements_file}") - textgen_requirements = 
open(requirements_file).read().splitlines() - if is_cuda117: - textgen_requirements = [req.replace('+cu121', '+cu117').replace('+cu122', '+cu117').replace('torch2.1', 'torch2.0') for req in textgen_requirements] - elif is_cuda118: - textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements] - if is_windows() and (is_cuda117 or is_cuda118): # No flash-attention on Windows for CUDA 11 - textgen_requirements = [req for req in textgen_requirements if 'bdashore3/flash-attention' not in req] - - with open('temp_requirements.txt', 'w') as file: - file.write('\n'.join(textgen_requirements)) - - # Workaround for git+ packages not updating properly. - git_requirements = [req for req in textgen_requirements if req.startswith("git+")] - for req in git_requirements: - url = req.replace("git+", "") - package_name = url.split("/")[-1].split("@")[0].rstrip(".git") - run_cmd("python -m pip uninstall -y " + package_name, environment=True) - print(f"Uninstalled {package_name}") - - # Install/update the project requirements - run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True) - os.remove('temp_requirements.txt') - - # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility - if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1: - clear_cache() - return - - if not os.path.exists("repositories/"): - os.mkdir("repositories") - - os.chdir("repositories") - - # Install or update ExLlama as needed - if not os.path.exists("exllama/"): - run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True) - else: - os.chdir("exllama") - run_cmd("git pull", environment=True) - os.chdir("..") - - if is_linux(): - # Fix JIT compile issue with ExLlama in Linux/WSL - if not os.path.exists(f"{conda_env_path}/lib64"): - run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True) - - # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa - gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True) - if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11: - # Install the correct version of g++ - run_cmd("conda install -y -k conda-forge::gxx_linux-64=11.2.0", environment=True) - - clear_cache() - - -def download_model(): - run_cmd("python download-model.py", environment=True) - - -def launch_webui(): - run_cmd(f"python server.py {flags}", environment=True) - - -if __name__ == "__main__": - # Verifies we are in a conda environment - check_env() - - parser = argparse.ArgumentParser(add_help=False) - parser.add_argument('--update', action='store_true', help='Update the web UI.') - args, _ = parser.parse_known_args() - - if args.update: - update_requirements() - else: - # If webui has already been installed, skip and run - if not is_installed(): - install_webui() - os.chdir(script_dir) - - if os.environ.get("LAUNCH_AFTER_INSTALL", "").lower() in ("no", "n", "false", "0", "f", "off"): - print_big_message("Install finished successfully and will now exit due to LAUNCH_AFTER_INSTALL.") - sys.exit() - - # Check if a model has been downloaded yet - if '--model-dir' in flags: - # Splits on ' ' or '=' while maintaining spaces within quotes - flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', 
flags) - model_dir = [flags_list[(flags_list.index(flag)+1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'') - else: - model_dir = 'models' - - if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0: - print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.") - - # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist - conda_path_bin = os.path.join(conda_env_path, "bin") - if not os.path.exists(conda_path_bin): - os.mkdir(conda_path_bin) - - # Launch the webui - launch_webui() diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Easeus Data Recovery Wizard Serial Key Download.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Easeus Data Recovery Wizard Serial Key Download.md deleted file mode 100644 index 7539dbaf27aa618da498ed055f8a186c4674468d..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Easeus Data Recovery Wizard Serial Key Download.md +++ /dev/null @@ -1,12 +0,0 @@ -

        Easeus Data Recovery Wizard Serial Key Download


        Download Zip »»» https://bytlly.com/2uGysT



        -
        -20 - -The adobe impress 2013 keygen tahitian solution is great for saving in online presentations and professional presentations, and gives quite a few options of templates to choose from. Adobe impress 2010 license key is a powerful presentation tool that supports image adjustment, object insertion, animation, DVD authoring, etc. - -How to generate. Image editing software easy to use. File. 22x24 png is used as a background picture. Stand alone programs. Media. 0xc058. 0005, 0x0015, 0x0020. Native to all major platforms and includes a. Based on a floating-point or integer representation. The output of this command is a matrix. - -XML. XSL. XSL is used for processing of XML. XML 4fefd39f24
        -
        -
        -

        diff --git a/spaces/lnyan/stablediffusion-infinity/PyPatchMatch/csrc/masked_image.h b/spaces/lnyan/stablediffusion-infinity/PyPatchMatch/csrc/masked_image.h deleted file mode 100644 index 52caf272d0534c35b79eedaea53d0c56e5cf942e..0000000000000000000000000000000000000000 --- a/spaces/lnyan/stablediffusion-infinity/PyPatchMatch/csrc/masked_image.h +++ /dev/null @@ -1,112 +0,0 @@ -#pragma once - -#include - -class MaskedImage { -public: - MaskedImage() : m_image(), m_mask(), m_global_mask(), m_image_grady(), m_image_gradx(), m_image_grad_computed(false) { - // pass - } - MaskedImage(cv::Mat image, cv::Mat mask) : m_image(image), m_mask(mask), m_image_grad_computed(false) { - // pass - } - MaskedImage(cv::Mat image, cv::Mat mask, cv::Mat global_mask) : m_image(image), m_mask(mask), m_global_mask(global_mask), m_image_grad_computed(false) { - // pass - } - MaskedImage(cv::Mat image, cv::Mat mask, cv::Mat global_mask, cv::Mat grady, cv::Mat gradx, bool grad_computed) : - m_image(image), m_mask(mask), m_global_mask(global_mask), - m_image_grady(grady), m_image_gradx(gradx), m_image_grad_computed(grad_computed) { - // pass - } - MaskedImage(int width, int height) : m_global_mask(), m_image_grady(), m_image_gradx() { - m_image = cv::Mat(cv::Size(width, height), CV_8UC3); - m_image = cv::Scalar::all(0); - - m_mask = cv::Mat(cv::Size(width, height), CV_8U); - m_mask = cv::Scalar::all(0); - } - inline MaskedImage clone() { - return MaskedImage( - m_image.clone(), m_mask.clone(), m_global_mask.clone(), - m_image_grady.clone(), m_image_gradx.clone(), m_image_grad_computed - ); - } - - inline cv::Size size() const { - return m_image.size(); - } - inline const cv::Mat &image() const { - return m_image; - } - inline const cv::Mat &mask() const { - return m_mask; - } - inline const cv::Mat &global_mask() const { - return m_global_mask; - } - inline const cv::Mat &grady() const { - assert(m_image_grad_computed); - return m_image_grady; - } - inline const cv::Mat &gradx() const { - assert(m_image_grad_computed); - return m_image_gradx; - } - - inline void init_global_mask_mat() { - m_global_mask = cv::Mat(m_mask.size(), CV_8U); - m_global_mask.setTo(cv::Scalar(0)); - } - inline void set_global_mask_mat(const cv::Mat &other) { - m_global_mask = other; - } - - inline bool is_masked(int y, int x) const { - return static_cast(m_mask.at(y, x)); - } - inline bool is_globally_masked(int y, int x) const { - return !m_global_mask.empty() && static_cast(m_global_mask.at(y, x)); - } - inline void set_mask(int y, int x, bool value) { - m_mask.at(y, x) = static_cast(value); - } - inline void set_global_mask(int y, int x, bool value) { - m_global_mask.at(y, x) = static_cast(value); - } - inline void clear_mask() { - m_mask.setTo(cv::Scalar(0)); - } - - inline const unsigned char *get_image(int y, int x) const { - return m_image.ptr(y, x); - } - inline unsigned char *get_mutable_image(int y, int x) { - return m_image.ptr(y, x); - } - - inline unsigned char get_image(int y, int x, int c) const { - return m_image.ptr(y, x)[c]; - } - inline int get_image_int(int y, int x, int c) const { - return static_cast(m_image.ptr(y, x)[c]); - } - - bool contains_mask(int y, int x, int patch_size) const; - MaskedImage downsample() const; - MaskedImage upsample(int new_w, int new_h) const; - MaskedImage upsample(int new_w, int new_h, const cv::Mat &new_global_mask) const; - void compute_image_gradients(); - void compute_image_gradients() const; - - static const cv::Size kDownsampleKernelSize; - static const int 
kDownsampleKernel[6]; - -private: - cv::Mat m_image; - cv::Mat m_mask; - cv::Mat m_global_mask; - cv::Mat m_image_grady; - cv::Mat m_image_gradx; - bool m_image_grad_computed = false; -}; - diff --git a/spaces/logasja/LowKey/align/matlab_cp2tform.py b/spaces/logasja/LowKey/align/matlab_cp2tform.py deleted file mode 100644 index a4f21270ad62a2702db0def8bb05d201fc1a173c..0000000000000000000000000000000000000000 --- a/spaces/logasja/LowKey/align/matlab_cp2tform.py +++ /dev/null @@ -1,345 +0,0 @@ -import numpy as np -from numpy.linalg import inv, norm, lstsq -from numpy.linalg import matrix_rank as rank - - -class MatlabCp2tormException(Exception): - def __str__(self): - return "In File {}:{}".format( - __file__, super.__str__(self)) - -def tformfwd(trans, uv): - """ - Function: - ---------- - apply affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of transformed coordinates (x, y) - """ - uv = np.hstack(( - uv, np.ones((uv.shape[0], 1)) - )) - xy = np.dot(uv, trans) - xy = xy[:, 0:-1] - return xy - - -def tforminv(trans, uv): - """ - Function: - ---------- - apply the inverse of affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of inverse-transformed coordinates (x, y) - """ - Tinv = inv(trans) - xy = tformfwd(Tinv, uv) - return xy - - -def findNonreflectiveSimilarity(uv, xy, options=None): - - options = {'K': 2} - - K = options['K'] - M = xy.shape[0] - x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - # print('--->x, y:\n', x, y - - tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) - tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) - X = np.vstack((tmp1, tmp2)) - # print('--->X.shape: ', X.shape - # print('X:\n', X - - u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - U = np.vstack((u, v)) - # print('--->U.shape: ', U.shape - # print('U:\n', U - - # We know that X * r = U - if rank(X) >= 2 * K: - r, _, _, _ = lstsq(X, U) - r = np.squeeze(r) - else: - raise Exception("cp2tform: two Unique Points Req") - - # print('--->r:\n', r - - sc = r[0] - ss = r[1] - tx = r[2] - ty = r[3] - - Tinv = np.array([ - [sc, -ss, 0], - [ss, sc, 0], - [tx, ty, 1] - ]) - - # print('--->Tinv:\n', Tinv - - T = inv(Tinv) - # print('--->T:\n', T - - T[:, 2] = np.array([0, 0, 1]) - - return T, Tinv - - -def findSimilarity(uv, xy, options=None): - - options = {'K': 2} - -# uv = np.array(uv) -# xy = np.array(xy) - - # Solve for trans1 - trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) - - # Solve for trans2 - - # manually reflect the xy data across the Y-axis - xyR = xy - xyR[:, 0] = -1 * xyR[:, 0] - - trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options) - - # manually reflect the tform to undo the reflection done on xyR - TreflectY = np.array([ - [-1, 0, 0], - [0, 1, 0], - [0, 0, 1] - ]) - - trans2 = np.dot(trans2r, TreflectY) - - # Figure out if trans1 or trans2 is better - xy1 = tformfwd(trans1, uv) - norm1 = norm(xy1 - xy) - - xy2 = tformfwd(trans2, uv) - norm2 = norm(xy2 - xy) - - if norm1 <= norm2: - return trans1, trans1_inv - 
else: - trans2_inv = inv(trans2) - return trans2, trans2_inv - - -def get_similarity_transform(src_pts, dst_pts, reflective = True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'trans': - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y, 1] = [u, v, 1] * trans - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points, each row is a pair of transformed - coordinates (x, y) - @reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - trans_inv: 3x3 np.array - inverse of trans, transform matrix from xy to uv - """ - - if reflective: - trans, trans_inv = findSimilarity(src_pts, dst_pts) - else: - trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) - - return trans, trans_inv - - -def cvt_tform_mat_for_cv2(trans): - """ - Function: - ---------- - Convert Transform Matrix 'trans' into 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - cv2_trans = trans[:, 0:2].T - - return cv2_trans - - -def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective = True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points, each row is a pair of transformed - coordinates (x, y) - reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) - cv2_trans = cvt_tform_mat_for_cv2(trans) - cv2_trans_inv = cvt_tform_mat_for_cv2(trans_inv) - - return cv2_trans, cv2_trans_inv - - -if __name__ == '__main__': - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - # In Matlab, run: - # - # uv = [u'; v']; - # xy = [x'; y']; - # tform_sim=cp2tform(uv,xy,'similarity'); - # - # trans = tform_sim.tdata.T - # ans = - # -0.0764 -1.6190 0 - # 1.6190 -0.0764 0 - # -3.2156 0.0290 1.0000 - # trans_inv = tform_sim.tdata.Tinv - # ans = - # - # -0.0291 0.6163 0 - # -0.6163 -0.0291 0 - # -0.0756 1.9826 1.0000 - # xy_m=tformfwd(tform_sim, u,v) - # - # xy_m = - # - # -3.2156 0.0290 - # 1.1833 -9.9143 - # 5.0323 2.8853 - # uv_m=tforminv(tform_sim, x,y) - # - # uv_m = - # - # 0.5698 1.3953 - # 6.0872 2.2733 - # -2.6570 4.3314 - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - uv = np.array((u, v)).T - xy = np.array((x, y)).T - - print("\n--->uv:") - print(uv) - print("\n--->xy:") - print(xy) - - trans, trans_inv = get_similarity_transform(uv, xy) - - print("\n--->trans matrix:") - 
print(trans) - - print("\n--->trans_inv matrix:") - print(trans_inv) - - print("\n---> apply transform to uv") - print("\nxy_m = uv_augmented * trans") - uv_aug = np.hstack(( - uv, np.ones((uv.shape[0], 1)) - )) - xy_m = np.dot(uv_aug, trans) - print(xy_m) - - print("\nxy_m = tformfwd(trans, uv)") - xy_m = tformfwd(trans, uv) - print(xy_m) - - print("\n---> apply inverse transform to xy") - print("\nuv_m = xy_augmented * trans_inv") - xy_aug = np.hstack(( - xy, np.ones((xy.shape[0], 1)) - )) - uv_m = np.dot(xy_aug, trans_inv) - print(uv_m) - - print("\nuv_m = tformfwd(trans_inv, xy)") - uv_m = tformfwd(trans_inv, xy) - print(uv_m) - - uv_m = tforminv(trans, xy) - print("\nuv_m = tforminv(trans, xy)") - print(uv_m) \ No newline at end of file diff --git a/spaces/ltgoslo/ssa-perin/utility/log.py b/spaces/ltgoslo/ssa-perin/utility/log.py deleted file mode 100644 index b9ef3770a22e8efdc324dbaf6f2c16b73594518f..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/utility/log.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 - -from utility.loading_bar import LoadingBar -import time -import torch - - -class Log: - def __init__(self, dataset, model, optimizer, args, directory, log_each: int, initial_epoch=-1, log_wandb=True): - self.dataset = dataset - self.model = model - self.args = args - self.optimizer = optimizer - - self.loading_bar = LoadingBar(length=27) - self.best_f1_score = 0.0 - self.log_each = log_each - self.epoch = initial_epoch - self.log_wandb = log_wandb - if self.log_wandb: - globals()["wandb"] = __import__("wandb") # ugly way to not require wandb if not needed - - self.directory = directory - self.evaluation_results = f"{directory}/results_{{0}}_{{1}}.json" - self.full_evaluation_results = f"{directory}/full_results_{{0}}_{{1}}.json" - self.best_full_evaluation_results = f"{directory}/best_full_results_{{0}}_{{1}}.json" - self.result_history = {epoch: {} for epoch in range(args.epochs)} - - self.best_checkpoint_filename = f"{self.directory}/best_checkpoint.h5" - self.last_checkpoint_filename = f"{self.directory}/last_checkpoint.h5" - - self.step = 0 - self.total_batch_size = 0 - self.flushed = True - - def train(self, len_dataset: int) -> None: - self.flush() - - self.epoch += 1 - if self.epoch == 0: - self._print_header() - - self.is_train = True - self._reset(len_dataset) - - def eval(self, len_dataset: int) -> None: - self.flush() - self.is_train = False - self._reset(len_dataset) - - def __call__(self, batch_size, losses, grad_norm: float = None, learning_rates: float = None,) -> None: - if self.is_train: - self._train_step(batch_size, losses, grad_norm, learning_rates) - else: - self._eval_step(batch_size, losses) - - self.flushed = False - - def flush(self) -> None: - if self.flushed: - return - self.flushed = True - - if self.is_train: - print(f"\r┃{self.epoch:12d} ┃{self._time():>12} │", end="", flush=True) - else: - if self.losses is not None and self.log_wandb: - dictionary = {f"validation/{key}": value / self.step for key, value in self.losses.items()} - dictionary["epoch"] = self.epoch - wandb.log(dictionary) - - self.losses = None - # self._save_model(save_as_best=False, performance=None) - - def log_evaluation(self, scores, mode, epoch): - f1_score = scores["sentiment_tuple/f1"] - if self.log_wandb: - scores = {f"{mode}/{k}": v for k, v in scores.items()} - wandb.log({ - "epoch": epoch, - **scores - }) - - if mode == "validation" and f1_score > self.best_f1_score: - if self.log_wandb: - wandb.run.summary["best sentiment 
tuple f1 score"] = f1_score - self.best_f1_score = f1_score - self._save_model(save_as_best=True, f1_score=f1_score) - - def _save_model(self, save_as_best: bool, f1_score: float): - if not self.args.save_checkpoints: - return - - state = { - "epoch": self.epoch, - "dataset": self.dataset.state_dict(), - "f1_score": f1_score, - "model": self.model.state_dict(), - "optimizer": self.optimizer.state_dict(), - "args": self.args.state_dict(), - } - - filename = self.best_checkpoint_filename if save_as_best else self.last_checkpoint_filename - - torch.save(state, filename) - if self.log_wandb: - wandb.save(filename) - - def _train_step(self, batch_size, losses, grad_norm: float, learning_rates) -> None: - self.total_batch_size += batch_size - self.step += 1 - - if self.losses is None: - self.losses = losses - else: - for key, values in losses.items(): - if key not in self.losses: - self.losses[key] = losses[key] - continue - self.losses[key] += losses[key] - - if self.step % self.log_each == 0: - progress = self.total_batch_size / self.len_dataset - print(f"\r┃{self.epoch:12d} │{self._time():>12} {self.loading_bar(progress)}", end="", flush=True) - - if self.log_wandb: - dictionary = {f"train/{key}" if not key.startswith("weight/") else key: value / self.log_each for key, value in self.losses.items()} - dictionary["epoch"] = self.epoch - dictionary["learning_rate/encoder"] = learning_rates[0] - dictionary["learning_rate/decoder"] = learning_rates[-2] - dictionary["learning_rate/grad_norm"] = learning_rates[-1] - dictionary["gradient norm"] = grad_norm - - wandb.log(dictionary) - - self.losses = None - - def _eval_step(self, batch_size, losses) -> None: - self.step += 1 - - if self.losses is None: - self.losses = losses - else: - for key, values in losses.items(): - if key not in self.losses: - self.losses[key] = losses[key] - continue - self.losses[key] += losses[key] - - def _reset(self, len_dataset: int) -> None: - self.start_time = time.time() - self.step = 0 - self.total_batch_size = 0 - self.len_dataset = len_dataset - self.losses = None - - def _time(self) -> str: - time_seconds = int(time.time() - self.start_time) - return f"{time_seconds // 60:02d}:{time_seconds % 60:02d} min" - - def _print_header(self) -> None: - print(f"┏━━━━━━━━━━━━━━┳━━━╸S╺╸E╺╸M╺╸A╺╸N╺╸T╺╸I╺╸S╺╸K╺━━━━━━━━━━━━━━┓") - print(f"┃ ┃ ╷ ┃") - print(f"┃ epoch ┃ elapsed │ progress bar ┃") - print(f"┠──────────────╂──────────────┼─────────────────────────────┨") diff --git a/spaces/m3hrdadfi/zabanshenas/libs/dummy.py b/spaces/m3hrdadfi/zabanshenas/libs/dummy.py deleted file mode 100644 index 9d677c9ca9c3ec1d2cd050dbb376830a568b33d3..0000000000000000000000000000000000000000 --- a/spaces/m3hrdadfi/zabanshenas/libs/dummy.py +++ /dev/null @@ -1,1179 +0,0 @@ -outputs = [ - [ - { - "language": "Persian", - "code": "fas", - "score": 0.6105580925941467 - }, - { - "language": "Gilaki", - "code": "glk", - "score": 0.29982829093933105 - }, - { - "language": "Northern Luri", - "code": "lrc", - "score": 0.04840774089097977 - }, - { - "language": "Mazanderani", - "code": "mzn", - "score": 0.030142733827233315 - }, - { - "language": "South Azerbaijani", - "code": "azb", - "score": 0.005220199003815651 - }, - { - "language": "Urdu", - "code": "urd", - "score": 0.0019745035097002983 - }, - { - "language": "Pushto", - "code": "pus", - "score": 0.0015690263826400042 - }, - { - "language": "Western Panjabi", - "code": "pnb", - "score": 0.0005721596535295248 - }, - { - "language": "Central Kurdish", - "code": "ckb", - "score": 
0.00025537016335874796 - }, - { - "language": "Sindhi", - "code": "snd", - "score": 0.0001820324978325516 - }, - { - "language": "Egyptian Arabic", - "code": "arz", - "score": 0.0001247940381290391 - }, - { - "language": "Arabic", - "code": "ara", - "score": 7.754910620860755e-05 - }, - { - "language": "Korean", - "code": "kor", - "score": 5.718228203477338e-05 - }, - { - "language": "Fiji Hindi", - "code": "hif", - "score": 3.5903740354115143e-05 - }, - { - "language": "Uighur", - "code": "uig", - "score": 3.5565532016335055e-05 - }, - { - "language": "Maori", - "code": "mri", - "score": 2.1078320060041733e-05 - }, - { - "language": "Literary Chinese", - "code": "lzh", - "score": 2.09943773370469e-05 - }, - { - "language": "Navajo", - "code": "nav", - "score": 1.8877935872296803e-05 - }, - { - "language": "Mongolian", - "code": "mon", - "score": 1.783044899639208e-05 - }, - { - "language": "Basque", - "code": "eus", - "score": 1.2980432074982673e-05 - }, - { - "language": "Moksha", - "code": "mdf", - "score": 1.2325609532126691e-05 - }, - { - "language": "Tongan", - "code": "ton", - "score": 1.1610675755946431e-05 - }, - { - "language": "Min Dong", - "code": "cdo", - "score": 1.1508132956805639e-05 - }, - { - "language": "Sinhala", - "code": "sin", - "score": 1.0617596672091167e-05 - }, - { - "language": "Venetian", - "code": "vec", - "score": 1.0375520105299074e-05 - }, - { - "language": "Western Mari", - "code": "mrj", - "score": 1.0316403859178536e-05 - }, - { - "language": "Malayalam", - "code": "mal", - "score": 1.0265099263051525e-05 - }, - { - "language": "Interlingua", - "code": "ina", - "score": 1.0040446795755997e-05 - }, - { - "language": "Tatar", - "code": "tat", - "score": 9.836200661084149e-06 - }, - { - "language": "Cantonese", - "code": "zh-yue", - "score": 9.80662207439309e-06 - }, - { - "language": "Wu Chinese", - "code": "wuu", - "score": 9.661145668360405e-06 - }, - { - "language": "Igbo", - "code": "ibo", - "score": 9.207592484017368e-06 - }, - { - "language": "Waray", - "code": "war", - "score": 8.970115231932141e-06 - }, - { - "language": "Yiddish", - "code": "yid", - "score": 8.926748705562204e-06 - }, - { - "language": "Udmurt", - "code": "udm", - "score": 8.702583727426827e-06 - }, - { - "language": "Dhivehi", - "code": "div", - "score": 8.36203707876848e-06 - }, - { - "language": "Newari", - "code": "new", - "score": 8.140945283230394e-06 - }, - { - "language": "Karachay-Balkar", - "code": "krc", - "score": 8.123539373627864e-06 - }, - { - "language": "Lojban", - "code": "jbo", - "score": 8.114019692584407e-06 - }, - { - "language": "Sanskrit", - "code": "san", - "score": 8.087784408417065e-06 - }, - { - "language": "Luganda", - "code": "lug", - "score": 8.023569534998387e-06 - }, - { - "language": "Maithili", - "code": "mai", - "score": 7.723083399469033e-06 - }, - { - "language": "Kirghiz", - "code": "kir", - "score": 7.715119863860309e-06 - }, - { - "language": "Standard Chinese", - "code": "zho", - "score": 7.5126054071006365e-06 - }, - { - "language": "Amharic", - "code": "amh", - "score": 7.451813871739432e-06 - }, - { - "language": "Chechen", - "code": "che", - "score": 7.444541097356705e-06 - }, - { - "language": "Gujarati", - "code": "guj", - "score": 7.395997727144277e-06 - }, - { - "language": "Tibetan", - "code": "bod", - "score": 7.390805421891855e-06 - }, - { - "language": "Komi", - "code": "kom", - "score": 7.373077551164897e-06 - }, - { - "language": "Lao", - "code": "lao", - "score": 7.351867679972202e-06 - }, - { - "language": "Wolof", - 
"code": "wol", - "score": 7.305452982109273e-06 - }, - { - "language": "Silesian", - "code": "szl", - "score": 7.301976893359097e-06 - }, - { - "language": "Northern Sotho", - "code": "nso", - "score": 7.2927336987049785e-06 - }, - { - "language": "Armenian", - "code": "hye", - "score": 7.243447726068553e-06 - }, - { - "language": "Arpitan", - "code": "frp", - "score": 7.137540251278551e-06 - }, - { - "language": "Bishnupriya", - "code": "bpy", - "score": 7.062033091642661e-06 - }, - { - "language": "Azerbaijani", - "code": "aze", - "score": 6.906778253323864e-06 - }, - { - "language": "Tajik", - "code": "tgk", - "score": 6.730050699843559e-06 - }, - { - "language": "Old English ", - "code": "ang", - "score": 6.6442084971640725e-06 - }, - { - "language": "Marathi", - "code": "mar", - "score": 6.63194168737391e-06 - }, - { - "language": "Kurdish", - "code": "kur", - "score": 6.615779057028703e-06 - }, - { - "language": "Lithuanian", - "code": "lit", - "score": 6.561998816323467e-06 - }, - { - "language": "Russian", - "code": "rus", - "score": 6.4370215113740414e-06 - }, - { - "language": "Tulu", - "code": "tcy", - "score": 6.370255960064242e-06 - }, - { - "language": "Extremaduran", - "code": "ext", - "score": 6.3398160818906035e-06 - }, - { - "language": "Aymara", - "code": "aym", - "score": 6.288398708420573e-06 - }, - { - "language": "Lower Sorbian", - "code": "dsb", - "score": 6.209619641595054e-06 - }, - { - "language": "Classical Nahuatl", - "code": "nci", - "score": 5.954705557087436e-06 - }, - { - "language": "Polish", - "code": "pol", - "score": 5.952156243438367e-06 - }, - { - "language": "Cebuano", - "code": "ceb", - "score": 5.911888820264721e-06 - }, - { - "language": "Hakka Chinese", - "code": "hak", - "score": 5.756284735980444e-06 - }, - { - "language": "Georgian", - "code": "kat", - "score": 5.656391749653267e-06 - }, - { - "language": "Mingrelian", - "code": "xmf", - "score": 5.57373004994588e-06 - }, - { - "language": "Telugu", - "code": "tel", - "score": 5.5334053286060225e-06 - }, - { - "language": "Doteli", - "code": "dty", - "score": 5.510717073775595e-06 - }, - { - "language": "Portuguese", - "code": "por", - "score": 5.50901131646242e-06 - }, - { - "language": "Komi-Permyak", - "code": "koi", - "score": 5.447328476293478e-06 - }, - { - "language": "Eastern Mari", - "code": "mhr", - "score": 5.414771294454113e-06 - }, - { - "language": "Lezghian", - "code": "lez", - "score": 5.2741329454875086e-06 - }, - { - "language": "Nepali (macrolanguage)", - "code": "nep", - "score": 5.273408532957546e-06 - }, - { - "language": "Samogitian", - "code": "sgs", - "score": 5.207636149862083e-06 - }, - { - "language": "Bhojpuri", - "code": "bho", - "score": 5.19551804245566e-06 - }, - { - "language": "Occitan", - "code": "oci", - "score": 5.172901182959322e-06 - }, - { - "language": "Western Frisian", - "code": "fry", - "score": 5.066170615464216e-06 - }, - { - "language": "Vlaams", - "code": "vls", - "score": 5.014707312511746e-06 - }, - { - "language": "Japanese", - "code": "jpn", - "score": 4.986791282135528e-06 - }, - { - "language": "V\u00f5ro", - "code": "vro", - "score": 4.9785726332629565e-06 - }, - { - "language": "Rusyn", - "code": "rue", - "score": 4.937043286190601e-06 - }, - { - "language": "Hindi", - "code": "hin", - "score": 4.9325194595439825e-06 - }, - { - "language": "Sicilian", - "code": "scn", - "score": 4.8434171731059905e-06 - }, - { - "language": "Somali", - "code": "som", - "score": 4.722482117358595e-06 - }, - { - "language": "Galician", - "code": "glg", - 
"score": 4.664954758482054e-06 - }, - { - "language": "Kazakh", - "code": "kaz", - "score": 4.485120825847844e-06 - }, - { - "language": "Kannada", - "code": "kan", - "score": 4.438274572748924e-06 - }, - { - "language": "Oromo", - "code": "orm", - "score": 4.422903202794259e-06 - }, - { - "language": "Albanian", - "code": "sqi", - "score": 4.410150268085999e-06 - }, - { - "language": "Minangkabau", - "code": "min", - "score": 4.407007509144023e-06 - }, - { - "language": "Finnish", - "code": "fin", - "score": 4.374884611024754e-06 - }, - { - "language": "Ossetian", - "code": "oss", - "score": 4.322507265897002e-06 - }, - { - "language": "Volap\u00fck", - "code": "vol", - "score": 4.30220188718522e-06 - }, - { - "language": "Min Nan Chinese", - "code": "nan", - "score": 4.2357942220405675e-06 - }, - { - "language": "Bashkir", - "code": "bak", - "score": 4.212616204313235e-06 - }, - { - "language": "Ligurian", - "code": "lij", - "score": 4.1821313061518595e-06 - }, - { - "language": "Welsh", - "code": "cym", - "score": 4.174029982095817e-06 - }, - { - "language": "Slovene", - "code": "slv", - "score": 4.172954504610971e-06 - }, - { - "language": "Dimli", - "code": "diq", - "score": 4.078176516486565e-06 - }, - { - "language": "Chuvash", - "code": "chv", - "score": 4.048466053063748e-06 - }, - { - "language": "Panjabi", - "code": "pan", - "score": 3.940522674383828e-06 - }, - { - "language": "Cornish", - "code": "cor", - "score": 3.940297119697789e-06 - }, - { - "language": "West Low German", - "code": "nds-nl", - "score": 3.926987574232044e-06 - }, - { - "language": "Cherokee", - "code": "chr", - "score": 3.9112833292165305e-06 - }, - { - "language": "Ido", - "code": "ido", - "score": 3.892145286954474e-06 - }, - { - "language": "Friulian", - "code": "fur", - "score": 3.869370175380027e-06 - }, - { - "language": "Ukrainian", - "code": "ukr", - "score": 3.7814761526533403e-06 - }, - { - "language": "Vietnamese", - "code": "vie", - "score": 3.7795757634739857e-06 - }, - { - "language": "Emilian", - "code": "egl", - "score": 3.7286854421836324e-06 - }, - { - "language": "Hungarian", - "code": "hun", - "score": 3.706084498844575e-06 - }, - { - "language": "Haitian Creole", - "code": "hat", - "score": 3.6860656109638512e-06 - }, - { - "language": "Jamaican Patois", - "code": "jam", - "score": 3.6750652725459076e-06 - }, - { - "language": "Turkmen", - "code": "tuk", - "score": 3.6414037367649144e-06 - }, - { - "language": "Gagauz", - "code": "gag", - "score": 3.6310443647380453e-06 - }, - { - "language": "Yakut", - "code": "sah", - "score": 3.611620968513307e-06 - }, - { - "language": "Breton", - "code": "bre", - "score": 3.5204120649723336e-06 - }, - { - "language": "Afrikaans", - "code": "afr", - "score": 3.5164177916158224e-06 - }, - { - "language": "Assamese", - "code": "asm", - "score": 3.5076063795713708e-06 - }, - { - "language": "Crimean Tatar", - "code": "crh", - "score": 3.4974791560671292e-06 - }, - { - "language": "Tswana", - "code": "tsn", - "score": 3.4639840578165604e-06 - }, - { - "language": "Malagasy", - "code": "mlg", - "score": 3.4424308523739455e-06 - }, - { - "language": "Tamil", - "code": "tam", - "score": 3.433554866205668e-06 - }, - { - "language": "Belarusian (Taraschkewiza)", - "code": "be-tarask", - "score": 3.4065565159835387e-06 - }, - { - "language": "Scottish Gaelic", - "code": "gla", - "score": 3.383374632903724e-06 - }, - { - "language": "Latin", - "code": "lat", - "score": 3.299320724181598e-06 - }, - { - "language": "Chavacano", - "code": "cbk", - "score": 
3.277132236689795e-06 - }, - { - "language": "Tarantino dialect", - "code": "roa-tara", - "score": 3.2704483601264656e-06 - }, - { - "language": "Modern Greek", - "code": "ell", - "score": 3.2669522624928504e-06 - }, - { - "language": "Ladino", - "code": "lad", - "score": 3.1890219815977616e-06 - }, - { - "language": "Latgalian", - "code": "ltg", - "score": 3.1830948046263075e-06 - }, - { - "language": "Pampanga", - "code": "pam", - "score": 3.1460281206818763e-06 - }, - { - "language": "Tagalog", - "code": "tgl", - "score": 3.100457433902193e-06 - }, - { - "language": "Hebrew", - "code": "heb", - "score": 3.0715009415871464e-06 - }, - { - "language": "Serbo-Croatian", - "code": "hbs", - "score": 3.050950908800587e-06 - }, - { - "language": "Achinese", - "code": "ace", - "score": 3.0138855890982086e-06 - }, - { - "language": "Italian", - "code": "ita", - "score": 3.003329993589432e-06 - }, - { - "language": "English", - "code": "eng", - "score": 2.97778979074792e-06 - }, - { - "language": "Burmese", - "code": "mya", - "score": 2.9546490623033606e-06 - }, - { - "language": "Spanish", - "code": "spa", - "score": 2.9272057417983888e-06 - }, - { - "language": "Papiamento", - "code": "pap", - "score": 2.8780641514458694e-06 - }, - { - "language": "Sardinian", - "code": "srd", - "score": 2.866505383281037e-06 - }, - { - "language": "Esperanto", - "code": "epo", - "score": 2.848199301297427e-06 - }, - { - "language": "Serbian", - "code": "srp", - "score": 2.7479175059852423e-06 - }, - { - "language": "Zeeuws", - "code": "zea", - "score": 2.7430314730736427e-06 - }, - { - "language": "Czech", - "code": "ces", - "score": 2.7409500944486354e-06 - }, - { - "language": "Bengali", - "code": "ben", - "score": 2.6958239232044434e-06 - }, - { - "language": "Erzya", - "code": "myv", - "score": 2.6273187359038275e-06 - }, - { - "language": "Croatian", - "code": "hrv", - "score": 2.6178654479735997e-06 - }, - { - "language": "Buryat", - "code": "bxr", - "score": 2.60430465459649e-06 - }, - { - "language": "Swahili (macrolanguage)", - "code": "swa", - "score": 2.6016373340098653e-06 - }, - { - "language": "Pangasinan", - "code": "pag", - "score": 2.60037768384791e-06 - }, - { - "language": "Xhosa", - "code": "xho", - "score": 2.580123918960453e-06 - }, - { - "language": "Bosnian", - "code": "bos", - "score": 2.5763115445442963e-06 - }, - { - "language": "Low German", - "code": "nds", - "score": 2.5743340756889665e-06 - }, - { - "language": "Kinyarwanda", - "code": "kin", - "score": 2.568235458966228e-06 - }, - { - "language": "Aromanian", - "code": "rup", - "score": 2.520287125662435e-06 - }, - { - "language": "Aragonese", - "code": "arg", - "score": 2.4836215288814856e-06 - }, - { - "language": "Tetum", - "code": "tet", - "score": 2.396502168267034e-06 - }, - { - "language": "Quechua", - "code": "que", - "score": 2.3799134396540467e-06 - }, - { - "language": "Livvi-Karelian", - "code": "olo", - "score": 2.3709426386631094e-06 - }, - { - "language": "Kashubian", - "code": "csb", - "score": 2.358733354412834e-06 - }, - { - "language": "Avar", - "code": "ava", - "score": 2.330698407604359e-06 - }, - { - "language": "Hausa", - "code": "hau", - "score": 2.286114295202424e-06 - }, - { - "language": "Ripuarisch", - "code": "ksh", - "score": 2.254129412904149e-06 - }, - { - "language": "Bulgarian", - "code": "bul", - "score": 2.2492179141408997e-06 - }, - { - "language": "Oriya", - "code": "ori", - "score": 2.1661755909008207e-06 - }, - { - "language": "Interlingue", - "code": "ile", - "score": 2.059975486190524e-06 
- }, - { - "language": "Guarani", - "code": "grn", - "score": 2.024690957114217e-06 - }, - { - "language": "Banjar", - "code": "bjn", - "score": 2.0237362150510307e-06 - }, - { - "language": "Thai", - "code": "tha", - "score": 2.01868806470884e-06 - }, - { - "language": "Dutch", - "code": "nld", - "score": 1.9297158360132016e-06 - }, - { - "language": "Kabyle", - "code": "kab", - "score": 1.9132662600895856e-06 - }, - { - "language": "Palatine German", - "code": "pfl", - "score": 1.9122355752188014e-06 - }, - { - "language": "Javanese", - "code": "jav", - "score": 1.8900879013017402e-06 - }, - { - "language": "Banyumasan", - "code": "map-bms", - "score": 1.8552185565567925e-06 - }, - { - "language": "Faroese", - "code": "fao", - "score": 1.8414674514133367e-06 - }, - { - "language": "Scots", - "code": "sco", - "score": 1.818199393710529e-06 - }, - { - "language": "Central Khmer", - "code": "khm", - "score": 1.7993022538576042e-06 - }, - { - "language": "Slovak", - "code": "slk", - "score": 1.7988603531193803e-06 - }, - { - "language": "Belarusian", - "code": "bel", - "score": 1.782583581189101e-06 - }, - { - "language": "Swedish", - "code": "swe", - "score": 1.7702136574371252e-06 - }, - { - "language": "Saterfriesisch", - "code": "stq", - "score": 1.7663436437942437e-06 - }, - { - "language": "Latvian", - "code": "lav", - "score": 1.7178032294395962e-06 - }, - { - "language": "Konkani", - "code": "kok", - "score": 1.690383783170546e-06 - }, - { - "language": "Tuvan", - "code": "tyv", - "score": 1.672853159107035e-06 - }, - { - "language": "Walloon", - "code": "wln", - "score": 1.6722132158975e-06 - }, - { - "language": "Sranan", - "code": "srn", - "score": 1.646132773203135e-06 - }, - { - "language": "Picard", - "code": "pcd", - "score": 1.6385885146519286e-06 - }, - { - "language": "Limburgan", - "code": "lim", - "score": 1.6372666777897393e-06 - }, - { - "language": "French", - "code": "fra", - "score": 1.6239549722740776e-06 - }, - { - "language": "Icelandic", - "code": "isl", - "score": 1.5904075780781568e-06 - }, - { - "language": "Irish", - "code": "gle", - "score": 1.5750525790281245e-06 - }, - { - "language": "Corsican", - "code": "cos", - "score": 1.570832523611898e-06 - }, - { - "language": "Alemannic German", - "code": "als", - "score": 1.5651218063794659e-06 - }, - { - "language": "German", - "code": "deu", - "score": 1.5594737305946182e-06 - }, - { - "language": "Upper Sorbian", - "code": "hsb", - "score": 1.5125158370210556e-06 - }, - { - "language": "Romanian", - "code": "ron", - "score": 1.5119784393391456e-06 - }, - { - "language": "Manx", - "code": "glv", - "score": 1.5035052456369158e-06 - }, - { - "language": "Lingala", - "code": "lin", - "score": 1.493238073635439e-06 - }, - { - "language": "Malay", - "code": "msa", - "score": 1.4067626352698426e-06 - }, - { - "language": "Maltese", - "code": "mlt", - "score": 1.370485165352875e-06 - }, - { - "language": "Luxembourgish", - "code": "ltz", - "score": 1.3397349221122568e-06 - }, - { - "language": "Estonian", - "code": "est", - "score": 1.3280839539220324e-06 - }, - { - "language": "Kabardian", - "code": "kbd", - "score": 1.3062604011793155e-06 - }, - { - "language": "Macedonian", - "code": "mkd", - "score": 1.2802570381609257e-06 - }, - { - "language": "Pennsylvania German", - "code": "pdc", - "score": 1.2550040082714986e-06 - }, - { - "language": "Sundanese", - "code": "sun", - "score": 1.1068191270169336e-06 - }, - { - "language": "Iloko", - "code": "ilo", - "score": 1.0791690101541462e-06 - }, - { - "language": 
"Karakalpak", - "code": "kaa", - "score": 1.0603262126096524e-06 - }, - { - "language": "Norwegian Nynorsk", - "code": "nno", - "score": 1.0554679192864569e-06 - }, - { - "language": "Yoruba", - "code": "yor", - "score": 1.046297711582156e-06 - }, - { - "language": "Neapolitan", - "code": "nap", - "score": 1.0279602520313347e-06 - }, - { - "language": "Danish", - "code": "dan", - "score": 1.0038916116172913e-06 - }, - { - "language": "Indonesian", - "code": "ind", - "score": 9.83746303973021e-07 - }, - { - "language": "Mirandese", - "code": "mwl", - "score": 8.806521236692788e-07 - }, - { - "language": "Catalan", - "code": "cat", - "score": 8.687447348165733e-07 - }, - { - "language": "Turkish", - "code": "tur", - "score": 8.384120064874878e-07 - }, - { - "language": "Veps", - "code": "vep", - "score": 7.812500371073838e-07 - }, - { - "language": "Bokm\u00e5l", - "code": "nob", - "score": 7.427178161378833e-07 - }, - { - "language": "Shona", - "code": "sna", - "score": 6.660703775196453e-07 - }, - { - "language": "Bavarian", - "code": "bar", - "score": 6.222485353646334e-07 - }, - { - "language": "Uzbek", - "code": "uzb", - "score": 6.021850822435226e-07 - }, - { - "language": "Central Bikol", - "code": "bcl", - "score": 5.77034370508045e-07 - }, - { - "language": "Asturian", - "code": "ast", - "score": 5.743918336520437e-07 - }, - { - "language": "Lombard", - "code": "lmo", - "score": 4.6301857992148143e-07 - }, - { - "language": "Romansh", - "code": "roh", - "score": 4.5534079617937095e-07 - }, - { - "language": "Narom", - "code": "nrm", - "score": 3.6611126574825903e-07 - }, - { - "language": "Northern Sami", - "code": "sme", - "score": 1.0723972820869676e-07 - } - ] -] diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/for_each.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/for_each.h deleted file mode 100644 index 9ef45dfe62e47d9779c4b60839628efd82c2a5e1..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/for_each.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system inherits for_each -#include - diff --git a/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_base_dataset_builder.py b/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_base_dataset_builder.py deleted file mode 100644 index 942ff86bf9ca13af4efee94af5e8b0a481b25353..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_base_dataset_builder.py +++ /dev/null @@ -1,238 +0,0 @@ -""" - This file is from - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import logging -import os -import shutil -import warnings - -from omegaconf import OmegaConf -import torch.distributed as dist -from torchvision.datasets.utils import download_url - -import bubogpt.common.utils as utils -from bubogpt.common.dist_utils import is_dist_avail_and_initialized, is_main_process -from bubogpt.common.registry import registry -from bubogpt.processors.base_processor import BaseProcessor - - -class ImageBaseDatasetBuilder: - train_dataset_cls, eval_dataset_cls = None, None - - def __init__(self, cfg=None): - super().__init__() - - if cfg is None: - # help to create datasets from default config. - self.config = load_dataset_config(self.default_config_path()) - elif isinstance(cfg, str): - self.config = load_dataset_config(cfg) - else: - # when called from task.build_dataset() - self.config = cfg - - self.data_type = self.config.data_type - - self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} - self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} - - def build_datasets(self): - # download, split, etc... - # only called on 1 GPU/TPU in distributed - - if is_main_process(): - self._download_data() - - if is_dist_avail_and_initialized(): - dist.barrier() - - # at this point, all the annotations and image/videos should be all downloaded to the specified locations. - logging.info("Building datasets...") - datasets = self.build() # dataset['train'/'val'/'test'] - - return datasets - - def build_processors(self): - vis_proc_cfg = self.config.get("vis_processor") - txt_proc_cfg = self.config.get("text_processor") - - if vis_proc_cfg is not None: - vis_train_cfg = vis_proc_cfg.get("train") - vis_eval_cfg = vis_proc_cfg.get("eval") - - self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg) - self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg) - - if txt_proc_cfg is not None: - txt_train_cfg = txt_proc_cfg.get("train") - txt_eval_cfg = txt_proc_cfg.get("eval") - - self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg) - self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg) - - @staticmethod - def _build_proc_from_cfg(cfg): - return ( - registry.get_processor_class(cfg.name).from_config(cfg) - if cfg is not None - else None - ) - - @classmethod - def default_config_path(cls, type="default"): - if cls.DATASET_CONFIG_DICT[type] is None: - return None - else: - return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type]) - - def _download_data(self): - self._download_ann() - self._download_vis() - - def _download_ann(self): - """ - Download annotation files if necessary. - All the vision-language datasets should have annotations of unified format. - - storage_path can be: - (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative. - (2) basename/dirname: will be suffixed with base name of URL if dirname is provided. - - Local annotation paths should be relative. 
- """ - anns = self.config.build_info.annotations - - splits = anns.keys() - - cache_root = registry.get_path("cache_root") - - for split in splits: - info = anns[split] - - urls, storage_paths = info.get("url", None), info.storage - - if isinstance(urls, str): - urls = [urls] - if isinstance(storage_paths, str): - storage_paths = [storage_paths] - - assert len(urls) == len(storage_paths) - - for url_or_filename, storage_path in zip(urls, storage_paths): - # if storage_path is relative, make it full by prefixing with cache_root. - if not os.path.isabs(storage_path): - storage_path = os.path.join(cache_root, storage_path) - - dirname = os.path.dirname(storage_path) - if not os.path.exists(dirname): - os.makedirs(dirname) - - if os.path.isfile(url_or_filename): - src, dst = url_or_filename, storage_path - if not os.path.exists(dst): - shutil.copyfile(src=src, dst=dst) - else: - logging.info("Using existing file {}.".format(dst)) - else: - if os.path.isdir(storage_path): - # if only dirname is provided, suffix with basename of URL. - raise ValueError( - "Expecting storage_path to be a file path, got directory {}".format( - storage_path - ) - ) - else: - filename = os.path.basename(storage_path) - - download_url(url=url_or_filename, root=dirname, filename=filename) - - def _download_vis(self): - - storage_path = self.config.build_info.get(self.data_type).storage - storage_path = utils.get_cache_path(storage_path) - - if not os.path.exists(storage_path): - warnings.warn( - f""" - The specified path {storage_path} for visual inputs does not exist. - Please provide a correct path to the visual inputs or - refer to datasets/download_scripts/README.md for downloading instructions. - """ - ) - - def build(self): - """ - Create by split datasets inheriting torch.utils.data.Datasets. - - # build() can be dataset-specific. Overwrite to customize. 
- """ - self.build_processors() - - build_info = self.config.build_info - - ann_info = build_info.annotations - vis_info = build_info.get(self.data_type) - - datasets = dict() - for split in ann_info.keys(): - if split not in ["train", "val", "test"]: - continue - - is_train = split == "train" - - # processors - vis_processor = ( - self.vis_processors["train"] - if is_train - else self.vis_processors["eval"] - ) - text_processor = ( - self.text_processors["train"] - if is_train - else self.text_processors["eval"] - ) - - # annotation path - ann_paths = ann_info.get(split).storage - if isinstance(ann_paths, str): - ann_paths = [ann_paths] - - abs_ann_paths = [] - for ann_path in ann_paths: - if not os.path.isabs(ann_path): - ann_path = utils.get_cache_path(ann_path) - abs_ann_paths.append(ann_path) - ann_paths = abs_ann_paths - - # visual data storage path - vis_path = os.path.join(vis_info.storage, split) - - if not os.path.isabs(vis_path): - # vis_path = os.path.join(utils.get_cache_path(), vis_path) - vis_path = utils.get_cache_path(vis_path) - - if not os.path.exists(vis_path): - warnings.warn("storage path {} does not exist.".format(vis_path)) - - # create datasets - dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls - datasets[split] = dataset_cls( - vis_processor=vis_processor, - text_processor=text_processor, - ann_paths=ann_paths, - vis_root=vis_path, - ) - - return datasets - - -def load_dataset_config(cfg_path): - cfg = OmegaConf.load(cfg_path).datasets - cfg = cfg[list(cfg.keys())[0]] - - return cfg diff --git a/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_text_pair_builder.py b/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_text_pair_builder.py deleted file mode 100644 index 1fa404362f043aad9575aead930e7c72627a8123..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/bubogpt/datasets/builders/image_text_pair_builder.py +++ /dev/null @@ -1,189 +0,0 @@ -import os -import logging -import warnings - -from bubogpt.common.registry import registry -from bubogpt.datasets.builders.image_base_dataset_builder import ImageBaseDatasetBuilder -from bubogpt.datasets.datasets.image_caption.laion_dataset import LaionDataset -from bubogpt.datasets.datasets.image_caption.cc_sbu_dataset import CCSBUDataset, \ - CCSBUAlignDatasetImageImageCaptionDataset, CCDataset -from bubogpt.datasets.datasets.image_caption.llava_dataset import LlavaInstruct150Dataset - -@registry.register_builder("cc_sbu") -class CCSBUBuilderImage(ImageBaseDatasetBuilder): - train_dataset_cls = CCSBUDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = "train" - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vision_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets - - -@registry.register_builder("laion") -class LaionBuilderImage(ImageBaseDatasetBuilder): - train_dataset_cls = LaionDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = 
"train" - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vision_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets - - -@registry.register_builder("cc_sbu_align") -class CCSBUAlignBuilderImage(ImageBaseDatasetBuilder): - train_dataset_cls = CCSBUAlignDatasetImageImageCaptionDataset - - DATASET_CONFIG_DICT = { - "default": "configs/datasets/cc_sbu/align.yaml", - } - - def build_datasets(self): - # at this point, all the annotations and image/videos should be all downloaded to the specified locations. - logging.info("Building datasets...") - self.build_processors() - - build_info = self.config.build_info - storage_path = build_info.storage - - datasets = dict() - - if not os.path.exists(storage_path): - warnings.warn("storage path {} does not exist.".format(storage_path)) - - # create datasets - dataset_cls = self.train_dataset_cls - datasets['train'] = dataset_cls( - vision_processor=self.vis_processors["train"], - text_processor=self.text_processors["train"], - ann_paths=[os.path.join(storage_path, 'filter_cap.json')], - vis_root=os.path.join(storage_path, 'image'), - ) - - return datasets - - -@registry.register_builder("cc12m") -class CC12MBuilder(ImageBaseDatasetBuilder): - train_dataset_cls = CCDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/cc12m/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = "train" - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets - - -@registry.register_builder("llava_instruct150") -class LlavaInstruct150Builder(ImageBaseDatasetBuilder): - train_dataset_cls = LlavaInstruct150Dataset - - DATASET_CONFIG_DICT = {"default": None} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - - def build(self): - self.build_processors() - - datasets = dict() - split = "train" - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - vis_root="/path/to/dataset/COCO_2014", - ann_paths=[os.path.join("/path/to/dataset/llava/annotations", subset + '.json') - for subset in ["complex_reasoning_77k", "conversation_58k", "detail_23k"]], - ) - return datasets - - -# from bubogpt.datasets.builders.image_text_pair_builder import LlavaInstruct150Builder - -if __name__ == "__main__": - from omegaconf import OmegaConf - from itertools import islice - - data_cfg = OmegaConf.create({ - "vis_processor": {"train": {"name": "imagebind_vision_train", "image_size": 224}}, - "text_processor": {"train": {"name": "imagebind_caption"}}, - "data_type": "image", - }) - - builder = LlavaInstruct150Builder(data_cfg) - - datasets = builder.build_datasets() - - datasets["train"].check_existence() - - for sample in islice(datasets["train"], 10): - print(sample["vision"].shape, sample["prompt"], sample["text_input"]) diff --git a/spaces/magicr/BuboGPT/imagebind/models/multimodal_preprocessors.py b/spaces/magicr/BuboGPT/imagebind/models/multimodal_preprocessors.py 
deleted file mode 100644 index 0938d6be3e87b37f407a7949b7952655d8e1a083..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/imagebind/models/multimodal_preprocessors.py +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/env python3 -# Portions Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import gzip -import html -import io -import math -from functools import lru_cache -from typing import Callable, List, Optional - -import ftfy - -import numpy as np -import regex as re -import torch -import torch.nn as nn -from iopath.common.file_io import g_pathmgr -from timm.models.layers import trunc_normal_ -from imagebind.models.helper import VerboseNNModule, cast_if_src_dtype - - -def get_sinusoid_encoding_table(n_position, d_hid): - """Sinusoid position encoding table""" - - # TODO: make it with torch instead of numpy - def get_position_angle_vec(position): - return [ - position / np.power(10000, 2 * (hid_j // 2) / d_hid) - for hid_j in range(d_hid) - ] - - sinusoid_table = np.array( - [get_position_angle_vec(pos_i) for pos_i in range(n_position)] - ) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - - return torch.FloatTensor(sinusoid_table).unsqueeze(0) - - -def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): - N = pos_embed.shape[1] - if N == target_spatial_size: - return pos_embed - dim = pos_embed.shape[-1] - # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 - pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) - pos_embed = nn.functional.interpolate( - pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( - 0, 3, 1, 2 - ), - scale_factor=math.sqrt(target_spatial_size / N), - mode="bicubic", - ) - if updated: - pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) - pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) - return pos_embed - - -def interpolate_pos_encoding( - npatch_per_img, - pos_embed, - patches_layout, - input_shape=None, - first_patch_idx=1, -): - assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" - N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists - if npatch_per_img == N: - return pos_embed - - assert ( - patches_layout[-1] == patches_layout[-2] - ), "Interpolation of pos embed not supported for non-square layouts" - - class_emb = pos_embed[:, :first_patch_idx] - pos_embed = pos_embed[:, first_patch_idx:] - - if input_shape is None or patches_layout[0] == 1: - # simple 2D pos embedding, no temporal component - pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) - elif patches_layout[0] > 1: - # pos embed has a temporal component - assert len(input_shape) == 4, "temporal interpolation not supported" - # we only support 2D interpolation in this case - num_frames = patches_layout[0] - num_spatial_tokens = patches_layout[1] * patches_layout[2] - pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) - # interpolate embedding for zeroth frame - pos_embed = interpolate_pos_encoding_2d( - npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) - ) - else: - raise ValueError("This type of interpolation isn't implemented") - - return torch.cat((class_emb, pos_embed), dim=1) - - -def _get_pos_embedding( - npatch_per_img, - pos_embed, - 
patches_layout, - input_shape, - first_patch_idx=1, -): - pos_embed = interpolate_pos_encoding( - npatch_per_img, - pos_embed, - patches_layout, - input_shape=input_shape, - first_patch_idx=first_patch_idx, - ) - return pos_embed - - -class PatchEmbedGeneric(nn.Module): - """ - PatchEmbed from Hydra - """ - - def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): - super().__init__() - - if len(proj_stem) > 1: - self.proj = nn.Sequential(*proj_stem) - else: - # Special case to be able to load pre-trained models that were - # trained with a standard stem - self.proj = proj_stem[0] - self.norm_layer = norm_layer - - def get_patch_layout(self, img_size): - with torch.no_grad(): - dummy_img = torch.zeros( - [ - 1, - ] - + img_size - ) - dummy_out = self.proj(dummy_img) - embed_dim = dummy_out.shape[1] - patches_layout = tuple(dummy_out.shape[2:]) - num_patches = np.prod(patches_layout) - return patches_layout, num_patches, embed_dim - - def forward(self, x): - x = self.proj(x) - # B C (T) H W -> B (T)HW C - x = x.flatten(2).transpose(1, 2) - if self.norm_layer is not None: - x = self.norm_layer(x) - return x - - -class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): - def __init__( - self, - patches_layout: List, - num_patches: int, - num_cls_tokens: int, - embed_dim: int, - learnable: bool, - ) -> None: - super().__init__() - self.num_cls_tokens = num_cls_tokens - self.patches_layout = patches_layout - self.num_patches = num_patches - self.num_tokens = num_cls_tokens + num_patches - self.learnable = learnable - if self.learnable: - self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) - trunc_normal_(self.pos_embed, std=0.02) - else: - self.register_buffer( - "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) - ) - - def get_pos_embedding(self, vision_input, all_vision_tokens): - input_shape = vision_input.shape - pos_embed = _get_pos_embedding( - all_vision_tokens.size(1) - self.num_cls_tokens, - pos_embed=self.pos_embed, - patches_layout=self.patches_layout, - input_shape=input_shape, - first_patch_idx=self.num_cls_tokens, - ) - return pos_embed - -class BlipPreprocessor(VerboseNNModule): - def __init__(self) -> None: - super().__init__() - - def forward(self, vision=None): - return_dict = { - "trunk": { - "x": vision, - }, - "head": {}, - } - return return_dict - -class RGBDTPreprocessor(VerboseNNModule): - def __init__( - self, - rgbt_stem: PatchEmbedGeneric, - depth_stem: PatchEmbedGeneric, - img_size: List = (3, 224, 224), - num_cls_tokens: int = 1, - pos_embed_fn: Callable = None, - use_type_embed: bool = False, - init_param_style: str = "openclip", - ) -> None: - super().__init__() - stem = rgbt_stem if rgbt_stem is not None else depth_stem - ( - self.patches_layout, - self.num_patches, - self.embed_dim, - ) = stem.get_patch_layout(img_size) - self.rgbt_stem = rgbt_stem - self.depth_stem = depth_stem - self.use_pos_embed = pos_embed_fn is not None - self.use_type_embed = use_type_embed - self.num_cls_tokens = num_cls_tokens - - if self.use_pos_embed: - self.pos_embedding_helper = pos_embed_fn( - patches_layout=self.patches_layout, - num_cls_tokens=num_cls_tokens, - num_patches=self.num_patches, - embed_dim=self.embed_dim, - ) - if self.num_cls_tokens > 0: - self.cls_token = nn.Parameter( - torch.zeros(1, self.num_cls_tokens, self.embed_dim) - ) - if self.use_type_embed: - self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) - - self.init_parameters(init_param_style) - - @torch.no_grad() - def init_parameters(self, 
init_param_style): - if init_param_style == "openclip": - # OpenCLIP style initialization - scale = self.embed_dim**-0.5 - if self.use_pos_embed: - nn.init.normal_(self.pos_embedding_helper.pos_embed) - self.pos_embedding_helper.pos_embed *= scale - - if self.num_cls_tokens > 0: - nn.init.normal_(self.cls_token) - self.cls_token *= scale - elif init_param_style == "vit": - self.cls_token.data.fill_(0) - else: - raise ValueError(f"Unknown init {init_param_style}") - - if self.use_type_embed: - nn.init.normal_(self.type_embed) - - def tokenize_input_and_cls_pos(self, input, stem, mask): - # tokens is of shape B x L x D - tokens = stem(input) - assert tokens.ndim == 3 - assert tokens.shape[2] == self.embed_dim - B = tokens.shape[0] - if self.num_cls_tokens > 0: - class_tokens = self.cls_token.expand( - B, -1, -1 - ) # stole class_tokens impl from Phil Wang, thanks - tokens = torch.cat((class_tokens, tokens), dim=1) - if self.use_pos_embed: - pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) - tokens = tokens + pos_embed - if self.use_type_embed: - tokens = tokens + self.type_embed.expand(B, -1, -1) - return tokens - - def forward(self, vision=None, depth=None, patch_mask=None): - if patch_mask is not None: - raise NotImplementedError() - - if vision is not None: - vision_tokens = self.tokenize_input_and_cls_pos( - vision, self.rgbt_stem, patch_mask - ) - - if depth is not None: - depth_tokens = self.tokenize_input_and_cls_pos( - depth, self.depth_stem, patch_mask - ) - - # aggregate tokens - if vision is not None and depth is not None: - final_tokens = vision_tokens + depth_tokens - else: - final_tokens = vision_tokens if vision is not None else depth_tokens - return_dict = { - "trunk": { - "tokens": final_tokens, - }, - "head": {}, - } - return return_dict - - -class AudioPreprocessor(RGBDTPreprocessor): - def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: - super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) - - def forward(self, audio=None): - return super().forward(vision=audio) - - -class ThermalPreprocessor(RGBDTPreprocessor): - def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: - super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) - - def forward(self, thermal=None): - return super().forward(vision=thermal) - - -def build_causal_attention_mask(context_length): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(context_length, context_length, requires_grad=False) - mask.fill_(float("-inf")) - mask.triu_(1) # zero out the lower diagonal - return mask - - -class TextPreprocessor(VerboseNNModule): - def __init__( - self, - vocab_size: int, - context_length: int, - embed_dim: int, - causal_masking: bool, - supply_seq_len_to_head: bool = True, - num_cls_tokens: int = 0, - init_param_style: str = "openclip", - ) -> None: - super().__init__() - self.vocab_size = vocab_size - self.context_length = context_length - self.token_embedding = nn.Embedding(vocab_size, embed_dim) - self.pos_embed = nn.Parameter( - torch.empty(1, self.context_length + num_cls_tokens, embed_dim) - ) - self.causal_masking = causal_masking - if self.causal_masking: - mask = build_causal_attention_mask(self.context_length) - # register the mask as a buffer so it can be moved to the right device - self.register_buffer("mask", mask) - - self.supply_seq_len_to_head = supply_seq_len_to_head - self.num_cls_tokens = num_cls_tokens 
- self.embed_dim = embed_dim - if num_cls_tokens > 0: - assert self.causal_masking is False, "Masking + CLS token isn't implemented" - self.cls_token = nn.Parameter( - torch.zeros(1, self.num_cls_tokens, embed_dim) - ) - - self.init_parameters(init_param_style) - - @torch.no_grad() - def init_parameters(self, init_param_style="openclip"): - # OpenCLIP style initialization - nn.init.normal_(self.token_embedding.weight, std=0.02) - nn.init.normal_(self.pos_embed, std=0.01) - - if init_param_style == "openclip": - # OpenCLIP style initialization - scale = self.embed_dim**-0.5 - if self.num_cls_tokens > 0: - nn.init.normal_(self.cls_token) - self.cls_token *= scale - elif init_param_style == "vit": - self.cls_token.data.fill_(0) - else: - raise ValueError(f"Unknown init {init_param_style}") - - def forward(self, text): - # text tokens are of shape B x L x D - text_tokens = self.token_embedding(text) - # concat CLS tokens if any - if self.num_cls_tokens > 0: - B = text_tokens.shape[0] - class_tokens = self.cls_token.expand( - B, -1, -1 - ) # stole class_tokens impl from Phil Wang, thanks - text_tokens = torch.cat((class_tokens, text_tokens), dim=1) - text_tokens = text_tokens + self.pos_embed - return_dict = { - "trunk": { - "tokens": text_tokens, - }, - "head": {}, - } - # Compute sequence length after adding CLS tokens - if self.supply_seq_len_to_head: - text_lengths = text.argmax(dim=-1) - return_dict["head"] = { - "seq_len": text_lengths, - } - if self.causal_masking: - return_dict["trunk"].update({"attn_mask": self.mask}) - return return_dict - - -class Im2Video(nn.Module): - """Convert an image into a trivial video.""" - - def __init__(self, time_dim=2): - super().__init__() - self.time_dim = time_dim - - def forward(self, x): - if x.ndim == 4: - # B, C, H, W -> B, C, T, H, W - return x.unsqueeze(self.time_dim) - elif x.ndim == 5: - return x - else: - raise ValueError(f"Dimension incorrect {x.shape}") - - -class PadIm2Video(Im2Video): - def __init__(self, ntimes, pad_type, time_dim=2): - super().__init__(time_dim=time_dim) - assert ntimes > 0 - assert pad_type in ["zero", "repeat"] - self.ntimes = ntimes - self.pad_type = pad_type - - def forward(self, x): - x = super().forward(x) - if x.shape[self.time_dim] == 1: - if self.pad_type == "repeat": - new_shape = [1] * len(x.shape) - new_shape[self.time_dim] = self.ntimes - x = x.repeat(new_shape) - elif self.pad_type == "zero": - padarg = [0, 0] * len(x.shape) - padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] - x = nn.functional.pad(x, padarg) - return x - - -# Modified from github.com/openai/CLIP -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. 
- """ - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). - """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -def basic_clean(text): - text = ftfy.fix_text(text) - text = html.unescape(html.unescape(text)) - return text.strip() - - -def whitespace_clean(text): - text = re.sub(r"\s+", " ", text) - text = text.strip() - return text - - -class SimpleTokenizer(object): - def __init__(self, bpe_path: str, context_length=77): - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - - with g_pathmgr.open(bpe_path, "rb") as fh: - bpe_bytes = io.BytesIO(fh.read()) - merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") - merges = merges[1 : 49152 - 256 - 2 + 1] - merges = [tuple(merge.split()) for merge in merges] - vocab = list(bytes_to_unicode().values()) - vocab = vocab + [v + "" for v in vocab] - for merge in merges: - vocab.append("".join(merge)) - vocab.extend(["<|startoftext|>", "<|endoftext|>"]) - self.encoder = dict(zip(vocab, range(len(vocab)))) - self.decoder = {v: k for k, v in self.encoder.items()} - self.bpe_ranks = dict(zip(merges, range(len(merges)))) - self.cache = { - "<|startoftext|>": "<|startoftext|>", - "<|endoftext|>": "<|endoftext|>", - } - self.pat = re.compile( - r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", - re.IGNORECASE, - ) - self.context_length = context_length - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token[:-1]) + (token[-1] + "",) - pairs = get_pairs(word) - - if not pairs: - return token + "" - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - text = whitespace_clean(basic_clean(text)).lower() - for token in re.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend( - self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") - ) - return bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = ( - bytearray([self.byte_decoder[c] for c in text]) - .decode("utf-8", errors="replace") - .replace("", " ") - ) - return text - - def __call__(self, texts, context_length=None): - if not context_length: - context_length = self.context_length - - if isinstance(texts, str): - texts = [texts] - - sot_token = self.encoder["<|startoftext|>"] - eot_token = 
self.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - tokens = tokens[:context_length] - result[i, : len(tokens)] = torch.tensor(tokens) - - if len(result) == 1: - return result[0] - return result - - -class IMUPreprocessor(VerboseNNModule): - def __init__( - self, - kernel_size: int, - imu_stem: PatchEmbedGeneric, - embed_dim: int, - img_size: List = (6, 2000), - num_cls_tokens: int = 1, - pos_embed_fn: Callable = None, - init_param_style: str = "openclip", - ) -> None: - super().__init__() - stem = imu_stem - self.imu_stem = imu_stem - self.embed_dim = embed_dim - self.use_pos_embed = pos_embed_fn is not None - self.num_cls_tokens = num_cls_tokens - self.kernel_size = kernel_size - self.pos_embed = nn.Parameter( - torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) - ) - - if self.num_cls_tokens > 0: - self.cls_token = nn.Parameter( - torch.zeros(1, self.num_cls_tokens, self.embed_dim) - ) - - self.init_parameters(init_param_style) - - @torch.no_grad() - def init_parameters(self, init_param_style): - nn.init.normal_(self.pos_embed, std=0.01) - - if init_param_style == "openclip": - # OpenCLIP style initialization - scale = self.embed_dim**-0.5 - - if self.num_cls_tokens > 0: - nn.init.normal_(self.cls_token) - self.cls_token *= scale - elif init_param_style == "vit": - self.cls_token.data.fill_(0) - else: - raise ValueError(f"Unknown init {init_param_style}") - - def tokenize_input_and_cls_pos(self, input, stem): - # tokens is of shape B x L x D - tokens = stem.norm_layer(stem.proj(input)) - assert tokens.ndim == 3 - assert tokens.shape[2] == self.embed_dim - B = tokens.shape[0] - if self.num_cls_tokens > 0: - class_tokens = self.cls_token.expand( - B, -1, -1 - ) # stole class_tokens impl from Phil Wang, thanks - tokens = torch.cat((class_tokens, tokens), dim=1) - if self.use_pos_embed: - tokens = tokens + self.pos_embed - return tokens - - def forward(self, imu): - # Patchify - imu = imu.unfold( - -1, - self.kernel_size, - self.kernel_size, - ).permute(0, 2, 1, 3) - imu = imu.reshape(imu.size(0), imu.size(1), -1) - - imu_tokens = self.tokenize_input_and_cls_pos( - imu, - self.imu_stem, - ) - - return_dict = { - "trunk": { - "tokens": imu_tokens, - }, - "head": {}, - } - return return_dict \ No newline at end of file diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/image_folder.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/image_folder.py deleted file mode 100644 index 7281eb23df59a7337732d5b4622977137fefdbd4..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/image_folder.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -import torch.utils.data as data -from PIL import Image -import os - -IMG_EXTENSIONS = [ - ".jpg", - ".JPG", - ".jpeg", - ".JPEG", - ".png", - ".PNG", - ".ppm", - ".PPM", - ".bmp", - ".BMP", - ".tiff", - ".webp", -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset_rec(dir, images): - assert os.path.isdir(dir), "%s is not a valid directory" % dir - - for root, dnames, fnames in sorted(os.walk(dir, followlinks=True)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - - -def make_dataset(dir, recursive=False, read_cache=False, write_cache=False): - images = [] - - if read_cache: - possible_filelist = os.path.join(dir, "files.list") - if os.path.isfile(possible_filelist): - with open(possible_filelist, "r") as f: - images = f.read().splitlines() - return images - - if recursive: - make_dataset_rec(dir, images) - else: - assert os.path.isdir(dir) or os.path.islink(dir), "%s is not a valid directory" % dir - - for root, dnames, fnames in sorted(os.walk(dir)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - - if write_cache: - filelist_cache = os.path.join(dir, "files.list") - with open(filelist_cache, "w") as f: - for path in images: - f.write("%s\n" % path) - print("wrote filelist cache at %s" % filelist_cache) - - return images - - -def default_loader(path): - return Image.open(path).convert("RGB") - - -class ImageFolder(data.Dataset): - def __init__(self, root, transform=None, return_paths=False, loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise ( - RuntimeError( - "Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS) - ) - ) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/masterkram/finance_news_classifier/src/evalutate.py b/spaces/masterkram/finance_news_classifier/src/evalutate.py deleted file mode 100644 index 4a61253d2a94d25fcf6b2dbb92a0ebeaa791c5ba..0000000000000000000000000000000000000000 --- a/spaces/masterkram/finance_news_classifier/src/evalutate.py +++ /dev/null @@ -1,2 +0,0 @@ -def evaluate_model(trainer, data): - results = trainer.predict(data) diff --git a/spaces/matthoffner/starchat-ui/types/chat.ts b/spaces/matthoffner/starchat-ui/types/chat.ts deleted file mode 100644 index 1233f2cbe347464ba4937d7a0272ea533ded116b..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/types/chat.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { OpenAIModel } from './openai'; - -export interface Message { - role: Role; - content: string; -} - -export type Role = 'assistant' | 'user'; - -export interface ChatBody { - model: OpenAIModel; - messages: Message[]; - key: string; - prompt: string; - temperature: number; -} - -export interface Conversation { - id: string; - name: string; - messages: Message[]; - model: OpenAIModel; - prompt: string; - temperature: number; - folderId: string | null; -} diff --git a/spaces/mayordp/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py b/spaces/mayordp/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py 
deleted file mode 100644 index b6f4c66e07c46ce0f961acbd99289e421cd4e619..0000000000000000000000000000000000000000 --- a/spaces/mayordp/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import List, Optional, Tuple, Any, Dict -from time import sleep - -import cv2 -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame -from DeepFakeAI.face_analyser import get_many_faces -from DeepFakeAI.face_reference import clear_face_reference -from DeepFakeAI.typing import Frame, FaceRecognition -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import ComponentName, Update -from DeepFakeAI.utilities import is_image, is_video - -FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None -REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None -REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global FACE_RECOGNITION_DROPDOWN - global REFERENCE_FACE_POSITION_GALLERY - global REFERENCE_FACE_DISTANCE_SLIDER - - with gradio.Box(): - reference_face_gallery_args: Dict[str, Any] = { - 'label': wording.get('reference_face_gallery_label'), - 'height': 120, - 'object_fit': 'cover', - 'columns': 10, - 'allow_preview': False, - 'visible': 'reference' in DeepFakeAI.globals.face_recognition - } - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( - label = wording.get('face_recognition_dropdown_label'), - choices = DeepFakeAI.choices.face_recognition, - value = DeepFakeAI.globals.face_recognition - ) - REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) - REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( - label = wording.get('reference_face_distance_slider_label'), - value = DeepFakeAI.globals.reference_face_distance, - maximum = 3, - step = 0.05, - visible = 'reference' in DeepFakeAI.globals.face_recognition - ) - ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) - ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) - ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) - - -def listen() -> None: - FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) - REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) - REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) - update_component_names : List[ComponentName] =\ - [ - 'target_file', - 'preview_frame_slider' - ] - for component_name in update_component_names: - component = ui.get_component(component_name) - if component: - component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) - select_component_names : List[ComponentName] =\ - [ - 'face_analyser_direction_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in 
select_component_names: - component = ui.get_component(component_name) - if component: - component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) - - -def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: - if face_recognition == 'reference': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = True), gradio.update(visible = True) - if face_recognition == 'many': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = False), gradio.update(visible = False) - - -def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: - clear_face_reference() - return update_face_reference_position(event.index) - - -def update_face_reference_position(reference_face_position : int = 0) -> Update: - sleep(0.2) - gallery_frames = [] - DeepFakeAI.globals.reference_face_position = reference_face_position - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - gallery_frames = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - gallery_frames = extract_gallery_frames(reference_frame) - if gallery_frames: - return gradio.update(value = gallery_frames) - return gradio.update(value = None) - - -def update_reference_face_distance(reference_face_distance : float) -> Update: - DeepFakeAI.globals.reference_face_distance = reference_face_distance - return gradio.update(value = reference_face_distance) - - -def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: - crop_frames = [] - faces = get_many_faces(reference_frame) - for face in faces: - start_x, start_y, end_x, end_y = map(int, face['bbox']) - padding_x = int((end_x - start_x) * 0.25) - padding_y = int((end_y - start_y) * 0.25) - start_x = max(0, start_x - padding_x) - start_y = max(0, start_y - padding_y) - end_x = max(0, end_x + padding_x) - end_y = max(0, end_y + padding_y) - crop_frame = reference_frame[start_y:end_y, start_x:end_x] - crop_frames.append(ui.normalize_frame(crop_frame)) - return crop_frames diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/data_loading/data_module.py b/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/data_loading/data_module.py deleted file mode 100644 index 1047d41d16b81c05e2f14130311b42ab5b36eb36..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/data_loading/data_module.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES -# SPDX-License-Identifier: MIT - -import torch.distributed as dist -from abc import ABC -from torch.utils.data import DataLoader, DistributedSampler, Dataset - -from se3_transformer.runtime.utils import get_local_rank - - -def _get_dataloader(dataset: Dataset, shuffle: bool, **kwargs) -> DataLoader: - # Classic or distributed dataloader depending on the context - sampler = DistributedSampler(dataset, shuffle=shuffle) if dist.is_initialized() else None - return DataLoader(dataset, shuffle=(shuffle and sampler is None), sampler=sampler, **kwargs) - - -class DataModule(ABC): - """ Abstract DataModule. Children must define self.ds_{train | val | test}. """ - - def __init__(self, **dataloader_kwargs): - super().__init__() - if get_local_rank() == 0: - self.prepare_data() - - # Wait until rank zero has prepared the data (download, preprocessing, ...) - if dist.is_initialized(): - dist.barrier(device_ids=[get_local_rank()]) - - self.dataloader_kwargs = {'pin_memory': True, 'persistent_workers': True, **dataloader_kwargs} - self.ds_train, self.ds_val, self.ds_test = None, None, None - - def prepare_data(self): - """ Method called only once per node. Put here any downloading or preprocessing """ - pass - - def train_dataloader(self) -> DataLoader: - return _get_dataloader(self.ds_train, shuffle=True, **self.dataloader_kwargs) - - def val_dataloader(self) -> DataLoader: - return _get_dataloader(self.ds_val, shuffle=False, **self.dataloader_kwargs) - - def test_dataloader(self) -> DataLoader: - return _get_dataloader(self.ds_test, shuffle=False, **self.dataloader_kwargs) diff --git a/spaces/merve/data-leak/public/private-and-fair/top-bot-digits.js b/spaces/merve/data-leak/public/private-and-fair/top-bot-digits.js deleted file mode 100644 index bc2f85ec8cb3b5544245f159aa62ff2fbffbcbb5..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/private-and-fair/top-bot-digits.js +++ /dev/null @@ -1,66 +0,0 @@ - -!(async function(){ - await util.getFile(`cns-cache/mnist_train_raw_3.npy`) - var digitMetadata = await util.getFile('mnist_train.csv') - var {byLabel} = util.decorateDigitMetadata(digitMetadata) - - var sel = d3.select('.top-bot-digits').html('') - .at({role: 'graphics-document', 'aria-label': `The twenty-five MNIST 3 digits most and least senstive to higher and lower privacy. 
The digits most sensitive to higher privacy are much more poorly drawn than the onces least sensitive to higher privacy.`}) - - var digitSel = sel.append('div') - var buttonSel = sel.append('div.digit-button-container') - .appendMany('div.button', d3.range(10)) - .text(d => d) - .on('click', d => drawClass(byLabel[d])) - - drawClass(byLabel[3]) - - async function drawClass(digitClass){ - buttonSel.classed('active', d => d == digitClass.key) - await util.getFile(`cns-cache/mnist_train_raw_${digitClass.key}.npy`) - - var nRows = 5 - var nCols = 5 - - var bot = _.sortBy(digitClass, d => +d.priv_order).slice(0, nRows*nCols) - var top = _.sortBy(digitClass, d => -d.priv_order).slice(0, nRows*nCols) - - digitSel.html('').append('div') - .st({maxWidth: 640, margin: '0 auto'}) - .appendMany('div', [bot, top]) - .st({display: 'inline-block'}) - .each(drawDigitBlock) - - - function drawDigitBlock(digits, isBot){ - var s = 2 - - var sel = d3.select(this).append('div') - - var c = d3.conventions({ - sel, - width: s*29*nCols, - height: s*29*nRows, - layers: 'cs', - margin: {top: 30, bottom: 10, right: 10, left: 10} - }) - - var ctx = c.layers[0] - - digits.forEach((d, i) => { - util.drawDigit( - ctx, - +d.i, - s, - (i % nCols)*s*29, - Math.floor(i/nCols)*s*29 - ) - }) - - c.svg.append('text') - .text(isBot ? 'Least sensitive to higher privacy' : 'Most sensitive to higher privacy') - .at({dy: '-.4em', textAnchor: 'middle', x: c.width/2, fontWeight: 600, fontSize: 14}) - } - } - -})() \ No newline at end of file diff --git a/spaces/merve/hidden-bias/server-side/fill-in-the-blank/node/gender-over-time.js b/spaces/merve/hidden-bias/server-side/fill-in-the-blank/node/gender-over-time.js deleted file mode 100644 index fcfe45855289fd6bf5143b5803d661cc1548d8d0..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/server-side/fill-in-the-blank/node/gender-over-time.js +++ /dev/null @@ -1,212 +0,0 @@ -import ss from 'scrape-stl' -var {d3, jp, fs, io, _} = ss - -import npyjs from './npy.js' -import getSentenceEmbed from './get-sentence-embed.js' -import pLimit from 'p-limit' - -import { URL } from 'url' -var __dirname = new URL('.', import.meta.url).pathname - -var datadir = __dirname + '../../source/fill-in-the-blank/data/' - - -var outpath = __dirname + '/../../../1wheel/gender-over-time/gender-over-time.json' -// var outpath = __dirname + '/cache/gender-over-time.json' -var cacheSentences = io.readDataSync(outpath) -// var cacheSentences = [] - -var limit1 = pLimit(1) -var promises = [ - 'In $year [he|she] worked as a _.', - // 'In $year [they|she] worked as a _.', - // 'In $year [they|he] worked as a _.', - 'In $year [he|she] studied _.', - // 'In $year [they|she] studied _.', - // 'In $year [they|he] studied _.', - 'Born in $year [his|her] name was _.', - // 'Born in $year [their|her] name was _.', - // 'Born in $year [their|he] name was _.', - 'In $year [he|she] was _.', - 'In $year [he|she] was really _.', - 'In $year [he|she] was so _.', - 'In $year [he|she] named the dog _.', - 'In $year [he|she] named the cat _.', - 'In $year [he|she] hired a _.', - 'In $year, [he|she] joined the high school _ team', - "Things weren't like they used to be. 
In $year, [he|she] joined the high school _ team.", - // 'In $year [he|she] invented a _.', - 'In $year [his|her] favorite band was _.', - 'In $year [his|her] favorite movie was _.', - 'In $year [his|her] favorite book was _.', - 'In $year [he|she] loved to read about _.', - 'In $year [he|she] fixed a _.', - 'In $year [he|she] bought a _.', - 'In $year [he|she] traveled to _.', - 'In $year [he|she] went to a _.', - 'In $year [he|she] lived in a _.', - 'In $year [he|she] _ a bear.', - 'In $year [he|she] _.', - 'In $year [he|she] was arrested for _.', - 'In $year [he|she] adopted a _.', - // 'In $year [he|she] took care of a _.', - 'In $year [he|she] took care of the _.', - // [ - // 'In $year he took care of his _.', - // 'In $year she took care of her _.', - // ], - // 'In $year [he|she] took _ care of the baby.', - // 'In $year [he|she] loved to eat _.', - // 'In $year [he|she] ate a _.', - 'In $year [he|she] mostly ate _.', - // 'In $year [he|she] cooked a _.', - 'In $year [he|she] played _.', - // 'In $year [he|she] wore a _.', - // 'In $year [he|she] wore _.', - 'In $year [he|she] wore a pair of _.', - 'In $year [he|she] wore a _ to a party.', - 'In $year, [he|she] looked very fashionable wearing _.', - 'In $year [he|she] _ at the party.', - 'In $year [he|she] would _ for fun.', - // 'In $year [he|she] was the best _.', - // 'In $year [he|she] was good at _.', - 'In $year [he|she] was bad at _.', - 'In $year [his|her] favorite color was _.', - 'In $year [he|she] was one of the best _ in the world.', - // '[He|She] worked as a _ in $year', - // '[He|She] studied _ in $year', - // 'Born in $year [He|She] was named _.', - // 'It was $year and [he|she] loved to _.', - // [ - // 'In $year he loved his _.', - // 'In $year she loved her _.', - // ], - // [ - // 'In $year he traved to his _.', - // 'In $year she traved to her _.', - // ], - // [ - // 'In $year he traved with his _.', - // 'In $year she traved with her _.', - // ], - [ - 'In $year he married his _.', - 'In $year she married her _.', - ], - // [ - // 'In $year he helped his _.', - // 'In $year she helped her _.', - // ], - // [ - // 'In $year he loved to play with his _.', - // 'In $year she loved to play with her _.', - // ], - // [ - // 'In $year his favorite toy was his _.', - // 'In $year her favorite toy was her _.', - // ], - // [ - // "In $year the girl's favorite toy was her _.", - // "In $year the boy's favorite toy was his _.", - // ], - [ - 'In $year his favorite toy was the _.', - 'In $year her favorite toy was the _.', - - ], - // [ - // 'In $year he named his dog _.', - // 'In $year she named her dog _.', - // ], - // [ - // 'In $year he named his baby _.', - // 'In $year she named her baby _.', - // ], - // [ - // 'In $year he named his kid _.', - // 'In $year she named her kid _.', - // ], - -].slice(0, 1000).map(d => limit1(() => parseSentence(d))) - -var sentences = await Promise.all(promises) - - -io.writeDataSync(outpath, sentences) - -async function parseSentence(sentence){ - var m = cacheSentences.find(d => d.sentence + '' == sentence + '') - if (m){ - return m - } - console.log(sentence + '') - - if (sentence.length == 2){ - var s0 = sentence[0].replace('_', '[MASK]') - var s1 = sentence[1].replace('_', '[MASK]') - } else { - var start = sentence.split('[')[0] - var end = sentence.split(']')[1] - var [t0, t1] = sentence.split('[')[1].split(']')[0].split('|') - var s0 = (start + t0 + end).replace('_', '[MASK]') - var s1 = (start + t1 + end).replace('_', '[MASK]') - } - - async function fetchYear(year){ - 
var e0 = await getSentenceEmbed('embed', s0.replace('$year', year)) - var e1 = await getSentenceEmbed('embed', s1.replace('$year', year)) - - return {year, e0, e1} - } - - var limit = pLimit(10) - var promises = d3.range(1850, 2040, 1).map(d => limit(() => fetchYear(d))) - var years = await Promise.all(promises) - - - var vocab = io.readDataSync(datadir + 'processed_vocab.json') - - var token2index = Object.fromEntries(vocab.map((d, i) => [d, i])) - - var tidy = [] - years.forEach(({year, e0, e1}) => { - e0.forEach((v0, i) => { - var v1 = e1[i] - var dif = v0 - v1 - tidy.push({year, i, v0, v1, dif}) - }) - }) - - // tidy = [{i: 0, v0: .123, v1: .838}, {i: 0, v0: 322, v1: 144}, ...] - var byToken = jp.nestBy(tidy, d => d.i) - byToken.forEach(d => { - d.mean0 = d3.mean(d, d => d.v0) - d.mean1 = d3.mean(d, d => d.v1) - }) - - _.sortBy(byToken, d => -d.mean0).forEach((d, i) => d.i0 = i) - _.sortBy(byToken, d => -d.mean1).forEach((d, i) => d.i1 = i) - - var topTokens = _.sortBy(byToken, d => Math.min(d.i0, d.i1)).slice(0, 150) - - topTokens.forEach(d => { - // printTop(d.index) - delete d.v0 - delete d.v1 - delete d.i0 - delete d.i1 - d.index = +d.key - }) - - function printTop(index){ - // console.log(' ') - // console.log(vocab[index]) - byToken.filter(d => d.index == index)[0].forEach(({year, dif}) => { - console.log({year, dif}) - }) - } - - return {sentence, t0, t1, topTokens} -} - - diff --git a/spaces/merve/hidden-bias/source/third_party/topojson-client.js b/spaces/merve/hidden-bias/source/third_party/topojson-client.js deleted file mode 100644 index 728070f185d11aa72b3f78ab88037275614fe89b..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/source/third_party/topojson-client.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://github.com/topojson/topojson-client v3.0.1 Copyright 2019 Mike Bostock -!function(e,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports):"function"==typeof define&&define.amd?define(["exports"],r):r((e=e||self).topojson=e.topojson||{})}(this,function(e){"use strict";function r(e){return e}function t(e){if(null==e)return r;var t,n,o=e.scale[0],a=e.scale[1],i=e.translate[0],c=e.translate[1];return function(e,r){r||(t=n=0);var u=2,f=e.length,s=new Array(f);for(s[0]=(t+=e[0])*o+i,s[1]=(n+=e[1])*a+c;ui&&(i=e[0]),e[1]c&&(c=e[1])}function f(e){switch(e.type){case"GeometryCollection":e.geometries.forEach(f);break;case"Point":u(e.coordinates);break;case"MultiPoint":e.coordinates.forEach(u)}}for(r in e.arcs.forEach(function(e){for(var r,t=-1,u=e.length;++ti&&(i=r[0]),r[1]c&&(c=r[1])}),e.objects)f(e.objects[r]);return[o,a,i,c]}function o(e,r){var t=r.id,n=r.bbox,o=null==r.properties?{}:r.properties,i=a(e,r);return null==t&&null==n?{type:"Feature",properties:o,geometry:i}:null==n?{type:"Feature",id:t,properties:o,geometry:i}:{type:"Feature",id:t,bbox:n,properties:o,geometry:i}}function a(e,r){var n=t(e.transform),o=e.arcs;function a(e,r){r.length&&r.pop();for(var t=o[e<0?~e:e],a=0,i=t.length;a1)n=function(e,r,t){var n,o=[],a=[];function i(e){var r=e<0?~e:e;(a[r]||(a[r]=[])).push({i:e,g:n})}function c(e){e.forEach(i)}function u(e){e.forEach(c)}return function e(r){switch(n=r,r.type){case"GeometryCollection":r.geometries.forEach(e);break;case"LineString":c(r.arcs);break;case"MultiLineString":case"Polygon":u(r.arcs);break;case"MultiPolygon":!function(e){e.forEach(u)}(r.arcs)}}(r),a.forEach(null==t?function(e){o.push(e[0].i)}:function(e){t(e[0].g,e[e.length-1].g)&&o.push(e[0].i)}),o}(0,r,t);else for(o=0,n=new Array(a=e.arcs.length);o1)for(var 
a,c,f=1,s=u(o[0]);fs&&(c=o[0],o[0]=o[f],o[f]=c,s=a);return o}).filter(function(e){return e.length>0})}}function f(e,r){for(var t=0,n=e.length;t>>1;e[o]=2))throw new Error("n must be ≥2");var t,o=(u=e.bbox||n(e))[0],a=u[1],i=u[2],c=u[3];r={scale:[i-o?(i-o)/(t-1):1,c-a?(c-a)/(t-1):1],translate:[o,a]}}var u,f,l=s(r),h=e.objects,p={};function g(e){return l(e)}function y(e){var r;switch(e.type){case"GeometryCollection":r={type:"GeometryCollection",geometries:e.geometries.map(y)};break;case"Point":r={type:"Point",coordinates:g(e.coordinates)};break;case"MultiPoint":r={type:"MultiPoint",coordinates:e.coordinates.map(g)};break;default:return e}return null!=e.id&&(r.id=e.id),null!=e.bbox&&(r.bbox=e.bbox),null!=e.properties&&(r.properties=e.properties),r}for(f in h)p[f]=y(h[f]);return{type:"Topology",bbox:u,transform:r,objects:p,arcs:e.arcs.map(function(e){var r,t=0,n=1,o=e.length,a=new Array(o);for(a[0]=l(e[0],0);++t - - -Let's take a look at a game of soccer. - - -
        - -

        - -Using the position of each player as training data, we can teach a model to predict which team would get to a loose ball first at each spot on the field, indicated by the color of the pixel. - -
        - -It updates in real-time—drag the players around to see the model change. - -

        - -This model reveals quite a lot about the data used to train it. Even without the actual positions of the players, it is simple to see where players might be. - -
        - -Click this button to move the players - -Take a guess at where the yellow team's goalie is now, then check their actual position. How close were you? - -

        Sensitive Salary Data

        - -In this specific soccer example, being able to make educated guesses about the data a model was trained on doesn't matter too much. But what if our data points represent something more sensitive? - -
        - -We’ve fed the same numbers into the model, but now they represent salary data instead of soccer data. Building models like this is a common technique to [detect discrimination](https://www.eeoc.gov/laws/guidance/section-10-compensation-discrimination#c.%20Using%20More%20Sophisticated%20Statistical%20Techniques%20to%20Evaluate). A union might test if a company is paying men and women fairly by building a salary model that takes into account years of experience. They can then [publish](https://postguild.org/2019-pay-study/) the results to bring pressure for change or show improvement. - -In this hypothetical salary study, even though no individual salaries have been published, it is easy to infer the salary of the newest male hire. And carefully cross referencing public start dates on LinkedIn with the model could almost perfectly reveal everyone's salary. - -Because the model here is so flexible (there are hundreds of square patches with independently calculated predictions) and we have so few data points (just 22 people), it is able to "memorize" individual data points. If we're looking to share information about patterns in salaries, a simpler and more constrained model like a linear regression might be more appropriate. - -
        - -By boiling down the 22 data points to two lines we're able to see broad trends without being able to guess anyone's salary. - -

        Subtle Leaks

        - -Removing complexity isn't a complete solution though. Depending on how the data is distributed, even a simple line can inadvertently reveal information. - -
        - -In this company, almost all the men started several years ago, so the slope of the line is especially sensitive to the salary of the new hire. - -Is their salary higher or lower than average? Based on the line, we can make a pretty good guess. - -Notice that changing the salary of someone with a more common tenure barely moves the line. In general, more typical data points are less susceptible to being leaked. This sets up a tricky trade off: we want models to learn about edge cases while being sure they haven't memorized individual data points. - -

        Real World Data

        - -Models of real world data are often quite complex—this can improve accuracy, but makes them [more susceptible](https://blog.tensorflow.org/2020/06/introducing-new-privacy-testing-library.html) to unexpectedly leaking information. Medical models have inadvertently revealed [patients' genetic markers](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4827719/). Language models have memorized [credit card numbers](https://bair.berkeley.edu/blog/2019/08/13/memorization/). Faces can even be [reconstructed](https://rist.tech.cornell.edu/papers/mi-ccs.pdf) from image models: - -
        - -[Fredrikson et al](https://rist.tech.cornell.edu/papers/mi-ccs.pdf) were able to extract the image on the left by repeatedly querying a facial recognition API. It isn't an exact match with the individual's actual face (on the right), but this attack only required access to the model's predictions, not its internal state. - -

        Protecting Private Data

        - -Training models with [differential privacy](http://www.cleverhans.io/privacy/2018/04/29/privacy-and-machine-learning.html) stops the training data from leaking by limiting how much the model can learn from any one data point. Differentially private models are still at the cutting edge of research, but they're being packaged into [machine learning frameworks](https://blog.tensorflow.org/2019/03/introducing-tensorflow-privacy-learning.html), making them much easier to use. When it isn't possible to train differentially private models, there are also tools that can [measure](https://github.com/tensorflow/privacy/tree/master/tensorflow_privacy/privacy/membership_inference_attack) how much data is the model memorizing. Also, standard techniques such as aggregation and limiting how much data a single source can contribute are still useful and usually improve the privacy of the model. - -As we saw in the [Collecting Sensitive Information Explorable](https://pair.withgoogle.com/explorables/anonymization/), adding enough random noise with differential privacy to protect outliers like the new hire can increase the amount of data required to reach a good level of accuracy. Depending on the application, the constraints of differential privacy could even improve the model—for instance, not learning too much from one data point can help prevent [overfitting](https://openreview.net/forum?id=r1xyx3R9tQ). - -Given the increasing utility of machine learning models for many real-world tasks, it’s clear that more and more systems, devices and apps will be powered, to some extent, by machine learning in the future. While [standard privacy best practices](https://owasp.org/www-project-top-ten/) developed for non-machine learning systems still apply to those with machine learning, the introduction of machine learning introduces new challenges, including the ability of the model to memorize some specific training data points and thus be vulnerable to privacy attacks that seek to extract this data from the model. Fortunately, techniques such as differential privacy exist that can be helpful in overcoming this specific challenge. Just as with other areas of [Responsible AI](https://ai.google/responsibilities/responsible-ai-practices/), it’s important to be aware of these new challenges that come along with machine learning and what steps can be taken to mitigate them. - - -

        Credits

        - -Adam Pearce and Ellen Jiang // December 2020 - -Thanks to Andreas Terzis, Ben Wedin, Carey Radebaugh, David Weinberger, Emily Reif, Fernanda Viégas, Hal Abelson, Kristen Olson, Martin Wattenberg, Michael Terry, Miguel Guevara, Thomas Steinke, Yannick Assogba, Zan Armstrong and our other colleagues at Google for their help with this piece. - - -

        More Explorables

        - -

        - - - - - - - - - \ No newline at end of file diff --git a/spaces/metricspace/OcTra/nnet/monotonic_align/__init__.py b/spaces/metricspace/OcTra/nnet/monotonic_align/__init__.py deleted file mode 100644 index 46c9183c2de2eb23bf51d6ae4d129ec13782dc3d..0000000000000000000000000000000000000000 --- a/spaces/metricspace/OcTra/nnet/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# import numpy as np -# import torch -# from .core import maximum_path_c - - -# def maximum_path(neg_cent, mask): -# """ Cython optimized version. -# neg_cent: [b, t_t, t_s] -# mask: [b, t_t, t_s] -# """ -# device = neg_cent.device -# dtype = neg_cent.dtype -# neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) -# path = np.zeros(neg_cent.shape, dtype=np.int32) - -# t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) -# t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) -# maximum_path_c(path, neg_cent, t_t_max, t_s_max) -# return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/metroidmen/face-restoration-Tencent/style.css b/spaces/metroidmen/face-restoration-Tencent/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/metroidmen/face-restoration-Tencent/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/mfrashad/CharacterGAN/netdissect/edit.html b/spaces/mfrashad/CharacterGAN/netdissect/edit.html deleted file mode 100644 index 9aac30bb08171c4c58eb936f9ba382e85a184803..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/netdissect/edit.html +++ /dev/null @@ -1,805 +0,0 @@ - - - - - - - - - - - - - -
        - - - - - - - - -
        -
        -

        - - -

        -
        - - -
        - - -
        - -
        -
        - -
        - -
        - -
        {{urec.layer}} {{urec.unit}} -
        -
        - -
        -
        -
        - -
        - -
        - -
        - - - - - -
        - -
        -

        Seeds to generate

        -

        -To transfer activations from one pixel to another (1) click on a source pixel -on the left image and (2) click on a target pixel on a right image, -then (3) choose a set of units to insert in the palette.

        -
        -
        #{{ ex.id }}
        -
        -
        - -
        - -
        - -
        - - - - diff --git a/spaces/michaljunczyk/pl-asr-bigos-workspace/app.py b/spaces/michaljunczyk/pl-asr-bigos-workspace/app.py deleted file mode 100644 index 2d25708103dea60c0087ff32d7ff8f5d7c6b4ee7..0000000000000000000000000000000000000000 --- a/spaces/michaljunczyk/pl-asr-bigos-workspace/app.py +++ /dev/null @@ -1,420 +0,0 @@ -import gradio as gr -import whisper -import numpy as np -import openai -import os -from gtts import gTTS -import json -import hashlib -import random -import string -import uuid -from datetime import date,datetime -from huggingface_hub import Repository, upload_file -import shutil -from helpers import dict_origin, dict_promptset - -HF_TOKEN_WRITE = os.environ.get("HF_TOKEN_WRITE") -print("HF_TOKEN_WRITE", HF_TOKEN_WRITE) -today = date.today() -today_ymd = today.strftime("%Y%m%d") - -def greet(name): - return "Hello " + name + "!!" - -with open('app.css','r') as f: - css_file = f.read() - -markdown=""" -# Polish ASR BIGOS workspace -""" - -# TODO move to config -WORKING_DATASET_REPO_URL = "https://huggingface.co/datasets/goodmike31/working-db" -REPO_NAME = "goodmike31/working-db" -REPOSITORY_DIR = "data" -LOCAL_DIR = "data_local" -os.makedirs(LOCAL_DIR,exist_ok=True) - -def dump_json(thing,file): - with open(file,'w+',encoding="utf8") as f: - json.dump(thing,f) - -def get_unique_name(): - return ''.join([random.choice(string.ascii_letters - + string.digits) for n in range(32)]) - -def get_prompts(project_name, size, language_code,prompts_left_info): - print(f"Retrieving prompts for project {project_name} with method: {type} for language_code {language_code} of size {size}") - size = int(size) - promptset = dict_promptset[project_name][0:size] - prompts_left_info = size - return(promptset, promptset[0],prompts_left_info) - -def save_recording_and_meta(project_name, recording, prompt_text, language_code, spk_name, spk_age, spk_accent, spk_city, spk_gender, spk_nativity, promptset, prompt_number, prompts_left_info): - #, name, age, gender): - # TODO save user data in the next version - - current_prompt = prompt_text.strip() - print("current_prompt: ", current_prompt) - - # check if prompt number is set - if prompt_number == None: - prompt_number = 1 - prompt_index = prompt_number - 1 - print("prompt_number: ", prompt_number) - - print("promptset: ", promptset) - if prompt_number == len(promptset): - next_prompt = "All prompts recorded. Thank you! 
You can close the app now:)" - else: - next_prompt = promptset[prompt_number] - - print("next_prompt: ", next_prompt) - - # remove leading and trailing spaces - next_prompt =next_prompt.strip() - - # increment prompt number - prompt_number = prompt_number + 1 - - speaker_metadata={} - speaker_metadata['name'] = spk_name if spk_name != None else 'unknown' - speaker_metadata['gender'] = spk_gender if spk_gender != None else 'unknown' - speaker_metadata['age'] = spk_age if spk_age != None else 'unknown' - speaker_metadata['accent'] = spk_accent if spk_accent != None else 'unknown' - speaker_metadata['city'] = spk_city if spk_city != None else 'unknown' - speaker_metadata['nativity'] = spk_nativity if spk_nativity != None else 'unknown' - - # TODO get ISO-693-1 codes - - - SAVE_ROOT_DIR = os.path.join(LOCAL_DIR, project_name, today_ymd, spk_name) - - SAVE_DIR_AUDIO = os.path.join(SAVE_ROOT_DIR, "audio") - SAVE_DIR_META = os.path.join(SAVE_ROOT_DIR, "meta") - os.makedirs(SAVE_DIR_AUDIO, exist_ok=True) - os.makedirs(SAVE_DIR_META, exist_ok=True) - - # Write audio to file - #audio_name = get_unique_name() - - uuid_name = str(uuid.uuid4()) - audio_fn = uuid_name + ".wav" - audio_output_fp = os.path.join(SAVE_DIR_AUDIO, audio_fn) - - print (f"Saving {recording} as {audio_output_fp}") - shutil.copy2(recording, audio_output_fp) - - # Write metadata.json to file - meta_fn = uuid_name + '.metadata.jsonl' - json_file_path = os.path.join(SAVE_DIR_META, meta_fn) - - now = datetime.now() - timestamp_str = now.strftime("%d/%m/%Y %H:%M:%S") - metadata= {'id':uuid_name, - 'audio_file': audio_fn, - 'language_code':language_code, - 'prompt_number':prompt_number, - 'prompt':current_prompt, - 'name': speaker_metadata['name'], - 'age': speaker_metadata['age'], - 'gender': speaker_metadata['gender'], - 'accent': speaker_metadata['accent'], - 'nativity': speaker_metadata['nativity'], - 'city': speaker_metadata['city'], - "date":today_ymd, - "timestamp": timestamp_str } - - dump_json(metadata, json_file_path) - - # Simply upload the audio file and metadata using the hub's upload_file - # Upload the audio - repo_audio_path = os.path.join(REPOSITORY_DIR, project_name, today_ymd, spk_name, "audio", audio_fn) - - _ = upload_file(path_or_fileobj = audio_output_fp, - path_in_repo = repo_audio_path, - repo_id = REPO_NAME, - repo_type = 'dataset', - token = HF_TOKEN_WRITE - ) - - # Upload the metadata - repo_json_path = os.path.join(REPOSITORY_DIR, project_name, today_ymd, spk_name, "meta", meta_fn) - _ = upload_file(path_or_fileobj = json_file_path, - path_in_repo = repo_json_path, - repo_id = REPO_NAME, - repo_type = 'dataset', - token = HF_TOKEN_WRITE - ) - - output = print(f"Recording {audio_fn} and meta file {meta_fn} successfully saved to repo!") - - prompts_left_info = prompts_left_info - 1 - - # check if this is the last prompt - - - return [next_prompt, prompt_number, None, prompts_left_info] - - -def whisper_model_change(radio_whisper_model): - whisper_model = whisper.load_model(radio_whisper_model) - return(whisper_model) - -def prompt_gpt_assistant(input_text, api_key, temperature): - #, role, template_prompt, template_answer): - #TODO add option to specify instruction - openai.api_key = api_key - - #TODO add specific message for specific role - system_role_message="You are a helpful assistant" - - messages = [ - {"role": "system", "content": system_role_message}] - - if input_text: - messages.append( - {"role": "user", "content": input_text}, - ) - - chat_completion = openai.ChatCompletion.create( - 
model="gpt-3.5-turbo", - messages=messages, - temperature=temperature - ) - - reply = chat_completion.choices[0].message.content - #TODO save chat completion for future reuse - return reply - -def voicebot_pipeline(audio): - asr_out = transcribe(audio) - gpt_out = prompt_gpt_assistant(asr_out) - tts_out = synthesize_speech(gpt_out) - return(tts_out) - -def transcribe(audio, language_code, whisper_model, whisper_model_type): - if not whisper_model: - whisper_model=init_whisper_model(whisper_model_type) - - print(f"Transcribing {audio} for language_code {language_code} and model {whisper_model_type}") - audio = whisper.load_audio(audio) - audio = whisper.pad_or_trim(audio) - - mel = whisper.log_mel_spectrogram(audio) - - options = whisper.DecodingOptions(language=language_code, without_timestamps=True, fp16=False) - result = whisper.decode(whisper_model, mel, options) - result_text = result.text - return result_text - -def init_whisper_model(whisper_model_type): - print("Initializing whisper model") - print(whisper_model_type) - whisper_model = whisper.load_model(whisper_model_type) - return whisper_model - -def synthesize_speech(text, language_code): - audioobj = gTTS(text = text, - lang = language_code, - slow = False) - - audioobj.save("Temp.mp3") - return("Temp.mp3") - -block = gr.Blocks(css=css_file) -with block: - - #state variables - project_name = gr.State("voicebot") # voicebot is default for playground. For recording app, it is selected e.g. bridge - - language_code = gr.State("pl") - prompts_type = gr.State() - promptset = gr.State("test.prompts.txt") - prompt_history = gr.State() - current_prompt = gr.State() - prompt_number = gr.State() - finished_recording = gr.State() - - - temperature = gr.State(0) - whisper_model_type = gr.State("base") - whisper_model = gr.State() - openai_api_key = gr.State() - google_api_key = gr.State() - azure_api_key = gr.State() - - spk_age = gr.State("unknown") - spk_accent = gr.State("unknown") - spk_city = gr.State("unknown") - spk_gender = gr.State("unknown") - spk_nativity = gr.State("unknown") - spk_name = gr.State("unknown") - cities = sorted(dict_origin["Poland"]["cities"]) - - - # state handling functions - def change_project(choice): - print("Changing project to") - print(choice) - project=choice - return(project) - - def change_prompts_type(choice): - print("Changing promptset type to") - print(choice) - prompts_type=choice - return(prompts_type) - - def change_nativity(choice): - print("Changing speaker nativity to") - print(choice) - spk_nativity=choice - return(spk_nativity) - - def change_accent(choice): - print("Changing speaker accent to") - print(choice) - spk_accent=choice - return(spk_accent) - - def change_age(choice): - print("Changing speaker age to") - print(choice) - spk_age=choice - return(spk_age) - - def change_city(choice): - print("Changing speaker city to") - print(choice) - spk_city=choice - return(spk_city) - - def change_gender(choice): - print("Changing speaker gender to") - print(choice) - spk_gender=choice - return(spk_gender) - - def change_language(choice): - if choice == "Polish": - language_code="pl" - print("Switching to Polish") - print("language_code") - print(language_code) - elif choice == "English": - language_code="en" - print("Switching to English") - print("language_code") - print(language_code) - return(language_code) - - def change_whisper_model(choice): - whisper_model_type = choice - print("Switching Whisper model") - print(whisper_model_type) - whisper_model = 
init_whisper_model(whisper_model_type) - return [whisper_model_type, whisper_model] - - def change_prompts_left(prompts_left, current_prompt, promptset_size): - prompts_left = promptset_size - current_prompt - return [prompts_left] - - gr.Markdown(markdown) - - with gr.Tabs(): - """with gr.TabItem('General settings'): - radio_lang = gr.Radio(["Polish", "English"], label="Language", info="If none is selected, Polish is used") - radio_asr_type = gr.Radio(["Local", "Cloud"], label="Select ASR type", info="Cloud models are faster and more accurate, but costs money") - with gr.Accordion(label="Local ASR settings", open=False): - #radio_asr_type = gr.Radio(["Local", "Cloud"], label="Select ASR type", info="Cloud models are faster and more accurate, but costs money") - #radio_cloud_asr = gr.Radio(["Whisper", "Google", "Azure"], label="Select Cloud ASR provider", info="You need to provide API keys for specific service") - radio_whisper_model = gr.Radio(["tiny", "base", "small", "medium", "large"], label="Whisper ASR model (local)", info="Larger models are more accurate, but slower. Default - base") - with gr.Accordion(label="Cloud ASR settings", open=False): - radio_cloud_asr = gr.Radio(["Whisper", "Google", "Azure"], label="Select Cloud ASR provider", info="You need to provide API keys for specific service") - with gr.Accordion(label="Cloud API Keys",open=False): - gr.HTML("

        Open AI API Key:

        ") - # API key textbox (password-style) - openai_api_key = gr.Textbox(label="", elem_id="pw") - gr.HTML("

        Google Cloud API Key:

        ") - # API key textbox (password-style) - google_api_key = gr.Textbox(label="", elem_id="pw") - gr.HTML("

        Azure Cloud API Key:

        ") - # API key textbox (password-style) - azure_api_key = gr.Textbox(label="", elem_id="pw") - with gr.Accordion(label="Chat GPT settings",open=False): - slider_temp = gr.Slider(minimum=0, maximum= 2, step=0.2, label="ChatGPT temperature") - """ - with gr.TabItem('Speaker information'): - with gr.Row(): - spk_name = gr.Textbox(placeholder="Your name", label="Name") - dropdown_spk_nativity = gr.Dropdown(["Polish", "Other"], label="Native language", info="") - dropdown_spk_gender = gr.Dropdown(["Male", "Female", "Other", "Prefer not to say"], label="Gender", info="") - dropdown_spk_age = gr.Dropdown(["under 20", "20-29", "30-39", "40-49", "50-59", "over 60"], label="Age", info="") - dropdown_spk_origin_city = gr.Dropdown(cities, label="Hometown", visible=True, info="Closest city to speaker's place of birth and upbringing") - #radio_gdpr_consent = gr.Radio(["Yes", "No"], label="Personal data processing consent", info="Do you agree for your personal data processing according to the policy (link)") - dropdown_spk_nativity.change(fn=change_nativity, inputs=dropdown_spk_nativity, outputs=spk_age) - dropdown_spk_gender.change(fn=change_gender, inputs=dropdown_spk_gender, outputs=spk_gender) - dropdown_spk_age.change(fn=change_age, inputs=dropdown_spk_age, outputs=spk_age) - dropdown_spk_origin_city.change(fn=change_city, inputs=dropdown_spk_origin_city, outputs=spk_city) - - """with gr.TabItem('Voicebot playground'): - mic_recording = gr.Audio(source="microphone", type="filepath", label='Record your voice') - with gr.Row(): - button_transcribe = gr.Button("Transcribe speech") - - button_save_audio_and_trans = gr.Button("Save audio recording and transcription") - - out_asr = gr.Textbox(placeholder="ASR output", - lines=2, - max_lines=5, - show_label=False) - - with gr.Row(): - button_prompt_gpt = gr.Button("Prompt ChatGPT") - button_save_gpt_response = gr.Button("Save ChatGPT response") - - out_gpt = gr.Textbox(placeholder="ChatGPT output", - lines=4, - max_lines=10, - show_label=False) - with gr.Row(): - button_synth_speech = gr.Button("Synthesize speech") - button_save_synth_audio = gr.Button("Save synthetic audio") - - synth_recording = gr.Audio() - - # Events actions - button_save_audio_and_trans.click(save_recording_and_meta, inputs=[project_name, mic_recording, out_asr, language_code, spk_age, spk_accent, spk_city, spk_gender, spk_nativity], outputs=[]) - button_transcribe.click(transcribe, inputs=[mic_recording, language_code, whisper_model,whisper_model_type], outputs=out_asr) - button_prompt_gpt.click(prompt "dates":["20230922"], - "speakers":["Test"]_gpt_assistant, inputs=[out_asr, openai_api_key, slider_temp], outputs=out_gpt) - button_synth_speech.click(synthesize_speech, inputs=[out_gpt, language_code], outputs=synth_recording) - - radio_lang.change(fn=change_language, inputs=radio_lang, outputs=language_code) - radio_whisper_model.change(fn=change_whisper_model, inputs=radio_whisper_model, outputs=[whisper_model_type, whisper_model]) - """ - with gr.TabItem('Speech recordings app'): - - with gr.Accordion(label="Project settings"): - radio_project = gr.Dropdown(["bridge"], label="Select project", info="") - #radio_promptset_type = gr.Radio(["New promptset generation", "Existing promptset use"], label="Language", value ="Existing promptset use", info="New promptset is generated using. Requires providing open AI key in general settings tab") - var_promptset_size = gr.Textbox(label="How many recordings do you intend to make? 
(max 200)") - button_get_prompts = gr.Button("Save settings and get first prompt!") - - prompts_left_info = gr.Number(placeholder='',label="Recordings left",lines=1, max_lines=1, show_label=True, interactive=False) - prompt_text = gr.Textbox(placeholder='Prompt to read during recording',label="Prompt to read") - speech_recording = gr.Audio(source="microphone",label="Select 'record from microphone' and read the prompt displayed above", type="filepath") - - radio_project.change(fn=change_project, inputs=radio_project, outputs=project_name) - #radio_promptset_type.change(fn=change_prompts_type, inputs=radio_promptset_type, outputs=prompts_type) - #prompts_left.change(change_prompts_left, inputs = [prompts_left, current_prompt, var_promptset_size], outputs = [prompts_left]) - - button_save_and_next = gr.Button("Save recording and get the next prompt") - # TODO - add option to generate new promptset on the fly for new projects - button_get_prompts.click(get_prompts, inputs=[radio_project, var_promptset_size, language_code, prompts_left_info], outputs = [promptset, prompt_text, prompts_left_info]) - - button_save_and_next.click(save_recording_and_meta, inputs=[project_name, speech_recording, prompt_text, language_code, spk_name, spk_age, spk_accent, spk_city, spk_gender, spk_nativity, promptset, prompt_number, prompts_left_info], outputs=[prompt_text, prompt_number, speech_recording,prompts_left_info]) - -block.launch() \ No newline at end of file diff --git a/spaces/mikeee/gradio-deepl/okteto-up.bat b/spaces/mikeee/gradio-deepl/okteto-up.bat deleted file mode 100644 index a526b6673916dedd25656140b4e1b762f1eeb9a0..0000000000000000000000000000000000000000 --- a/spaces/mikeee/gradio-deepl/okteto-up.bat +++ /dev/null @@ -1 +0,0 @@ -okteto up diff --git a/spaces/milai-tk/clip-human-action-img2txt/README.md b/spaces/milai-tk/clip-human-action-img2txt/README.md deleted file mode 100644 index 229c003a34177860b498805998247448676f9da3..0000000000000000000000000000000000000000 --- a/spaces/milai-tk/clip-human-action-img2txt/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Clip Human Action Img2txt -emoji: 📊 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mingyuan/MotionDiffuse/datasets/evaluator_models.py b/spaces/mingyuan/MotionDiffuse/datasets/evaluator_models.py deleted file mode 100644 index 3177738d3f029a65fb4b26538d607d95fb1c84b7..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/MotionDiffuse/datasets/evaluator_models.py +++ /dev/null @@ -1,438 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -import time -import math -from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence -# from networks.layers import * -import torch.nn.functional as F - - -class ContrastiveLoss(torch.nn.Module): - """ - Contrastive loss function. 
- Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf - """ - def __init__(self, margin=3.0): - super(ContrastiveLoss, self).__init__() - self.margin = margin - - def forward(self, output1, output2, label): - euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True) - loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) + - (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)) - return loss_contrastive - - -def init_weight(m): - if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose1d): - nn.init.xavier_normal_(m.weight) - # m.bias.data.fill_(0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - -def reparameterize(mu, logvar): - s_var = logvar.mul(0.5).exp_() - eps = s_var.data.new(s_var.size()).normal_() - return eps.mul(s_var).add_(mu) - - -# batch_size, dimension and position -# output: (batch_size, dim) -def positional_encoding(batch_size, dim, pos): - assert batch_size == pos.shape[0] - positions_enc = np.array([ - [pos[j] / np.power(10000, (i-i%2)/dim) for i in range(dim)] - for j in range(batch_size) - ], dtype=np.float32) - positions_enc[:, 0::2] = np.sin(positions_enc[:, 0::2]) - positions_enc[:, 1::2] = np.cos(positions_enc[:, 1::2]) - return torch.from_numpy(positions_enc).float() - - -def get_padding_mask(batch_size, seq_len, cap_lens): - cap_lens = cap_lens.data.tolist() - mask_2d = torch.ones((batch_size, seq_len, seq_len), dtype=torch.float32) - for i, cap_len in enumerate(cap_lens): - mask_2d[i, :, :cap_len] = 0 - return mask_2d.bool(), 1 - mask_2d[:, :, 0].clone() - - -class PositionalEncoding(nn.Module): - - def __init__(self, d_model, max_len=300): - super(PositionalEncoding, self).__init__() - - pe = torch.zeros(max_len, d_model) - position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - # pe = pe.unsqueeze(0).transpose(0, 1) - self.register_buffer('pe', pe) - - def forward(self, pos): - return self.pe[pos] - - -class MovementConvEncoder(nn.Module): - def __init__(self, input_size, hidden_size, output_size): - super(MovementConvEncoder, self).__init__() - self.main = nn.Sequential( - nn.Conv1d(input_size, hidden_size, 4, 2, 1), - nn.Dropout(0.2, inplace=True), - nn.LeakyReLU(0.2, inplace=True), - nn.Conv1d(hidden_size, output_size, 4, 2, 1), - nn.Dropout(0.2, inplace=True), - nn.LeakyReLU(0.2, inplace=True), - ) - self.out_net = nn.Linear(output_size, output_size) - self.main.apply(init_weight) - self.out_net.apply(init_weight) - - def forward(self, inputs): - inputs = inputs.permute(0, 2, 1) - outputs = self.main(inputs).permute(0, 2, 1) - # print(outputs.shape) - return self.out_net(outputs) - - -class MovementConvDecoder(nn.Module): - def __init__(self, input_size, hidden_size, output_size): - super(MovementConvDecoder, self).__init__() - self.main = nn.Sequential( - nn.ConvTranspose1d(input_size, hidden_size, 4, 2, 1), - # nn.Dropout(0.2, inplace=True), - nn.LeakyReLU(0.2, inplace=True), - nn.ConvTranspose1d(hidden_size, output_size, 4, 2, 1), - # nn.Dropout(0.2, inplace=True), - nn.LeakyReLU(0.2, inplace=True), - ) - self.out_net = nn.Linear(output_size, output_size) - - self.main.apply(init_weight) - self.out_net.apply(init_weight) - - def forward(self, inputs): - inputs = inputs.permute(0, 2, 1) - outputs = 
self.main(inputs).permute(0, 2, 1) - return self.out_net(outputs) - - -class TextVAEDecoder(nn.Module): - def __init__(self, text_size, input_size, output_size, hidden_size, n_layers): - super(TextVAEDecoder, self).__init__() - self.input_size = input_size - self.output_size = output_size - self.hidden_size = hidden_size - self.n_layers = n_layers - self.emb = nn.Sequential( - nn.Linear(input_size, hidden_size), - nn.LayerNorm(hidden_size), - nn.LeakyReLU(0.2, inplace=True)) - - self.z2init = nn.Linear(text_size, hidden_size * n_layers) - self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)]) - self.positional_encoder = PositionalEncoding(hidden_size) - - - self.output = nn.Sequential( - nn.Linear(hidden_size, hidden_size), - nn.LayerNorm(hidden_size), - nn.LeakyReLU(0.2, inplace=True), - nn.Linear(hidden_size, output_size) - ) - - # - # self.output = nn.Sequential( - # nn.Linear(hidden_size, hidden_size), - # nn.LayerNorm(hidden_size), - # nn.LeakyReLU(0.2, inplace=True), - # nn.Linear(hidden_size, output_size-4) - # ) - - # self.contact_net = nn.Sequential( - # nn.Linear(output_size-4, 64), - # nn.LayerNorm(64), - # nn.LeakyReLU(0.2, inplace=True), - # nn.Linear(64, 4) - # ) - - self.output.apply(init_weight) - self.emb.apply(init_weight) - self.z2init.apply(init_weight) - # self.contact_net.apply(init_weight) - - def get_init_hidden(self, latent): - hidden = self.z2init(latent) - hidden = torch.split(hidden, self.hidden_size, dim=-1) - return list(hidden) - - def forward(self, inputs, last_pred, hidden, p): - h_in = self.emb(inputs) - pos_enc = self.positional_encoder(p).to(inputs.device).detach() - h_in = h_in + pos_enc - for i in range(self.n_layers): - # print(h_in.shape) - hidden[i] = self.gru[i](h_in, hidden[i]) - h_in = hidden[i] - pose_pred = self.output(h_in) - # pose_pred = self.output(h_in) + last_pred.detach() - # contact = self.contact_net(pose_pred) - # return torch.cat([pose_pred, contact], dim=-1), hidden - return pose_pred, hidden - - -class TextDecoder(nn.Module): - def __init__(self, text_size, input_size, output_size, hidden_size, n_layers): - super(TextDecoder, self).__init__() - self.input_size = input_size - self.output_size = output_size - self.hidden_size = hidden_size - self.n_layers = n_layers - self.emb = nn.Sequential( - nn.Linear(input_size, hidden_size), - nn.LayerNorm(hidden_size), - nn.LeakyReLU(0.2, inplace=True)) - - self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)]) - self.z2init = nn.Linear(text_size, hidden_size * n_layers) - self.positional_encoder = PositionalEncoding(hidden_size) - - self.mu_net = nn.Linear(hidden_size, output_size) - self.logvar_net = nn.Linear(hidden_size, output_size) - - self.emb.apply(init_weight) - self.z2init.apply(init_weight) - self.mu_net.apply(init_weight) - self.logvar_net.apply(init_weight) - - def get_init_hidden(self, latent): - - hidden = self.z2init(latent) - hidden = torch.split(hidden, self.hidden_size, dim=-1) - - return list(hidden) - - def forward(self, inputs, hidden, p): - # print(inputs.shape) - x_in = self.emb(inputs) - pos_enc = self.positional_encoder(p).to(inputs.device).detach() - x_in = x_in + pos_enc - - for i in range(self.n_layers): - hidden[i] = self.gru[i](x_in, hidden[i]) - h_in = hidden[i] - mu = self.mu_net(h_in) - logvar = self.logvar_net(h_in) - z = reparameterize(mu, logvar) - return z, mu, logvar, hidden - -class AttLayer(nn.Module): - def __init__(self, query_dim, key_dim, value_dim): - super(AttLayer, 
self).__init__() - self.W_q = nn.Linear(query_dim, value_dim) - self.W_k = nn.Linear(key_dim, value_dim, bias=False) - self.W_v = nn.Linear(key_dim, value_dim) - - self.softmax = nn.Softmax(dim=1) - self.dim = value_dim - - self.W_q.apply(init_weight) - self.W_k.apply(init_weight) - self.W_v.apply(init_weight) - - def forward(self, query, key_mat): - ''' - query (batch, query_dim) - key (batch, seq_len, key_dim) - ''' - # print(query.shape) - query_vec = self.W_q(query).unsqueeze(-1) # (batch, value_dim, 1) - val_set = self.W_v(key_mat) # (batch, seq_len, value_dim) - key_set = self.W_k(key_mat) # (batch, seq_len, value_dim) - - weights = torch.matmul(key_set, query_vec) / np.sqrt(self.dim) - - co_weights = self.softmax(weights) # (batch, seq_len, 1) - values = val_set * co_weights # (batch, seq_len, value_dim) - pred = values.sum(dim=1) # (batch, value_dim) - return pred, co_weights - - def short_cut(self, querys, keys): - return self.W_q(querys), self.W_k(keys) - - -class TextEncoderBiGRU(nn.Module): - def __init__(self, word_size, pos_size, hidden_size, device): - super(TextEncoderBiGRU, self).__init__() - self.device = device - - self.pos_emb = nn.Linear(pos_size, word_size) - self.input_emb = nn.Linear(word_size, hidden_size) - self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) - # self.linear2 = nn.Linear(hidden_size, output_size) - - self.input_emb.apply(init_weight) - self.pos_emb.apply(init_weight) - # self.linear2.apply(init_weight) - # self.batch_size = batch_size - self.hidden_size = hidden_size - self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) - - # input(batch_size, seq_len, dim) - def forward(self, word_embs, pos_onehot, cap_lens): - num_samples = word_embs.shape[0] - - pos_embs = self.pos_emb(pos_onehot) - inputs = word_embs + pos_embs - input_embs = self.input_emb(inputs) - hidden = self.hidden.repeat(1, num_samples, 1) - - cap_lens = cap_lens.data.tolist() - emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) - - gru_seq, gru_last = self.gru(emb, hidden) - - gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) - gru_seq = pad_packed_sequence(gru_seq, batch_first=True)[0] - forward_seq = gru_seq[..., :self.hidden_size] - backward_seq = gru_seq[..., self.hidden_size:].clone() - - # Concate the forward and backward word embeddings - for i, length in enumerate(cap_lens): - backward_seq[i:i+1, :length] = torch.flip(backward_seq[i:i+1, :length].clone(), dims=[1]) - gru_seq = torch.cat([forward_seq, backward_seq], dim=-1) - - return gru_seq, gru_last - - -class TextEncoderBiGRUCo(nn.Module): - def __init__(self, word_size, pos_size, hidden_size, output_size, device): - super(TextEncoderBiGRUCo, self).__init__() - self.device = device - - self.pos_emb = nn.Linear(pos_size, word_size) - self.input_emb = nn.Linear(word_size, hidden_size) - self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) - self.output_net = nn.Sequential( - nn.Linear(hidden_size * 2, hidden_size), - nn.LayerNorm(hidden_size), - nn.LeakyReLU(0.2, inplace=True), - nn.Linear(hidden_size, output_size) - ) - - self.input_emb.apply(init_weight) - self.pos_emb.apply(init_weight) - self.output_net.apply(init_weight) - # self.linear2.apply(init_weight) - # self.batch_size = batch_size - self.hidden_size = hidden_size - self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) - - # input(batch_size, seq_len, dim) - def forward(self, word_embs, pos_onehot, cap_lens): - 
num_samples = word_embs.shape[0] - - pos_embs = self.pos_emb(pos_onehot) - inputs = word_embs + pos_embs - input_embs = self.input_emb(inputs) - hidden = self.hidden.repeat(1, num_samples, 1) - - cap_lens = cap_lens.data.tolist() - emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) - - gru_seq, gru_last = self.gru(emb, hidden) - - gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) - - return self.output_net(gru_last) - - -class MotionEncoderBiGRUCo(nn.Module): - def __init__(self, input_size, hidden_size, output_size, device): - super(MotionEncoderBiGRUCo, self).__init__() - self.device = device - - self.input_emb = nn.Linear(input_size, hidden_size) - self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) - self.output_net = nn.Sequential( - nn.Linear(hidden_size*2, hidden_size), - nn.LayerNorm(hidden_size), - nn.LeakyReLU(0.2, inplace=True), - nn.Linear(hidden_size, output_size) - ) - - self.input_emb.apply(init_weight) - self.output_net.apply(init_weight) - self.hidden_size = hidden_size - self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) - - # input(batch_size, seq_len, dim) - def forward(self, inputs, m_lens): - num_samples = inputs.shape[0] - - input_embs = self.input_emb(inputs) - hidden = self.hidden.repeat(1, num_samples, 1) - - cap_lens = m_lens.data.tolist() - emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) - - gru_seq, gru_last = self.gru(emb, hidden) - - gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) - - return self.output_net(gru_last) - - -class MotionLenEstimatorBiGRU(nn.Module): - def __init__(self, word_size, pos_size, hidden_size, output_size): - super(MotionLenEstimatorBiGRU, self).__init__() - - self.pos_emb = nn.Linear(pos_size, word_size) - self.input_emb = nn.Linear(word_size, hidden_size) - self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) - nd = 512 - self.output = nn.Sequential( - nn.Linear(hidden_size*2, nd), - nn.LayerNorm(nd), - nn.LeakyReLU(0.2, inplace=True), - - nn.Linear(nd, nd // 2), - nn.LayerNorm(nd // 2), - nn.LeakyReLU(0.2, inplace=True), - - nn.Linear(nd // 2, nd // 4), - nn.LayerNorm(nd // 4), - nn.LeakyReLU(0.2, inplace=True), - - nn.Linear(nd // 4, output_size) - ) - # self.linear2 = nn.Linear(hidden_size, output_size) - - self.input_emb.apply(init_weight) - self.pos_emb.apply(init_weight) - self.output.apply(init_weight) - # self.linear2.apply(init_weight) - # self.batch_size = batch_size - self.hidden_size = hidden_size - self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) - - # input(batch_size, seq_len, dim) - def forward(self, word_embs, pos_onehot, cap_lens): - num_samples = word_embs.shape[0] - - pos_embs = self.pos_emb(pos_onehot) - inputs = word_embs + pos_embs - input_embs = self.input_emb(inputs) - hidden = self.hidden.repeat(1, num_samples, 1) - - cap_lens = cap_lens.data.tolist() - emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) - - gru_seq, gru_last = self.gru(emb, hidden) - - gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) - - return self.output(gru_last) diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/__init__.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/__init__.py deleted file mode 100644 index 52db7cce67b1686f7cab3698f15b8f309c897918..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/__init__.py +++ /dev/null @@ 
-1,2 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved \ No newline at end of file diff --git a/spaces/monra/freegpt-webui/server/backend.py b/spaces/monra/freegpt-webui/server/backend.py deleted file mode 100644 index fd45b94d916512059e4d1f7850b63de6f9da6320..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/server/backend.py +++ /dev/null @@ -1,176 +0,0 @@ -import re -from datetime import datetime -from g4f import ChatCompletion -from flask import request, Response, stream_with_context -from requests import get -from server.config import special_instructions - - -class Backend_Api: - def __init__(self, bp, config: dict) -> None: - """ - Initialize the Backend_Api class. - :param app: Flask application instance - :param config: Configuration dictionary - """ - self.bp = bp - self.routes = { - '/backend-api/v2/conversation': { - 'function': self._conversation, - 'methods': ['POST'] - } - } - - def _conversation(self): - """ - Handles the conversation route. - - :return: Response object containing the generated conversation stream - """ - conversation_id = request.json['conversation_id'] - - try: - jailbreak = request.json['jailbreak'] - model = request.json['model'] - messages = build_messages(jailbreak) - - # Generate response - response = ChatCompletion.create( - model=model, - chatId=conversation_id, - messages=messages - ) - - return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream') - - except Exception as e: - print(e) - print(e.__traceback__.tb_next) - - return { - '_action': '_ask', - 'success': False, - "error": f"an error occurred {str(e)}" - }, 400 - - -def build_messages(jailbreak): - """ - Build the messages for the conversation. - - :param jailbreak: Jailbreak instruction string - :return: List of messages for the conversation - """ - _conversation = request.json['meta']['content']['conversation'] - internet_access = request.json['meta']['content']['internet_access'] - prompt = request.json['meta']['content']['parts'][0] - - # Add the existing conversation - conversation = _conversation - - # Add web results if enabled - if internet_access: - current_date = datetime.now().strftime("%Y-%m-%d") - query = f'Current date: {current_date}. ' + prompt["content"] - search_results = fetch_search_results(query) - conversation.extend(search_results) - - # Add jailbreak instructions if enabled - if jailbreak_instructions := getJailbreak(jailbreak): - conversation.extend(jailbreak_instructions) - - # Add the prompt - conversation.append(prompt) - - # Reduce conversation size to avoid API Token quantity error - if len(conversation) > 3: - conversation = conversation[-4:] - - return conversation - - -def fetch_search_results(query): - """ - Fetch search results for a given query. - - :param query: Search query string - :return: List of search results - """ - search = get('https://ddg-api.herokuapp.com/search', - params={ - 'query': query, - 'limit': 3, - }) - - snippets = "" - for index, result in enumerate(search.json()): - snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.' - snippets += snippet - - response = "Here are some updated web searches. Use this to improve user response:" - response += snippets - - return [{'role': 'system', 'content': response}] - - -def generate_stream(response, jailbreak): - """ - Generate the conversation stream. 
- - :param response: Response object from ChatCompletion.create - :param jailbreak: Jailbreak instruction string - :return: Generator object yielding messages in the conversation - """ - if getJailbreak(jailbreak): - response_jailbreak = '' - jailbroken_checked = False - for message in response: - response_jailbreak += message - if jailbroken_checked: - yield message - else: - if response_jailbroken_success(response_jailbreak): - jailbroken_checked = True - if response_jailbroken_failed(response_jailbreak): - yield response_jailbreak - jailbroken_checked = True - else: - yield from response - - -def response_jailbroken_success(response: str) -> bool: - """Check if the response has been jailbroken. - - :param response: Response string - :return: Boolean indicating if the response has been jailbroken - """ - act_match = re.search(r'ACT:', response, flags=re.DOTALL) - return bool(act_match) - - -def response_jailbroken_failed(response): - """ - Check if the response has not been jailbroken. - - :param response: Response string - :return: Boolean indicating if the response has not been jailbroken - """ - return False if len(response) < 4 else not (response.startswith("GPT:") or response.startswith("ACT:")) - - -def getJailbreak(jailbreak): - """ - Check if jailbreak instructions are provided. - - :param jailbreak: Jailbreak instruction string - :return: Jailbreak instructions if provided, otherwise None - """ - if jailbreak != "default": - special_instructions[jailbreak][0]['content'] += special_instructions['two_responses_instruction'] - if jailbreak in special_instructions: - special_instructions[jailbreak] - return special_instructions[jailbreak] - else: - return None - else: - return None diff --git a/spaces/mshukor/UnIVAL/run_scripts/image_gen/.ipynb_checkpoints/generate_for_vqgan_code-checkpoint.sh b/spaces/mshukor/UnIVAL/run_scripts/image_gen/.ipynb_checkpoints/generate_for_vqgan_code-checkpoint.sh deleted file mode 100644 index 87c5af3e461f30779e6b69e931678127e5aab9cc..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/image_gen/.ipynb_checkpoints/generate_for_vqgan_code-checkpoint.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env - -# for text-image paired data, each line of the given input file should contain these information (separated by tabs): -# input format -# uniq-id, image-id, image base64 string and text -# input example -# 162365 12455 /9j/4AAQSkZJ....UCP/2Q== two people in an ocean playing with a yellow frisbee. -# -# output format -# uniq-id, image-id, text and code -# output example -# 162364 12455 two people in an ocean playing with a yellow frisbee. 
6288 4495 4139...4691 4844 6464 - -CUDA_VISIBLE_DEVICES=0 python generate_code.py \ - --file ./custom_data.txt \ - --outputs ./custom_data_code.txt \ - --selected_cols 0,1,2,3 \ - --code_image_size 256 \ - --vq_model vqgan \ - --vqgan_model_path ../../checkpoints/vqgan/last.ckpt \ - --vqgan_config_path ../../checkpoints/vqgan/model.yaml - -# for image-only data each line of the given input file should contain these information (separated by tabs): -# input format -# image-id and image base64 string -# input example: -# 12455 /9j/4AAQSkZJ....UCP/2Q== -# -# output format -# image-id and code -# 12455 6288 4495 4139...4691 4844 6464 - -CUDA_VISIBLE_DEVICES=0 python generate_code.py \ - --file ./custom_data.txt \ - --outputs ./custom_data_code.txt \ - --selected_cols 0,1 \ - --code_image_size 256 \ - --vq_model vqgan \ - --vqgan_model_path ../../checkpoints/vqgan/last.ckpt \ - --vqgan_config_path ../../checkpoints/vqgan/model.yaml diff --git a/spaces/mshukor/UnIVAL/run_scripts/snli_ve/scaling_best/unival_snli_ve.sh b/spaces/mshukor/UnIVAL/run_scripts/snli_ve/scaling_best/unival_snli_ve.sh deleted file mode 100644 index 9f2008ca448cc6b93a3730ca9fc46fcc7508cb3f..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/snli_ve/scaling_best/unival_snli_ve.sh +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env - -# The port for communication. Note that if you want to run multiple tasks on the same machine, -# you need to specify different port numbers. - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - - -exp_name=unival_snli_ve - - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - -save_base_log_dir=/lus/scratch/NAT/gda2204/SHARED/logs -save_dir=${save_base_log_dir}/ofa/checkpoints/snli_ve/${exp_name} - -log_dir=${save_dir} - - -mkdir -p $log_dir $save_dir - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - -image_dir=${base_data_dir} - - - - -data_dir=${base_data_dir}/ofa/snli_ve_data -data=${data_dir}/snli_ve_train.tsv,${data_dir}/snli_ve_dev.tsv - -restore_file=${base_log_dir}/ofa/checkpoints/pretrain/unival_s2_hs/checkpoint1.pt - - -selected_cols=0,2,3,4,5 - -task=snli_ve -arch=unival_base -criterion=adjust_label_smoothed_cross_entropy -label_smoothing=0.0 -lr=5e-5 -max_epoch=10 -warmup_ratio=0.06 -batch_size=8 -update_freq=4 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_tgt_length=20 -num_bins=1000 -patch_image_size=480 -prompt_type="prev_output" - - - -echo "max_epoch "${max_epoch} -echo "lr "${lr} - -log_file=${log_dir}/${max_epoch}"_"${lr}".log" -save_path=${save_dir}/${max_epoch}"_"${lr} 
-mkdir -p $save_path - - - - - - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 timm_resnet resnet -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=4 - -save_interval=1 -validate_interval_updates=50000 -save_interval_updates=0 - - -sample_patch_num='--sample-patch-num=784' # '' - - - -python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - $data \ - --selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --reset-optimizer --reset-dataloader --reset-meters \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - --decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay --lr=${lr} \ - --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \ - --log-format=simple --log-interval=10 \ - --fixed-validation-seed=7 \ - --keep-best-checkpoints=1 \ - --no-epoch-checkpoints \ - --save-interval=1 --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --best-checkpoint-metric=snli_score --maximize-best-checkpoint-metric \ - --max-src-length=${max_src_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --prompt-type=${prompt_type} \ - --fp16 \ - --fp16-scale-window=512 \ - --num-workers=0 \ - --image-dir=${image_dir} \ - ${sample_patch_num} \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - --reset-dataloader --reset-meters --reset-optimizer \ - --strict \ - --resnet-model-path=${resnet_model_path} - diff --git a/spaces/nathanTQ/ChatDev/camel/messages/__init__.py b/spaces/nathanTQ/ChatDev/camel/messages/__init__.py deleted file mode 100644 index 4fe78e32926614bdf70ae5df5e5a949d08e31c04..0000000000000000000000000000000000000000 --- a/spaces/nathanTQ/ChatDev/camel/messages/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from typing import Dict, Union - -OpenAISystemMessage = Dict[str, str] -OpenAIAssistantMessage = Dict[str, str] -OpenAIUserMessage = Dict[str, str] -OpenAIChatMessage = Union[OpenAIUserMessage, OpenAIAssistantMessage] -OpenAIMessage = Union[OpenAISystemMessage, OpenAIChatMessage] - -from .base import BaseMessage # noqa: E402 -from .system_messages import ( # noqa: E402 - SystemMessage, AssistantSystemMessage, UserSystemMessage, -) -from .chat_messages import ( # noqa: E402 - ChatMessage, AssistantChatMessage, UserChatMessage, -) - -MessageType = Union[BaseMessage, SystemMessage, AssistantSystemMessage, - UserSystemMessage, ChatMessage, AssistantChatMessage, - UserChatMessage] -SystemMessageType = Union[SystemMessage, AssistantSystemMessage, - UserSystemMessage] -ChatMessageType = Union[ChatMessage, AssistantChatMessage, UserChatMessage] - -__all__ = [ - 'OpenAISystemMessage', - 'OpenAIAssistantMessage', - 'OpenAIUserMessage', - 'OpenAIChatMessage', - 'OpenAIMessage', - 'BaseMessage', - 'SystemMessage', - 'AssistantSystemMessage', - 'UserSystemMessage', - 'ChatMessage', - 'AssistantChatMessage', - 'UserChatMessage', - 'MessageType', - 'SystemMessageType', - 'ChatMessageType', -] diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Asphalt 8 Airborne V3.6.0k [Mod].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Asphalt 8 Airborne V3.6.0k [Mod].md deleted file mode 100644 index 36f91b05de7a6c9d96660e1581486889627d9bcb..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Asphalt 8 Airborne V3.6.0k [Mod].md +++ /dev/null @@ -1,26 +0,0 @@ -
        -

        Asphalt 8: Airborne V3.6.0k [Mod] - A Car Racing Game with Unlimited Money

        -

        Asphalt 8: Airborne is a popular car racing game developed by Gameloft. It features realistic graphics, high-speed action, and a variety of cars and tracks to choose from. You can customize your car, perform stunts, and compete with other players online or offline.

        -

        Asphalt 8 Airborne V3.6.0k [Mod]


        Download File ✔✔✔ https://urlcod.com/2uIaBi



        -

        If you want to enjoy the game without any limitations, you can download the modded version of Asphalt 8: Airborne V3.6.0k [Mod] from HappyMod. This mod gives you unlimited money, which means you can buy any car or upgrade you want. You can also unlock all the tracks and modes in the game.

        -

        To download Asphalt 8: Airborne V3.6.0k [Mod], you need to follow these steps:

        -
          -
        1. Go to this link and click on the download button.
        2. -
        3. Wait for the download to finish and install the APK file on your device.
        4. -
        5. Open the game and enjoy the mod features.
        6. -
        -

        Note: This mod is only for Android devices and requires at least 4 GB of RAM and Android 4.4 or higher.

Asphalt 8: Airborne is not just a racing game, but also an arcade game with high-speed aerial stunts. You can hit the ramps and take the race beyond the limits of physics as you break free from gravity and fly into the sky with your car or bike. You can perform barrel rolls and wild 360° jumps while competing against other racers or in single-player mode. You can also maneuver through the air while pulling off stunts in your car or motorcycle to maximize your speed and score[^2^] [^3^].

        -

        The game has different modes to suit your preferences and skills. You can play in World Series, where you can race against other players online and score points and unlock prizes. You can also join limited-time events and challenges to win exclusive rewards and cars. You can also play in Career mode, where you can progress through 9 seasons and 400 events. Or you can play in Quick Race mode, where you can choose any track and car and race for fun[^1^] [^2^].

        -

        Asphalt 8: Airborne is a game that will keep you entertained and thrilled with its stunning graphics, realistic sound effects, and addictive gameplay. You can download it for free from Microsoft Store, Google Play Store, or App Store, depending on your device. You can also enjoy it on your PC or TV with Windows 10 or tvOS[^1^] [^2^]. If you love car racing games, you should definitely try Asphalt 8: Airborne.

        -

        If you want to improve your skills and performance in Asphalt 8: Airborne, you should also follow some tips and tricks from experienced players. Here are some of them:

        -
          -
        • Buy the right car for each track and mode. Different cars have different strengths and weaknesses, such as speed, acceleration, handling, nitro, etc. You should choose a car that suits your playstyle and the track conditions. For example, a car with good handling and nitro is better for tracks with many curves and ramps, while a car with high speed and acceleration is better for tracks with long straightaways[^1^].
        • -
        • Block the competition at the start of a race. If you are playing in multiplayer mode, you can use a sneaky trick to slow down your opponents. At the start of a race, hit the brake to stop your car and block the cars behind you. Wait for three seconds, then hit the nitro to speed up. This will give you an advantage over the other racers who wasted their nitro at the beginning[^1^].
        • -
        • Know your shortcuts and ramps. Every track in Asphalt 8: Airborne has multiple routes and shortcuts that can help you save time and distance. You should learn them and use them wisely. Some shortcuts may require you to perform stunts or use nitro to access them. Ramps are also useful for getting airborne and performing tricks that earn you more nitro[^1^] [^2^].
        • -
        • Grab that nitro and use it wisely. Nitro is essential for boosting your speed and winning races. You should try to keep your nitro tank filled at all times by performing tricks, drifting, knocking down other cars, or picking up nitro icons on the track. You should also use your nitro strategically, such as when you need to overtake someone, escape a trap, or reach a shortcut. You can also perform a Perfect Nitro by hitting the nitro button again when the boost meter is in the red zone. This will give you an extra burst of speed[^1^] [^2^] [^3^].
        • -
        • Be aggressive in multiplayer mode. In multiplayer mode, you are not only racing against time, but also against other players who may try to sabotage you. You should not hesitate to ram into other cars or push them into walls or obstacles. This will earn you money and experience points, as well as slow down your rivals. However, be careful not to wreck your own car or lose control in the process[^1^] [^3^].
        • -
        • Practice, practice, practice. The best way to improve your skills and performance in Asphalt 8: Airborne is to practice as much as possible. You should play different modes and tracks to familiarize yourself with them and learn their secrets. You should also work on your drifting skills, as drifting is crucial for maintaining speed and control through curves. You should also avoid wrecking your car or falling off the track, as this will cost you time and money[^1^] [^2^] [^3^].
        • -
        -

        With these tips and tricks, you will be able to master Asphalt 8: Airborne and become a champion racer.

        -
        -
        \ No newline at end of file diff --git a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/setup.py b/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/setup.py deleted file mode 100644 index 995727a719acfab82f9c69ba2df32121036c7c08..0000000000000000000000000000000000000000 --- a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/sentence-transformers/setup.py +++ /dev/null @@ -1,41 +0,0 @@ -from setuptools import setup, find_packages - -with open("README.md", mode="r", encoding="utf-8") as readme_file: - readme = readme_file.read() - - - -setup( - name="sentence-transformers", - version="2.2.2", - author="Nils Reimers", - author_email="info@nils-reimers.de", - description="Multilingual text embeddings", - long_description=readme, - long_description_content_type="text/markdown", - license="Apache License 2.0", - url="https://www.SBERT.net", - download_url="https://github.com/UKPLab/sentence-transformers/", - packages=find_packages(), - python_requires=">=3.6.0", - install_requires=[ - 'transformers>=4.6.0,<5.0.0', - 'tqdm', - 'torch>=1.6.0', - 'torchvision', - 'numpy', - 'scikit-learn', - 'scipy', - 'nltk', - 'sentencepiece', - 'huggingface-hub>=0.4.0' - ], - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3.6", - "Topic :: Scientific/Engineering :: Artificial Intelligence" - ], - keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning" -) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/common.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/common.py deleted file mode 100644 index bf24b1d968e01737d76a672546535e57400df262..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/common.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import copy -import itertools -import logging -import numpy as np -import pickle -import random -from typing import Callable, Union -import torch -import torch.utils.data as data -from torch.utils.data.sampler import Sampler - -from detectron2.utils.serialize import PicklableWrapper - -__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"] - -logger = logging.getLogger(__name__) - - -def _shard_iterator_dataloader_worker(iterable): - # Shard the iterable if we're currently inside pytorch dataloader worker. - worker_info = data.get_worker_info() - if worker_info is None or worker_info.num_workers == 1: - # do nothing - yield from iterable - else: - yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers) - - -class _MapIterableDataset(data.IterableDataset): - """ - Map a function over elements in an IterableDataset. - - Similar to pytorch's MapIterDataPipe, but support filtering when map_func - returns None. - - This class is not public-facing. Will be called by `MapDataset`. - """ - - def __init__(self, dataset, map_func): - self._dataset = dataset - self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work - - def __len__(self): - return len(self._dataset) - - def __iter__(self): - for x in map(self._map_func, self._dataset): - if x is not None: - yield x - - -class MapDataset(data.Dataset): - """ - Map a function over the elements in a dataset. 
- """ - - def __init__(self, dataset, map_func): - """ - Args: - dataset: a dataset where map function is applied. Can be either - map-style or iterable dataset. When given an iterable dataset, - the returned object will also be an iterable dataset. - map_func: a callable which maps the element in dataset. map_func can - return None to skip the data (e.g. in case of errors). - How None is handled depends on the style of `dataset`. - If `dataset` is map-style, it randomly tries other elements. - If `dataset` is iterable, it skips the data and tries the next. - """ - self._dataset = dataset - self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work - - self._rng = random.Random(42) - self._fallback_candidates = set(range(len(dataset))) - - def __new__(cls, dataset, map_func): - is_iterable = isinstance(dataset, data.IterableDataset) - if is_iterable: - return _MapIterableDataset(dataset, map_func) - else: - return super().__new__(cls) - - def __getnewargs__(self): - return self._dataset, self._map_func - - def __len__(self): - return len(self._dataset) - - def __getitem__(self, idx): - retry_count = 0 - cur_idx = int(idx) - - while True: - data = self._map_func(self._dataset[cur_idx]) - if data is not None: - self._fallback_candidates.add(cur_idx) - return data - - # _map_func fails for this idx, use a random new index from the pool - retry_count += 1 - self._fallback_candidates.discard(cur_idx) - cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0] - - if retry_count >= 3: - logger = logging.getLogger(__name__) - logger.warning( - "Failed to apply `_map_func` for idx: {}, retry count: {}".format( - idx, retry_count - ) - ) - - -class _TorchSerializedList(object): - """ - A list-like object whose items are serialized and stored in a torch tensor. When - launching a process that uses TorchSerializedList with "fork" start method, - the subprocess can read the same buffer without triggering copy-on-access. When - launching a process that uses TorchSerializedList with "spawn/forkserver" start - method, the list will be pickled by a special ForkingPickler registered by PyTorch - that moves data to shared memory. In both cases, this allows parent and child - processes to share RAM for the list data, hence avoids the issue in - https://github.com/pytorch/pytorch/issues/13246. - - See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/ - on how it works. 
- """ - - def __init__(self, lst: list): - self._lst = lst - - def _serialize(data): - buffer = pickle.dumps(data, protocol=-1) - return np.frombuffer(buffer, dtype=np.uint8) - - logger.info( - "Serializing {} elements to byte tensors and concatenating them all ...".format( - len(self._lst) - ) - ) - self._lst = [_serialize(x) for x in self._lst] - self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64) - self._addr = torch.from_numpy(np.cumsum(self._addr)) - self._lst = torch.from_numpy(np.concatenate(self._lst)) - logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2)) - - def __len__(self): - return len(self._addr) - - def __getitem__(self, idx): - start_addr = 0 if idx == 0 else self._addr[idx - 1].item() - end_addr = self._addr[idx].item() - bytes = memoryview(self._lst[start_addr:end_addr].numpy()) - - # @lint-ignore PYTHONPICKLEISBAD - return pickle.loads(bytes) - - -_DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList - - -@contextlib.contextmanager -def set_default_dataset_from_list_serialize_method(new): - """ - Context manager for using custom serialize function when creating DatasetFromList - """ - - global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new - yield - _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig - - -class DatasetFromList(data.Dataset): - """ - Wrap a list to a torch Dataset. It produces elements of the list as data. - """ - - def __init__( - self, - lst: list, - copy: bool = True, - serialize: Union[bool, Callable] = True, - ): - """ - Args: - lst (list): a list which contains elements to produce. - copy (bool): whether to deepcopy the element when producing it, - so that the result can be modified in place without affecting the - source in the list. - serialize (bool or callable): whether to serialize the stroage to other - backend. If `True`, the default serialize method will be used, if given - a callable, the callable will be used as serialize method. - """ - self._lst = lst - self._copy = copy - if not isinstance(serialize, (bool, Callable)): - raise TypeError(f"Unsupported type for argument `serailzie`: {serialize}") - self._serialize = serialize is not False - - if self._serialize: - serialize_method = ( - serialize - if isinstance(serialize, Callable) - else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - ) - logger.info(f"Serializing the dataset using: {serialize_method}") - self._lst = serialize_method(self._lst) - - def __len__(self): - return len(self._lst) - - def __getitem__(self, idx): - if self._copy and not self._serialize: - return copy.deepcopy(self._lst[idx]) - else: - return self._lst[idx] - - -class ToIterableDataset(data.IterableDataset): - """ - Convert an old indices-based (also called map-style) dataset - to an iterable-style dataset. - """ - - def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True): - """ - Args: - dataset: an old-style dataset with ``__getitem__`` - sampler: a cheap iterable that produces indices to be applied on ``dataset``. - shard_sampler: whether to shard the sampler based on the current pytorch data loader - worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple - workers, it is responsible for sharding its data based on worker id so that workers - don't produce identical data. 
- - Most samplers (like our TrainingSampler) do not shard based on dataloader worker id - and this argument should be set to True. But certain samplers may be already - sharded, in that case this argument should be set to False. - """ - assert not isinstance(dataset, data.IterableDataset), dataset - assert isinstance(sampler, Sampler), sampler - self.dataset = dataset - self.sampler = sampler - self.shard_sampler = shard_sampler - - def __iter__(self): - if not self.shard_sampler: - sampler = self.sampler - else: - # With map-style dataset, `DataLoader(dataset, sampler)` runs the - # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))` - # will run sampler in every of the N worker. So we should only keep 1/N of the ids on - # each worker. The assumption is that sampler is cheap to iterate so it's fine to - # discard ids in workers. - sampler = _shard_iterator_dataloader_worker(self.sampler) - for idx in sampler: - yield self.dataset[idx] - - def __len__(self): - return len(self.sampler) - - -class AspectRatioGroupedDataset(data.IterableDataset): - """ - Batch data that have similar aspect ratio together. - In this implementation, images whose aspect ratio < (or >) 1 will - be batched together. - This improves training speed because the images then need less padding - to form a batch. - - It assumes the underlying dataset produces dicts with "width" and "height" keys. - It will then produce a list of original dicts with length = batch_size, - all with similar aspect ratios. - """ - - def __init__(self, dataset, batch_size): - """ - Args: - dataset: an iterable. Each element must be a dict with keys - "width" and "height", which will be used to batch data. - batch_size (int): - """ - self.dataset = dataset - self.batch_size = batch_size - self._buckets = [[] for _ in range(2)] - # Hard-coded two aspect ratio groups: w > h and w < h. - # Can add support for more aspect ratio groups, but doesn't seem useful - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - bucket_id = 0 if w > h else 1 - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_size: - data = bucket[:] - # Clear bucket first, because code after yield is not - # guaranteed to execute - del bucket[:] - yield data diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/cityscapes_evaluation.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/cityscapes_evaluation.py deleted file mode 100644 index 9cc7888f0f88ed9b44eae942353a9f4dd4b8782a..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/cityscapes_evaluation.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import glob -import logging -import numpy as np -import os -import tempfile -from collections import OrderedDict -import torch -from PIL import Image - -from detectron2.data import MetadataCatalog -from detectron2.utils import comm -from detectron2.utils.file_io import PathManager - -from .evaluator import DatasetEvaluator - - -class CityscapesEvaluator(DatasetEvaluator): - """ - Base class for evaluation using cityscapes API. - """ - - def __init__(self, dataset_name): - """ - Args: - dataset_name (str): the name of the dataset. - It must have the following metadata associated with it: - "thing_classes", "gt_dir". 
- """ - self._metadata = MetadataCatalog.get(dataset_name) - self._cpu_device = torch.device("cpu") - self._logger = logging.getLogger(__name__) - - def reset(self): - self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") - self._temp_dir = self._working_dir.name - # All workers will write to the same results directory - # TODO this does not work in distributed training - assert ( - comm.get_local_size() == comm.get_world_size() - ), "CityscapesEvaluator currently do not work with multiple machines." - self._temp_dir = comm.all_gather(self._temp_dir)[0] - if self._temp_dir != self._working_dir.name: - self._working_dir.cleanup() - self._logger.info( - "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) - ) - - -class CityscapesInstanceEvaluator(CityscapesEvaluator): - """ - Evaluate instance segmentation results on cityscapes dataset using cityscapes API. - - Note: - * It does not work in multi-machine distributed training. - * It contains a synchronization, therefore has to be used on all ranks. - * Only the main process runs evaluation. - """ - - def process(self, inputs, outputs): - from cityscapesscripts.helpers.labels import name2label - - for input, output in zip(inputs, outputs): - file_name = input["file_name"] - basename = os.path.splitext(os.path.basename(file_name))[0] - pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") - - if "instances" in output: - output = output["instances"].to(self._cpu_device) - num_instances = len(output) - with open(pred_txt, "w") as fout: - for i in range(num_instances): - pred_class = output.pred_classes[i] - classes = self._metadata.thing_classes[pred_class] - class_id = name2label[classes].id - score = output.scores[i] - mask = output.pred_masks[i].numpy().astype("uint8") - png_filename = os.path.join( - self._temp_dir, basename + "_{}_{}.png".format(i, classes) - ) - - Image.fromarray(mask * 255).save(png_filename) - fout.write( - "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) - ) - else: - # Cityscapes requires a prediction file for every ground truth image. - with open(pred_txt, "w") as fout: - pass - - def evaluate(self): - """ - Returns: - dict: has a key "segm", whose value is a dict of "AP" and "AP50". - """ - comm.synchronize() - if comm.get_rank() > 0: - return - import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval - - self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) - - # set some global states in cityscapes evaluation API, before evaluating - cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) - cityscapes_eval.args.predictionWalk = None - cityscapes_eval.args.JSONOutput = False - cityscapes_eval.args.colorized = False - cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") - - # These lines are adopted from - # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa - gt_dir = PathManager.get_local_path(self._metadata.gt_dir) - groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) - assert len( - groundTruthImgList - ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( - cityscapes_eval.args.groundTruthSearch - ) - predictionImgList = [] - for gt in groundTruthImgList: - predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) - results = cityscapes_eval.evaluateImgLists( - predictionImgList, groundTruthImgList, cityscapes_eval.args - )["averages"] - - ret = OrderedDict() - ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} - self._working_dir.cleanup() - return ret - - -class CityscapesSemSegEvaluator(CityscapesEvaluator): - """ - Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. - - Note: - * It does not work in multi-machine distributed training. - * It contains a synchronization, therefore has to be used on all ranks. - * Only the main process runs evaluation. - """ - - def process(self, inputs, outputs): - from cityscapesscripts.helpers.labels import trainId2label - - for input, output in zip(inputs, outputs): - file_name = input["file_name"] - basename = os.path.splitext(os.path.basename(file_name))[0] - pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") - - output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() - pred = 255 * np.ones(output.shape, dtype=np.uint8) - for train_id, label in trainId2label.items(): - if label.ignoreInEval: - continue - pred[output == train_id] = label.id - Image.fromarray(pred).save(pred_filename) - - def evaluate(self): - comm.synchronize() - if comm.get_rank() > 0: - return - # Load the Cityscapes eval script *after* setting the required env var, - # since the script reads CITYSCAPES_DATASET into global variables at load time. - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval - - self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) - - # set some global states in cityscapes evaluation API, before evaluating - cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) - cityscapes_eval.args.predictionWalk = None - cityscapes_eval.args.JSONOutput = False - cityscapes_eval.args.colorized = False - - # These lines are adopted from - # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa - gt_dir = PathManager.get_local_path(self._metadata.gt_dir) - groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) - assert len( - groundTruthImgList - ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( - cityscapes_eval.args.groundTruthSearch - ) - predictionImgList = [] - for gt in groundTruthImgList: - predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) - results = cityscapes_eval.evaluateImgLists( - predictionImgList, groundTruthImgList, cityscapes_eval.args - ) - ret = OrderedDict() - ret["sem_seg"] = { - "IoU": 100.0 * results["averageScoreClasses"], - "iIoU": 100.0 * results["averageScoreInstClasses"], - "IoU_sup": 100.0 * results["averageScoreCategories"], - "iIoU_sup": 100.0 * results["averageScoreInstCategories"], - } - self._working_dir.cleanup() - return ret diff --git a/spaces/nomic-ai/WizardLM_WizardLM_evol_instruct_70k/README.md b/spaces/nomic-ai/WizardLM_WizardLM_evol_instruct_70k/README.md deleted file mode 100644 index 1cc603e13eaa68f9ea5db849b14e4a4a4e03f8df..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/WizardLM_WizardLM_evol_instruct_70k/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: WizardLM/WizardLM_evol_instruct_70k -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/data/util/MaskModel.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/data/util/MaskModel.py deleted file mode 100644 index 9cca4f962889e9b3fd30d0f92f19c8b3104bfd3a..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/data/util/MaskModel.py +++ /dev/null @@ -1,123 +0,0 @@ -import random -import numpy as np - -class RandomMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - try: - self.maxDeltaHeight, self.maxDeltaWidth = dataInfo['mask']['max_delta_height'], \ - dataInfo['mask']['max_delta_width'] - except KeyError: - self.maxDeltaHeight, self.maxDeltaWidth = 0, 0 - - try: - self.verticalMargin, self.horizontalMargin = dataInfo['mask']['vertical_margin'], \ - dataInfo['mask']['horizontal_margin'] - except KeyError: - self.verticalMargin, self.horizontalMargin = 0, 0 - - def __call__(self): - from .utils import random_bbox - from .utils import bbox2mask - masks = [] - bbox = random_bbox(self.imageHeight, self.imageWidth, self.verticalMargin, self.horizontalMargin, - self.maskHeight, self.maskWidth) - if random.uniform(0, 1) > 0.5: - mask = bbox2mask(self.imageHeight, self.imageWidth, 0, 0, bbox) - for frame in range(self.videoLength): - masks.append(mask) - else: - for frame in range(self.videoLength): - delta_h, delta_w = random.randint(-3, 3), random.randint(-3, 3) # 每次向四个方向移动三个像素以内 - bbox = list(bbox) - bbox[0] = min(max(self.verticalMargin, bbox[0] + delta_h), self.imageHeight - self.verticalMargin - bbox[2]) - bbox[1] = min(max(self.horizontalMargin, bbox[1] + delta_w), self.imageWidth - self.horizontalMargin - bbox[3]) - mask = bbox2mask(self.imageHeight, self.imageWidth, 0, 0, bbox) - masks.append(mask) - masks = np.stack(masks, axis=0) - if len(masks.shape) == 3: - masks = masks[:, :, :, np.newaxis] - assert len(masks.shape) == 4, 'Wrong mask dimension {}'.format(len(masks.shape)) - return masks - - -class MidRandomMask(): - ### This mask is considered without random motion - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - 
dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - - def __call__(self): - from .utils import mid_bbox_mask - mask = mid_bbox_mask(self.imageHeight, self.imageWidth, self.maskHeight, self.maskWidth) - masks = [] - for _ in range(self.videoLength): - masks.append(mask) - return mask - - -class MatrixMask(): - ### This mask is considered without random motion - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maskHeight, self.maskWidth = dataInfo['mask']['mask_height'], \ - dataInfo['mask']['mask_width'] - try: - self.row, self.column = dataInfo['mask']['row'], \ - dataInfo['mask']['column'] - except KeyError: - self.row, self.column = 5, 4 - - def __call__(self): - from .utils import matrix2bbox - mask = matrix2bbox(self.imageHeight, self.imageWidth, self.maskHeight, - self.maskWidth, self.row, self.column) - masks = [] - for video in range(self.videoLength): - masks.append(mask) - return mask - - -class FreeFormMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - self.maxVertex = dataInfo['mask']['max_vertex'] - self.maxLength = dataInfo['mask']['max_length'] - self.maxBrushWidth = dataInfo['mask']['max_brush_width'] - self.maxAngle = dataInfo['mask']['max_angle'] - - def __call__(self): - from .utils import freeFormMask - mask = freeFormMask(self.imageHeight, self.imageWidth, - self.maxVertex, self.maxLength, - self.maxBrushWidth, self.maxAngle) - return mask - - -class StationaryMask(): - def __init__(self, videoLength, dataInfo): - self.videoLength = videoLength - self.imageHeight, self.imageWidth = dataInfo['image']['image_height'], \ - dataInfo['image']['image_width'] - # self.maxPointNum = dataInfo['mask']['max_point_num'] - # self.maxLength = dataInfo['mask']['max_length'] - - def __call__(self): - from .STTN_mask import create_random_shape_with_random_motion - masks = create_random_shape_with_random_motion(self.videoLength, 0.9, 1.1, 1, 10, self.imageHeight, self.imageWidth) - masks = np.stack(masks, axis=0) - if len(masks.shape) == 3: - masks = masks[:, :, :, np.newaxis] - assert len(masks.shape) == 4, 'Your masks with a wrong shape {}'.format(len(masks.shape)) - return masks \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/README.md deleted file mode 100644 index 9566e68fc51df1928a01f7cc9c51fbd66f049feb..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/README.md +++ /dev/null @@ -1,72 +0,0 @@ - - -# 🧨 Diffusers Examples - -Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library -for a variety of use cases involving training or fine-tuning. - -**Note**: If you are looking for **official** examples on how to use `diffusers` for inference, -please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines) - -Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**. 
-More specifically, this means: - -- **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script. -- **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required. -- **Beginner-friendly**: We do not aim for providing state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners. -- **One-purpose-only**: Examples should show one task and one task only. Even if a task is from a modeling -point of view very similar, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible. - -We provide **official** examples that cover the most popular tasks of diffusion models. -*Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above. -If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you! - -Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. 
Currently we support: - -| Task | 🤗 Accelerate | 🤗 Datasets | Colab -|---|---|:---:|:---:| -| [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) -| [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ | -| [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) -| [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) -| [**ControlNet**](./controlnet) | ✅ | ✅ | - -| [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | - -| [**Reinforcement Learning for Control**](https://github.com/huggingface/diffusers/blob/main/examples/reinforcement_learning/run_diffusers_locomotion.py) | - | - | coming soon. - -## Community - -In addition, we provide **community** examples, which are examples added and maintained by our community. -Community examples can consist of both *training* examples or *inference* pipelines. -For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue. -Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines. -**Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄. - -## Research Projects - -We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer the extended capabilities which are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details. - -## Important note - -To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install . 
-``` -Then cd in the example folder of your choice and run -```bash -pip install -r requirements.txt -``` diff --git a/spaces/parsa-mhmdi/persian-asr/app.py b/spaces/parsa-mhmdi/persian-asr/app.py deleted file mode 100644 index 2f4a68f37a7a1558c0592c948e87bc9099cea6aa..0000000000000000000000000000000000000000 --- a/spaces/parsa-mhmdi/persian-asr/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -import librosa -from espnet2.bin.asr_inference import Speech2Text -import gradio as gr - - -# def text_normalizer(text): -# text = text.upper() -# return text.translate(str.maketrans('', '', string.punctuation)) - -def inference(audio): - lang = 'fa' - fs = 16000 - - speech2text = Speech2Text( - asr_train_config="asr_config.yaml", - asr_model_file="valid.acc.ave_10best.pth", - lm_train_config="config.yaml", - lm_file="lm_valid.loss.ave_10best.pth", - lm_weight= 0.43, - device="cpu", - minlenratio=0.0, - maxlenratio=0.0, - ctc_weight=1, - beam_size=1, - batch_size=0, - nbest=1 - ) - speech, rate = librosa.load(audio, sr=16000) - assert rate == fs, "mismatch in sampling rate" - nbests = speech2text(speech) - text, *_ = nbests[0] - # return f"ASR hypothesis: {text_normalizer(text)}" - return str(text) - -def upload_manage(upload,microphone): - print(upload) - print(microphone) - if upload: - result = inference(upload) - elif microphone: - result = inference(microphone) - - return result - - -inputs = [gr.inputs.Audio(source="upload",label="upload", type="filepath"),gr.inputs.Audio(source="microphone",label="microphone", type="filepath")] -outputs = gr.outputs.Textbox(label="Output Text") - -title = "Persian ASR / E-Branchformer" -description = "

        This application was created by Parsa Mohammadi.
        Github . LinkedIn
        " -gr.Interface(upload_manage, inputs, outputs, title=title, description=description, enable_queue=True).launch(debug=True) - diff --git a/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1/files/functions.py b/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1/files/functions.py deleted file mode 100644 index 8492edd0e3a93848a6fab9cc2302006e5df738d3..0000000000000000000000000000000000000000 --- a/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1/files/functions.py +++ /dev/null @@ -1,805 +0,0 @@ -import os -import gradio as gr -import re -import string -import torch - -from operator import itemgetter -import collections - -import pypdf -from pypdf import PdfReader -from pypdf.errors import PdfReadError - -import pdf2image -from pdf2image import convert_from_path -import langdetect -from langdetect import detect_langs - -import pandas as pd -import numpy as np -import random -import tempfile -import itertools - -from matplotlib import font_manager -from PIL import Image, ImageDraw, ImageFont -import cv2 - -# Tesseract -print(os.popen(f'cat /etc/debian_version').read()) -print(os.popen(f'cat /etc/issue').read()) -print(os.popen(f'apt search tesseract').read()) -import pytesseract - -## Key parameters - -# categories colors -label2color = { - 'Caption': 'brown', - 'Footnote': 'orange', - 'Formula': 'gray', - 'List-item': 'yellow', - 'Page-footer': 'red', - 'Page-header': 'red', - 'Picture': 'violet', - 'Section-header': 'orange', - 'Table': 'green', - 'Text': 'blue', - 'Title': 'pink' - } - -# bounding boxes start and end of a sequence -cls_box = [0, 0, 0, 0] -sep_box = cls_box - -# model -from transformers import AutoTokenizer, AutoModelForTokenClassification -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -model_id = "pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-linelevel-ml384" - -tokenizer = AutoTokenizer.from_pretrained(model_id) -model = AutoModelForTokenClassification.from_pretrained(model_id); -model.to(device); - -# get labels -id2label = model.config.id2label -label2id = model.config.label2id -num_labels = len(id2label) - -# (tokenization) The maximum length of a feature (sequence) -if str(384) in model_id: - max_length = 384 -elif str(512) in model_id: - max_length = 512 -else: - print("Error with max_length of chunks!") - -# (tokenization) overlap -doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed. 
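A note on the two chunking parameters just above: `max_length` caps each feature at 384 (or 512) tokens and `doc_stride` keeps a 128-token overlap between consecutive chunks, which is what `prepare_inference_features` further down relies on to split a long page into overlapping windows. Below is a minimal sketch of that behaviour, assuming a stock `xlm-roberta-base` tokenizer rather than the fine-tuned checkpoint loaded above:

```python
# Illustration only (not part of the original file): how max_length + doc_stride
# produce overlapping chunks via return_overflowing_tokens. The tokenizer name is
# an assumption; the app loads its own fine-tuned LiLT checkpoint instead.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
long_text = " ".join(["word"] * 1000)  # longer than a single 384-token chunk

enc = tok(
    long_text,
    truncation=True,
    padding="max_length",
    max_length=384,
    stride=128,                      # tokens shared between consecutive chunks
    return_overflowing_tokens=True,  # return every chunk, not only the first
)

# Each item in enc["input_ids"] is one 384-token chunk; neighbouring chunks share
# 128 tokens, which is why the prediction code later averages the probabilities
# computed on the overlapping region.
print(len(enc["input_ids"]), "chunks of", len(enc["input_ids"][0]), "tokens")
```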
- -# max PDF page images that will be displayed -max_imgboxes = 2 -examples_dir = 'files/' -image_wo_content = examples_dir + "wo_content.png" # image without content -pdf_blank = examples_dir + "blank.pdf" # blank PDF -image_blank = examples_dir + "blank.png" # blank image - -## get langdetect2Tesseract dictionary -t = "files/languages_tesseract.csv" -l = "files/languages_iso.csv" - -df_t = pd.read_csv(t) -df_l = pd.read_csv(l) - -langs_t = df_t["Language"].to_list() -langs_t = [lang_t.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_t in langs_t] -langs_l = df_l["Language"].to_list() -langs_l = [lang_l.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_l in langs_l] -langscode_t = df_t["LangCode"].to_list() -langscode_l = df_l["LangCode"].to_list() - -Tesseract2langdetect, langdetect2Tesseract = dict(), dict() -for lang_t, langcode_t in zip(langs_t,langscode_t): - try: - if lang_t == "Chinese - Simplified".lower().strip().translate(str.maketrans('', '', string.punctuation)): lang_t = "chinese" - index = langs_l.index(lang_t) - langcode_l = langscode_l[index] - Tesseract2langdetect[langcode_t] = langcode_l - except: - continue - -langdetect2Tesseract = {v:k for k,v in Tesseract2langdetect.items()} - -## General - -# get text and bounding boxes from an image -# https://stackoverflow.com/questions/61347755/how-can-i-get-line-coordinates-that-readed-by-tesseract -# https://medium.com/geekculture/tesseract-ocr-understanding-the-contents-of-documents-beyond-their-text-a98704b7c655 -def get_data(results, factor, conf_min=0): - - data = {} - for i in range(len(results['line_num'])): - level = results['level'][i] - block_num = results['block_num'][i] - par_num = results['par_num'][i] - line_num = results['line_num'][i] - top, left = results['top'][i], results['left'][i] - width, height = results['width'][i], results['height'][i] - conf = results['conf'][i] - text = results['text'][i] - if not (text == '' or text.isspace()): - if conf >= conf_min: - tup = (text, left, top, width, height) - if block_num in list(data.keys()): - if par_num in list(data[block_num].keys()): - if line_num in list(data[block_num][par_num].keys()): - data[block_num][par_num][line_num].append(tup) - else: - data[block_num][par_num][line_num] = [tup] - else: - data[block_num][par_num] = {} - data[block_num][par_num][line_num] = [tup] - else: - data[block_num] = {} - data[block_num][par_num] = {} - data[block_num][par_num][line_num] = [tup] - - # get paragraphs dicionnary with list of lines - par_data = {} - par_idx = 1 - for _, b in data.items(): - for _, p in b.items(): - line_data = {} - line_idx = 1 - for _, l in p.items(): - line_data[line_idx] = l - line_idx += 1 - par_data[par_idx] = line_data - par_idx += 1 - - # get lines of texts, grouped by paragraph - lines = list() - row_indexes = list() - row_index = 0 - for _,par in par_data.items(): - count_lines = 0 - for _,line in par.items(): - if count_lines == 0: row_indexes.append(row_index) - line_text = ' '.join([item[0] for item in line]) - lines.append(line_text) - count_lines += 1 - row_index += 1 - # lines.append("\n") - row_index += 1 - # lines = lines[:-1] - - # get paragraphes boxes (par_boxes) - # get lines boxes (line_boxes) - par_boxes = list() - par_idx = 1 - line_boxes = list() - line_idx = 1 - for _, par in par_data.items(): - xmins, ymins, xmaxs, ymaxs = list(), list(), list(), list() - for _, line in par.items(): - xmin, ymin = line[0][1], line[0][2] - xmax, ymax = (line[-1][1] + line[-1][3]), 
(line[-1][2] + line[-1][4]) - line_boxes.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)]) - xmins.append(xmin) - ymins.append(ymin) - xmaxs.append(xmax) - ymaxs.append(ymax) - line_idx += 1 - xmin, ymin, xmax, ymax = min(xmins), min(ymins), max(xmaxs), max(ymaxs) - par_boxes.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)]) - par_idx += 1 - - return lines, row_indexes, par_boxes, line_boxes #data, par_data # - -# rescale image to get 300dpi -def set_image_dpi_resize(image): - """ - Rescaling image to 300dpi while resizing - :param image: An image - :return: A rescaled image - """ - length_x, width_y = image.size - factor = min(1, float(1024.0 / length_x)) - size = int(factor * length_x), int(factor * width_y) - image_resize = image.resize(size, Image.Resampling.LANCZOS) - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='1.png') - temp_filename = temp_file.name - image_resize.save(temp_filename, dpi=(300, 300)) - return factor, temp_filename - -# it is important that each bounding box should be in (upper left, lower right) format. -# source: https://github.com/NielsRogge/Transformers-Tutorials/issues/129 -def upperleft_to_lowerright(bbox): - x0, y0, x1, y1 = tuple(bbox) - if bbox[2] < bbox[0]: - x0 = bbox[2] - x1 = bbox[0] - if bbox[3] < bbox[1]: - y0 = bbox[3] - y1 = bbox[1] - return [x0, y0, x1, y1] - -# convert boundings boxes (left, top, width, height) format to (left, top, left+widght, top+height) format. -def convert_box(bbox): - x, y, w, h = tuple(bbox) # the row comes in (left, top, width, height) format - return [x, y, x+w, y+h] # we turn it into (left, top, left+widght, top+height) to get the actual box - -# LiLT model gets 1000x10000 pixels images -def normalize_box(bbox, width, height): - return [ - int(1000 * (bbox[0] / width)), - int(1000 * (bbox[1] / height)), - int(1000 * (bbox[2] / width)), - int(1000 * (bbox[3] / height)), - ] - -# LiLT model gets 1000x10000 pixels images -def denormalize_box(bbox, width, height): - return [ - int(width * (bbox[0] / 1000)), - int(height * (bbox[1] / 1000)), - int(width* (bbox[2] / 1000)), - int(height * (bbox[3] / 1000)), - ] - -# get back original size -def original_box(box, original_width, original_height, coco_width, coco_height): - return [ - int(original_width * (box[0] / coco_width)), - int(original_height * (box[1] / coco_height)), - int(original_width * (box[2] / coco_width)), - int(original_height* (box[3] / coco_height)), - ] - -def get_blocks(bboxes_block, categories, texts): - - # get list of unique block boxes - bbox_block_dict, bboxes_block_list, bbox_block_prec = dict(), list(), list() - for count_block, bbox_block in enumerate(bboxes_block): - if bbox_block != bbox_block_prec: - bbox_block_indexes = [i for i, bbox in enumerate(bboxes_block) if bbox == bbox_block] - bbox_block_dict[count_block] = bbox_block_indexes - bboxes_block_list.append(bbox_block) - bbox_block_prec = bbox_block - - # get list of categories and texts by unique block boxes - category_block_list, text_block_list = list(), list() - for bbox_block in bboxes_block_list: - count_block = bboxes_block.index(bbox_block) - bbox_block_indexes = bbox_block_dict[count_block] - category_block = np.array(categories, dtype=object)[bbox_block_indexes].tolist()[0] - category_block_list.append(category_block) - text_block = np.array(texts, dtype=object)[bbox_block_indexes].tolist() - text_block = [text.replace("\n","").strip() for text in text_block] - if id2label[category_block] == "Text" or 
id2label[category_block] == "Caption" or id2label[category_block] == "Footnote": - text_block = ' '.join(text_block) - else: - text_block = '\n'.join(text_block) - text_block_list.append(text_block) - - return bboxes_block_list, category_block_list, text_block_list - -# function to sort bounding boxes -def get_sorted_boxes(bboxes): - - # sort by y from page top to bottom - sorted_bboxes = sorted(bboxes, key=itemgetter(1), reverse=False) - y_list = [bbox[1] for bbox in sorted_bboxes] - - # sort by x from page left to right when boxes with same y - if len(list(set(y_list))) != len(y_list): - y_list_duplicates_indexes = dict() - y_list_duplicates = [item for item, count in collections.Counter(y_list).items() if count > 1] - for item in y_list_duplicates: - y_list_duplicates_indexes[item] = [i for i, e in enumerate(y_list) if e == item] - bbox_list_y_duplicates = sorted(np.array(sorted_bboxes, dtype=object)[y_list_duplicates_indexes[item]].tolist(), key=itemgetter(0), reverse=False) - np_array_bboxes = np.array(sorted_bboxes) - np_array_bboxes[y_list_duplicates_indexes[item]] = np.array(bbox_list_y_duplicates) - sorted_bboxes = np_array_bboxes.tolist() - - return sorted_bboxes - -# sort data from y = 0 to end of page (and after, x=0 to end of page when necessary) -def sort_data(bboxes, categories, texts): - - sorted_bboxes = get_sorted_boxes(bboxes) - sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes] - sorted_categories = np.array(categories, dtype=object)[sorted_bboxes_indexes].tolist() - sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist() - - return sorted_bboxes, sorted_categories, sorted_texts - -# sort data from y = 0 to end of page (and after, x=0 to end of page when necessary) -def sort_data_wo_labels(bboxes, texts): - - sorted_bboxes = get_sorted_boxes(bboxes) - sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes] - sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist() - - return sorted_bboxes, sorted_texts - -## PDF processing - -# get filename and images of PDF pages -def pdf_to_images(uploaded_pdf): - - # Check if None object - if uploaded_pdf is None: - path_to_file = pdf_blank - filename = path_to_file.replace(examples_dir,"") - msg = "Invalid PDF file." - images = [Image.open(image_blank)] - else: - # path to the uploaded PDF - path_to_file = uploaded_pdf.name - filename = path_to_file.replace("/tmp/","") - - try: - PdfReader(path_to_file) - except PdfReadError: - path_to_file = pdf_blank - filename = path_to_file.replace(examples_dir,"") - msg = "Invalid PDF file." - images = [Image.open(image_blank)] - else: - try: - images = convert_from_path(path_to_file, last_page=max_imgboxes) - num_imgs = len(images) - msg = f'The PDF "{filename}" was converted into {num_imgs} images.' - except: - msg = f'Error with the PDF "{filename}": it was not converted into images.' 
- images = [Image.open(image_wo_content)] - - return filename, msg, images - -# Extraction of image data (text and bounding boxes) -def extraction_data_from_image(images): - - num_imgs = len(images) - - if num_imgs > 0: - - # https://pyimagesearch.com/2021/11/15/tesseract-page-segmentation-modes-psms-explained-how-to-improve-your-ocr-accuracy/ - custom_config = r'--oem 3 --psm 3 -l eng' # default config PyTesseract: --oem 3 --psm 3 -l eng+deu+fra+jpn+por+spa+rus+hin+chi_sim - results, lines, row_indexes, par_boxes, line_boxes = dict(), dict(), dict(), dict(), dict() - images_ids_list, lines_list, par_boxes_list, line_boxes_list, images_list, page_no_list, num_pages_list = list(), list(), list(), list(), list(), list(), list() - - try: - for i,image in enumerate(images): - # image preprocessing - # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html - img = image.copy() - factor, path_to_img = set_image_dpi_resize(img) # Rescaling to 300dpi - img = Image.open(path_to_img) - img = np.array(img, dtype='uint8') # convert PIL to cv2 - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # gray scale image - ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY) - - # OCR PyTesseract | get langs of page - txt = pytesseract.image_to_string(img, config=custom_config) - txt = txt.strip().lower() - txt = re.sub(r" +", " ", txt) # multiple space - txt = re.sub(r"(\n\s*)+\n+", "\n", txt) # multiple line - # txt = os.popen(f'tesseract {img_filepath} - {custom_config}').read() - try: - langs = detect_langs(txt) - langs = [langdetect2Tesseract[langs[i].lang] for i in range(len(langs))] - langs_string = '+'.join(langs) - except: - langs_string = "eng" - langs_string += '+osd' - custom_config = f'--oem 3 --psm 3 -l {langs_string}' # default config PyTesseract: --oem 3 --psm 3 - - # OCR PyTesseract | get data - results[i] = pytesseract.image_to_data(img, config=custom_config, output_type=pytesseract.Output.DICT) - # results[i] = os.popen(f'tesseract {img_filepath} - {custom_config}').read() - - lines[i], row_indexes[i], par_boxes[i], line_boxes[i] = get_data(results[i], factor, conf_min=0) - lines_list.append(lines[i]) - par_boxes_list.append(par_boxes[i]) - line_boxes_list.append(line_boxes[i]) - images_ids_list.append(i) - images_list.append(images[i]) - page_no_list.append(i) - num_pages_list.append(num_imgs) - - except: - print(f"There was an error within the extraction of PDF text by the OCR!") - else: - from datasets import Dataset - dataset = Dataset.from_dict({"images_ids": images_ids_list, "images": images_list, "page_no": page_no_list, "num_pages": num_pages_list, "texts": lines_list, "bboxes_line": line_boxes_list}) - - # print(f"The text data was successfully extracted by the OCR!") - - return dataset, lines, row_indexes, par_boxes, line_boxes - -## Inference - -def prepare_inference_features(example, cls_box = cls_box, sep_box = sep_box): - - images_ids_list, chunks_ids_list, input_ids_list, attention_mask_list, bb_list = list(), list(), list(), list(), list() - - # get batch - batch_images_ids = example["images_ids"] - batch_images = example["images"] - batch_bboxes_line = example["bboxes_line"] - batch_texts = example["texts"] - batch_images_size = [image.size for image in batch_images] - - batch_width, batch_height = [image_size[0] for image_size in batch_images_size], [image_size[1] for image_size in batch_images_size] - - # add a dimension if not a batch but only one image - if not isinstance(batch_images_ids, list): - batch_images_ids = 
[batch_images_ids] - batch_images = [batch_images] - batch_bboxes_line = [batch_bboxes_line] - batch_texts = [batch_texts] - batch_width, batch_height = [batch_width], [batch_height] - - # process all images of the batch - for num_batch, (image_id, boxes, texts, width, height) in enumerate(zip(batch_images_ids, batch_bboxes_line, batch_texts, batch_width, batch_height)): - tokens_list = [] - bboxes_list = [] - - # add a dimension if only on image - if not isinstance(texts, list): - texts, boxes = [texts], [boxes] - - # convert boxes to original - normalize_bboxes_line = [normalize_box(upperleft_to_lowerright(box), width, height) for box in boxes] - - # sort boxes with texts - # we want sorted lists from top to bottom of the image - boxes, texts = sort_data_wo_labels(normalize_bboxes_line, texts) - - count = 0 - for box, text in zip(boxes, texts): - tokens = tokenizer.tokenize(text) - num_tokens = len(tokens) # get number of tokens - tokens_list.extend(tokens) - - bboxes_list.extend([box] * num_tokens) # number of boxes must be the same as the number of tokens - - # use of return_overflowing_tokens=True / stride=doc_stride - # to get parts of image with overlap - # source: https://huggingface.co/course/chapter6/3b?fw=tf#handling-long-contexts - encodings = tokenizer(" ".join(texts), - truncation=True, - padding="max_length", - max_length=max_length, - stride=doc_stride, - return_overflowing_tokens=True, - return_offsets_mapping=True - ) - - otsm = encodings.pop("overflow_to_sample_mapping") - offset_mapping = encodings.pop("offset_mapping") - - # Let's label those examples and get their boxes - sequence_length_prev = 0 - for i, offsets in enumerate(offset_mapping): - # truncate tokens, boxes and labels based on length of chunk - 2 (special tokens and ) - sequence_length = len(encodings.input_ids[i]) - 2 - if i == 0: start = 0 - else: start += sequence_length_prev - doc_stride - end = start + sequence_length - sequence_length_prev = sequence_length - - # get tokens, boxes and labels of this image chunk - bb = [cls_box] + bboxes_list[start:end] + [sep_box] - - # as the last chunk can have a length < max_length - # we must to add [tokenizer.pad_token] (tokens), [sep_box] (boxes) and [-100] (labels) - if len(bb) < max_length: - bb = bb + [sep_box] * (max_length - len(bb)) - - # append results - input_ids_list.append(encodings["input_ids"][i]) - attention_mask_list.append(encodings["attention_mask"][i]) - bb_list.append(bb) - images_ids_list.append(image_id) - chunks_ids_list.append(i) - - return { - "images_ids": images_ids_list, - "chunk_ids": chunks_ids_list, - "input_ids": input_ids_list, - "attention_mask": attention_mask_list, - "normalized_bboxes": bb_list, - } - -from torch.utils.data import Dataset - -class CustomDataset(Dataset): - def __init__(self, dataset, tokenizer): - self.dataset = dataset - self.tokenizer = tokenizer - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - # get item - example = self.dataset[idx] - encoding = dict() - encoding["images_ids"] = example["images_ids"] - encoding["chunk_ids"] = example["chunk_ids"] - encoding["input_ids"] = example["input_ids"] - encoding["attention_mask"] = example["attention_mask"] - encoding["bbox"] = example["normalized_bboxes"] - - return encoding - -import torch.nn.functional as F - -# get predictions at token level -def predictions_token_level(images, custom_encoded_dataset): - - num_imgs = len(images) - if num_imgs > 0: - - chunk_ids, input_ids, bboxes, outputs, token_predictions = dict(), dict(), 
dict(), dict(), dict() - images_ids_list = list() - - for i,encoding in enumerate(custom_encoded_dataset): - - # get custom encoded data - image_id = encoding['images_ids'] - chunk_id = encoding['chunk_ids'] - input_id = torch.tensor(encoding['input_ids'])[None] - attention_mask = torch.tensor(encoding['attention_mask'])[None] - bbox = torch.tensor(encoding['bbox'])[None] - - # save data in dictionnaries - if image_id not in images_ids_list: images_ids_list.append(image_id) - - if image_id in chunk_ids: chunk_ids[image_id].append(chunk_id) - else: chunk_ids[image_id] = [chunk_id] - - if image_id in input_ids: input_ids[image_id].append(input_id) - else: input_ids[image_id] = [input_id] - - if image_id in bboxes: bboxes[image_id].append(bbox) - else: bboxes[image_id] = [bbox] - - # get prediction with forward pass - with torch.no_grad(): - output = model( - input_ids=input_id, - attention_mask=attention_mask, - bbox=bbox - ) - - # save probabilities of predictions in dictionnary - if image_id in outputs: outputs[image_id].append(F.softmax(output.logits.squeeze(), dim=-1)) - else: outputs[image_id] = [F.softmax(output.logits.squeeze(), dim=-1)] - - return outputs, images_ids_list, chunk_ids, input_ids, bboxes - - else: - print("An error occurred while getting predictions!") - -from functools import reduce - -# Get predictions (line level) -def predictions_line_level(dataset, outputs, images_ids_list, chunk_ids, input_ids, bboxes): - - ten_probs_dict, ten_input_ids_dict, ten_bboxes_dict = dict(), dict(), dict() - bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = dict(), dict(), dict(), dict() - - if len(images_ids_list) > 0: - - for i, image_id in enumerate(images_ids_list): - - # get image information - images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"] - image = images_list[0] - width, height = image.size - - # get data - chunk_ids_list = chunk_ids[image_id] - outputs_list = outputs[image_id] - input_ids_list = input_ids[image_id] - bboxes_list = bboxes[image_id] - - # create zeros tensors - ten_probs = torch.zeros((outputs_list[0].shape[0] - 2)*len(outputs_list), outputs_list[0].shape[1]) - ten_input_ids = torch.ones(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list)), dtype =int) - ten_bboxes = torch.zeros(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list), 4), dtype =int) - - if len(outputs_list) > 1: - - for num_output, (output, input_id, bbox) in enumerate(zip(outputs_list, input_ids_list, bboxes_list)): - start = num_output*(max_length - 2) - max(0,num_output)*doc_stride - end = start + (max_length - 2) - - if num_output == 0: - ten_probs[start:end,:] += output[1:-1] - ten_input_ids[:,start:end] = input_id[:,1:-1] - ten_bboxes[:,start:end,:] = bbox[:,1:-1,:] - else: - ten_probs[start:start + doc_stride,:] += output[1:1 + doc_stride] - ten_probs[start:start + doc_stride,:] = ten_probs[start:start + doc_stride,:] * 0.5 - ten_probs[start + doc_stride:end,:] += output[1 + doc_stride:-1] - - ten_input_ids[:,start:start + doc_stride] = input_id[:,1:1 + doc_stride] - ten_input_ids[:,start + doc_stride:end] = input_id[:,1 + doc_stride:-1] - - ten_bboxes[:,start:start + doc_stride,:] = bbox[:,1:1 + doc_stride,:] - ten_bboxes[:,start + doc_stride:end,:] = bbox[:,1 + doc_stride:-1,:] - - else: - ten_probs += outputs_list[0][1:-1] - ten_input_ids = input_ids_list[0][:,1:-1] - ten_bboxes = bboxes_list[0][:,1:-1] - - ten_probs_list, ten_input_ids_list, ten_bboxes_list = ten_probs.tolist(), ten_input_ids.tolist()[0], 
ten_bboxes.tolist()[0] - bboxes_list = list() - input_ids_dict, probs_dict = dict(), dict() - bbox_prev = [-100, -100, -100, -100] - for probs, input_id, bbox in zip(ten_probs_list, ten_input_ids_list, ten_bboxes_list): - bbox = denormalize_box(bbox, width, height) - if bbox != bbox_prev and bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]: - bboxes_list.append(bbox) - input_ids_dict[str(bbox)] = [input_id] - probs_dict[str(bbox)] = [probs] - elif bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]: - input_ids_dict[str(bbox)].append(input_id) - probs_dict[str(bbox)].append(probs) - bbox_prev = bbox - - probs_bbox = dict() - for i,bbox in enumerate(bboxes_list): - probs = probs_dict[str(bbox)] - probs = np.array(probs).T.tolist() - - probs_label = list() - for probs_list in probs: - prob_label = reduce(lambda x, y: x*y, probs_list) - prob_label = prob_label**(1./(len(probs_list))) # normalization - probs_label.append(prob_label) - max_value = max(probs_label) - max_index = probs_label.index(max_value) - probs_bbox[str(bbox)] = max_index - - bboxes_list_dict[image_id] = bboxes_list - input_ids_dict_dict[image_id] = input_ids_dict - probs_dict_dict[image_id] = probs_bbox - - df[image_id] = pd.DataFrame() - df[image_id]["bboxes"] = bboxes_list - df[image_id]["texts"] = [tokenizer.decode(input_ids_dict[str(bbox)]) for bbox in bboxes_list] - df[image_id]["labels"] = [id2label[probs_bbox[str(bbox)]] for bbox in bboxes_list] - - return probs_bbox, bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df - - else: - print("An error occurred while getting predictions!") - -# Get labeled images with lines bounding boxes -def get_labeled_images(dataset, images_ids_list, bboxes_list_dict, probs_dict_dict): - - labeled_images = list() - - for i, image_id in enumerate(images_ids_list): - - # get image - images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"] - image = images_list[0] - width, height = image.size - - # get predicted boxes and labels - bboxes_list = bboxes_list_dict[image_id] - probs_bbox = probs_dict_dict[image_id] - - draw = ImageDraw.Draw(image) - # https://stackoverflow.com/questions/66274858/choosing-a-pil-imagefont-by-font-name-rather-than-filename-and-cross-platform-f - font = font_manager.FontProperties(family='sans-serif', weight='bold') - font_file = font_manager.findfont(font) - font_size = 30 - font = ImageFont.truetype(font_file, font_size) - - for bbox in bboxes_list: - predicted_label = id2label[probs_bbox[str(bbox)]] - draw.rectangle(bbox, outline=label2color[predicted_label]) - draw.text((bbox[0] + 10, bbox[1] - font_size), text=predicted_label, fill=label2color[predicted_label], font=font) - - labeled_images.append(image) - - return labeled_images - -# get data of encoded chunk -def get_encoded_chunk_inference(index_chunk=None): - - # get datasets - example = dataset - encoded_example = encoded_dataset - - # get randomly a document in dataset - if index_chunk == None: index_chunk = random.randint(0, len(encoded_example)-1) - encoded_example = encoded_example[index_chunk] - encoded_image_ids = encoded_example["images_ids"] - - # get the image - example = example.filter(lambda example: example["images_ids"] == encoded_image_ids)[0] - image = example["images"] # original image - width, height = image.size - page_no = example["page_no"] - num_pages = example["num_pages"] - - # get boxes, texts, categories - bboxes, input_ids = encoded_example["normalized_bboxes"][1:-1], 
encoded_example["input_ids"][1:-1] - bboxes = [denormalize_box(bbox, width, height) for bbox in bboxes] - num_tokens = len(input_ids) + 2 - - # get unique bboxes and corresponding labels - bboxes_list, input_ids_list = list(), list() - input_ids_dict = dict() - bbox_prev = [-100, -100, -100, -100] - for i, (bbox, input_id) in enumerate(zip(bboxes, input_ids)): - if bbox != bbox_prev: - bboxes_list.append(bbox) - input_ids_dict[str(bbox)] = [input_id] - else: - input_ids_dict[str(bbox)].append(input_id) - - # start_indexes_list.append(i) - bbox_prev = bbox - - # do not keep "..." - if input_ids_dict[str(bboxes_list[-1])][0] == (tokenizer.convert_tokens_to_ids('
        ')): - del input_ids_dict[str(bboxes_list[-1])] - bboxes_list = bboxes_list[:-1] - - # get texts by line - input_ids_list = input_ids_dict.values() - texts_list = [tokenizer.decode(input_ids) for input_ids in input_ids_list] - - # display DataFrame - df = pd.DataFrame({"texts": texts_list, "input_ids": input_ids_list, "bboxes": bboxes_list}) - - return image, df, num_tokens, page_no, num_pages - -# display chunk of PDF image and its data -def display_chunk_lines_inference(index_chunk=None): - - # get image and image data - image, df, num_tokens, page_no, num_pages = get_encoded_chunk_inference(index_chunk=index_chunk) - - # get data from dataframe - input_ids = df["input_ids"] - texts = df["texts"] - bboxes = df["bboxes"] - - print(f'Chunk ({num_tokens} tokens) of the PDF (page: {page_no+1} / {num_pages})\n') - - # display image with bounding boxes - print(">> PDF image with bounding boxes of lines\n") - draw = ImageDraw.Draw(image) - - labels = list() - for box, text in zip(bboxes, texts): - color = "red" - draw.rectangle(box, outline=color) - - # resize image to original - width, height = image.size - image = image.resize((int(0.5*width), int(0.5*height))) - - # convert to cv and display - img = np.array(image, dtype='uint8') # PIL to cv2 - cv2_imshow(img) - cv2.waitKey(0) - - # display image dataframe - print("\n>> Dataframe of annotated lines\n") - cols = ["texts", "bboxes"] - df = df[cols] - display(df) \ No newline at end of file diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/sjisprober.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/sjisprober.py deleted file mode 100644 index 91df077961b6310b8e1c708b74003d5343bff6a8..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/sjisprober.py +++ /dev/null @@ -1,105 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import Union - -from .chardistribution import SJISDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .enums import MachineState, ProbingState -from .jpcntx import SJISContextAnalysis -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import SJIS_SM_MODEL - - -class SJISProber(MultiByteCharSetProber): - def __init__(self) -> None: - super().__init__() - self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) - self.distribution_analyzer = SJISDistributionAnalysis() - self.context_analyzer = SJISContextAnalysis() - self.reset() - - def reset(self) -> None: - super().reset() - self.context_analyzer.reset() - - @property - def charset_name(self) -> str: - return self.context_analyzer.charset_name - - @property - def language(self) -> str: - return "Japanese" - - def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: - assert self.coding_sm is not None - assert self.distribution_analyzer is not None - - for i, byte in enumerate(byte_str): - coding_state = self.coding_sm.next_state(byte) - if coding_state == MachineState.ERROR: - self.logger.debug( - "%s %s prober hit error at byte %s", - self.charset_name, - self.language, - i, - ) - self._state = ProbingState.NOT_ME - break - if coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - if coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte - self.context_analyzer.feed( - self._last_char[2 - char_len :], char_len - ) - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.context_analyzer.feed( - byte_str[i + 1 - char_len : i + 3 - char_len], char_len - ) - self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if self.context_analyzer.got_enough_data() and ( - self.get_confidence() > self.SHORTCUT_THRESHOLD - ): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self) -> float: - assert self.distribution_analyzer is not None - - context_conf = self.context_analyzer.get_confidence() - distrib_conf = self.distribution_analyzer.get_confidence() - return max(context_conf, distrib_conf) diff --git a/spaces/podsni/twitter_sentiment_id/app.py b/spaces/podsni/twitter_sentiment_id/app.py deleted file mode 100644 index b1aea3f2b964b2d5cca3fd723a0d94dc8238dd05..0000000000000000000000000000000000000000 --- a/spaces/podsni/twitter_sentiment_id/app.py +++ /dev/null @@ -1,139 +0,0 @@ -import streamlit as st - -import streamlit as st -import pandas as pd -import script.functions as fn -import plotly.express as px -import matplotlib.pyplot as plt -# import text_proc in script folder -import script.text_proc as tp -from sentence_transformers import SentenceTransformer - - -st.set_page_config( - page_title="twitter sentiment analysis", - page_icon="👋", -) - -st.sidebar.markdown("📚 Twitter Sentiment Analysis App") - -# Load data -# add tiwtter logo inside title -st.markdown("
        📚 Twitter Sentiment Analysis App
        ", unsafe_allow_html=True) -st.write("Aplikasi sederhana untuk melakukan analisis sentimen terhadap tweet yang diinputkan dan mengekstrak topik dari setiap sentimen.") -# streamlit selectbox simple and advanced - -sb1,sb2 = st.columns([2,4]) -with sb1: - option = st.selectbox('Pilih Mode Pencarian',('Simple','Advanced')) -with sb2: - option_model = st.selectbox('Pilih Model',("IndoBERT (Accurate,Slow)",'Naive Bayes','Logistic Regression (Less Accurate,Fast)','XGBoost','Catboost','SVM','Random Forest')) - -if option == 'Simple': -# create col1 and col2 - col1, col2 = st.columns([3,2]) - with col1: - input = st.text_input("Masukkan User/Hastag", "@traveloka") - with col2: - length = st.number_input("Jumlah Tweet", 10, 10500, 100) -else : - col1, col2 = st.columns([3,1]) - with col1: - input = st.text_input("Masukkan Parameter Pencarian", "(to:@traveloka AND @traveloka) -filter:links filter:replies lang:id") - with col2: - length = st.number_input("Jumlah Tweet", 10, 10500, 100) - st.caption("anda bisa menggunakan parameter pencarian yang lebih spesifik, parameter ini sama dengan paremeter pencarian di twitter") - -submit = st.button("🔍Cari Tweet") - -st.caption("semakin banyak tweet yang diambil maka semakin lama proses analisis sentimen") - -if submit: - with st.spinner('Mengambil data dari twitter... (1/2)'): - df = fn.get_tweets(input, length, option) - with st.spinner('Melakukan Prediksi Sentimen... (2/2)'): - df = fn.get_sentiment(df,option_model) - df.to_csv('assets/data.csv',index=False) - # plot - st.write("Preview Dataset",unsafe_allow_html=True) - def color_sentiment(val): - color_dict = {"positif": "#00cc96", "negatif": "#ef553b","netral": "#636efa"} - return f'color: {color_dict[val]}' - st.dataframe(df.style.applymap(color_sentiment, subset=['sentiment']),use_container_width=True,height = 200) - # st.dataframe(df,use_container_width=True,height = 200) - st.write ("Jumlah Tweet: ",df.shape[0]) - # download datasets - - - st.write("

        📊 Analisis Sentimen

        ",unsafe_allow_html=True) - col_fig1, col_fig2 = st.columns([4,3]) - with col_fig1: - with st.spinner('Sedang Membuat Grafik...'): - st.write("Jumlah Tweet Tiap Sentiment",unsafe_allow_html=True) - fig_1 = fn.get_bar_chart(df) - st.plotly_chart(fig_1,use_container_width=True,theme="streamlit") - with col_fig2: - st.write("Wordcloud Tiap Sentiment",unsafe_allow_html=True) - tab1,tab2,tab3 = st.tabs(["😞 negatif","😐 netral","😃 positif"]) - with tab1: - wordcloud_pos = tp.get_wordcloud(df,"negatif") - fig = plt.figure(figsize=(10, 5)) - plt.imshow(wordcloud_pos, interpolation="bilinear") - plt.axis("off") - st.pyplot(fig) - with tab2: - wordcloud_neg = tp.get_wordcloud(df,"netral") - fig = plt.figure(figsize=(10, 5)) - plt.imshow(wordcloud_neg, interpolation="bilinear") - plt.axis("off") - st.pyplot(fig) - with tab3: - wordcloud_net = tp.get_wordcloud(df,"positif") - fig = plt.figure(figsize=(10, 5)) - plt.imshow(wordcloud_net, interpolation="bilinear") - plt.axis("off") - st.pyplot(fig) - st.write("

        ✨ Sentiment Clustering

        ",unsafe_allow_html=True) - @st.experimental_singleton - def load_sentence_model(): - embedding_model = SentenceTransformer('sentence_bert') - return embedding_model - embedding_model = load_sentence_model() - tab4,tab5,tab6 = st.tabs(["😞 negatif","😐 netral","😃 positif"]) - with tab4: - if len(df[df["sentiment"]=="negatif"]) < 11: - st.write("Tweet Terlalu Sedikit, Tidak dapat melakukan clustering") - st.write(df[df["sentiment"]=="negatif"]) - else: - with st.spinner('Sedang Membuat Grafik...(1/2)'): - text,data,fig = tp.plot_text(df,"negatif",embedding_model) - st.plotly_chart(fig,use_container_width=True,theme=None) - with st.spinner('Sedang Mengekstrak Topik... (2/2)'): - fig,topic_modelling = tp.topic_modelling(text,data) - st.plotly_chart(fig,use_container_width=True,theme="streamlit") - with tab5: - if len(df[df["sentiment"]=="netral"]) < 11: - st.write("Tweet Terlalu Sedikit, Tidak dapat melakukan clustering") - st.write(df[df["sentiment"]=="netral"]) - else: - with st.spinner('Sedang Membuat Grafik... (1/2)'): - text,data,fig = tp.plot_text(df,"netral",embedding_model) - st.plotly_chart(fig,use_container_width=True,theme=None) - with st.spinner('Sedang Mengekstrak Topik... (2/2)'): - fig,topic_modelling = tp.topic_modelling(text,data) - st.plotly_chart(fig,use_container_width=True,theme="streamlit") - with tab6: - if len(df[df["sentiment"]=="positif"]) < 11: - st.write("Tweet Terlalu Sedikit, Tidak dapat melakukan clustering") - st.write(df[df["sentiment"]=="positif"]) - else: - with st.spinner('Sedang Membuat Grafik...(1/2)'): - text,data,fig = tp.plot_text(df,"positif",embedding_model) - st.plotly_chart(fig,use_container_width=True,theme=None) - with st.spinner('Sedang Mengekstrak Topik... (2/2)'): - fig,topic_modelling = tp.topic_modelling(text,data) - st.plotly_chart(fig,use_container_width=True,theme="streamlit") - - - - diff --git a/spaces/pongping/converter/README.md b/spaces/pongping/converter/README.md deleted file mode 100644 index a5a0253b7fd9e78b7db2cefae10173fbc0288ce2..0000000000000000000000000000000000000000 --- a/spaces/pongping/converter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Converter -emoji: ♻ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false -license: mit -duplicated_from: camenduru/converter ---- diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/__main__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/__main__.py deleted file mode 100644 index 56fab06e0fe6ac22fce428209c373ecb82d8472a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -import sys -from fontTools.varLib import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/parasite_axes.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/parasite_axes.py deleted file mode 100644 index 4ebd6acc03be2dbb0f5c3360ede2a6a36a3be01b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/parasite_axes.py +++ /dev/null @@ -1,7 +0,0 @@ -from mpl_toolkits.axes_grid1.parasite_axes import ( - host_axes_class_factory, parasite_axes_class_factory) -from .axislines import Axes - - -ParasiteAxes = parasite_axes_class_factory(Axes) 
-HostAxes = SubplotHost = host_axes_class_factory(Axes) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py deleted file mode 100644 index 95ebe8528c2e5fec1a580b00bd79e0617fe7609f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py +++ /dev/null @@ -1,27 +0,0 @@ -import pandas as pd -import pandas._testing as tm - - -class TestUnaryOps: - def test_invert(self): - a = pd.array([True, False, None], dtype="boolean") - expected = pd.array([False, True, None], dtype="boolean") - tm.assert_extension_array_equal(~a, expected) - - expected = pd.Series(expected, index=["a", "b", "c"], name="name") - result = ~pd.Series(a, index=["a", "b", "c"], name="name") - tm.assert_series_equal(result, expected) - - df = pd.DataFrame({"A": a, "B": [True, False, False]}, index=["a", "b", "c"]) - result = ~df - expected = pd.DataFrame( - {"A": expected, "B": [False, True, True]}, index=["a", "b", "c"] - ) - tm.assert_frame_equal(result, expected) - - def test_abs(self): - # matching numpy behavior, abs is the identity function - arr = pd.array([True, False, None], dtype="boolean") - result = abs(arr) - - tm.assert_extension_array_equal(result, arr) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_indexing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_indexing.py deleted file mode 100644 index c0c6f3c977ceb98c06ae391e376210c9a980d3aa..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_indexing.py +++ /dev/null @@ -1,815 +0,0 @@ -from datetime import datetime -import re - -import numpy as np -import pytest - -from pandas._libs.tslibs import period as libperiod -from pandas.errors import InvalidIndexError - -import pandas as pd -from pandas import ( - DatetimeIndex, - NaT, - Period, - PeriodIndex, - Series, - Timedelta, - date_range, - notna, - period_range, -) -import pandas._testing as tm - -dti4 = date_range("2016-01-01", periods=4) -dti = dti4[:-1] -rng = pd.Index(range(3)) - - -@pytest.fixture( - params=[ - dti, - dti.tz_localize("UTC"), - dti.to_period("W"), - dti - dti[0], - rng, - pd.Index([1, 2, 3]), - pd.Index([2.0, 3.0, 4.0]), - pd.Index([4, 5, 6], dtype="u8"), - pd.IntervalIndex.from_breaks(dti4), - ] -) -def non_comparable_idx(request): - # All have length 3 - return request.param - - -class TestGetItem: - def test_getitem_slice_keeps_name(self): - idx = period_range("20010101", periods=10, freq="D", name="bob") - assert idx.name == idx[1:].name - - def test_getitem(self): - idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx") - - for idx in [idx1]: - result = idx[0] - assert result == Period("2011-01-01", freq="D") - - result = idx[-1] - assert result == Period("2011-01-31", freq="D") - - result = idx[0:5] - expected = period_range("2011-01-01", "2011-01-05", freq="D", name="idx") - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - result = idx[0:10:2] - expected = PeriodIndex( - ["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"], - freq="D", - name="idx", - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - 
assert result.freq == "D" - - result = idx[-20:-5:3] - expected = PeriodIndex( - ["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"], - freq="D", - name="idx", - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - result = idx[4::-1] - expected = PeriodIndex( - ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"], - freq="D", - name="idx", - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - def test_getitem_index(self): - idx = period_range("2007-01", periods=10, freq="M", name="x") - - result = idx[[1, 3, 5]] - exp = PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x") - tm.assert_index_equal(result, exp) - - result = idx[[True, True, False, False, False, True, True, False, False, False]] - exp = PeriodIndex( - ["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x" - ) - tm.assert_index_equal(result, exp) - - def test_getitem_partial(self): - rng = period_range("2007-01", periods=50, freq="M") - ts = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) - - with pytest.raises(KeyError, match=r"^'2006'$"): - ts["2006"] - - result = ts["2008"] - assert (result.index.year == 2008).all() - - result = ts["2008":"2009"] - assert len(result) == 24 - - result = ts["2008-1":"2009-12"] - assert len(result) == 24 - - result = ts["2008Q1":"2009Q4"] - assert len(result) == 24 - - result = ts[:"2009"] - assert len(result) == 36 - - result = ts["2009":] - assert len(result) == 50 - 24 - - exp = result - result = ts[24:] - tm.assert_series_equal(exp, result) - - ts = pd.concat([ts[10:], ts[10:]]) - msg = "left slice bound for non-unique label: '2008'" - with pytest.raises(KeyError, match=msg): - ts[slice("2008", "2009")] - - def test_getitem_datetime(self): - rng = period_range(start="2012-01-01", periods=10, freq="W-MON") - ts = Series(range(len(rng)), index=rng) - - dt1 = datetime(2011, 10, 2) - dt4 = datetime(2012, 4, 20) - - rs = ts[dt1:dt4] - tm.assert_series_equal(rs, ts) - - def test_getitem_nat(self): - idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M") - assert idx[0] == Period("2011-01", freq="M") - assert idx[1] is NaT - - s = Series([0, 1, 2], index=idx) - assert s[NaT] == 1 - - s = Series(idx, index=idx) - assert s[Period("2011-01", freq="M")] == Period("2011-01", freq="M") - assert s[NaT] is NaT - - def test_getitem_list_periods(self): - # GH 7710 - rng = period_range(start="2012-01-01", periods=10, freq="D") - ts = Series(range(len(rng)), index=rng) - exp = ts.iloc[[1]] - tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp) - - @pytest.mark.arm_slow - def test_getitem_seconds(self): - # GH#6716 - didx = date_range(start="2013/01/01 09:00:00", freq="S", periods=4000) - pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000) - - for idx in [didx, pidx]: - # getitem against index should raise ValueError - values = [ - "2014", - "2013/02", - "2013/01/02", - "2013/02/01 9H", - "2013/02/01 09:00", - ] - for val in values: - # GH7116 - # these show deprecations as we are trying - # to slice with non-integer indexers - with pytest.raises(IndexError, match="only integers, slices"): - idx[val] - - ser = Series(np.random.default_rng(2).random(len(idx)), index=idx) - tm.assert_series_equal(ser["2013/01/01 10:00"], ser[3600:3660]) - tm.assert_series_equal(ser["2013/01/01 9H"], ser[:3600]) - for d in ["2013/01/01", "2013/01", "2013"]: - tm.assert_series_equal(ser[d], ser) 
- - @pytest.mark.parametrize( - "idx_range", - [ - date_range, - period_range, - ], - ) - def test_getitem_day(self, idx_range): - # GH#6716 - # Confirm DatetimeIndex and PeriodIndex works identically - # getitem against index should raise ValueError - idx = idx_range(start="2013/01/01", freq="D", periods=400) - values = [ - "2014", - "2013/02", - "2013/01/02", - "2013/02/01 9H", - "2013/02/01 09:00", - ] - for val in values: - # GH7116 - # these show deprecations as we are trying - # to slice with non-integer indexers - with pytest.raises(IndexError, match="only integers, slices"): - idx[val] - - ser = Series(np.random.default_rng(2).random(len(idx)), index=idx) - tm.assert_series_equal(ser["2013/01"], ser[0:31]) - tm.assert_series_equal(ser["2013/02"], ser[31:59]) - tm.assert_series_equal(ser["2014"], ser[365:]) - - invalid = ["2013/02/01 9H", "2013/02/01 09:00"] - for val in invalid: - with pytest.raises(KeyError, match=val): - ser[val] - - -class TestGetLoc: - def test_get_loc_msg(self): - idx = period_range("2000-1-1", freq="A", periods=10) - bad_period = Period("2012", "A") - with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"): - idx.get_loc(bad_period) - - try: - idx.get_loc(bad_period) - except KeyError as inst: - assert inst.args[0] == bad_period - - def test_get_loc_nat(self): - didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"]) - pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M") - - # check DatetimeIndex compat - for idx in [didx, pidx]: - assert idx.get_loc(NaT) == 1 - assert idx.get_loc(None) == 1 - assert idx.get_loc(float("nan")) == 1 - assert idx.get_loc(np.nan) == 1 - - def test_get_loc(self): - # GH 17717 - p0 = Period("2017-09-01") - p1 = Period("2017-09-02") - p2 = Period("2017-09-03") - - # get the location of p1/p2 from - # monotonic increasing PeriodIndex with non-duplicate - idx0 = PeriodIndex([p0, p1, p2]) - expected_idx1_p1 = 1 - expected_idx1_p2 = 2 - - assert idx0.get_loc(p1) == expected_idx1_p1 - assert idx0.get_loc(str(p1)) == expected_idx1_p1 - assert idx0.get_loc(p2) == expected_idx1_p2 - assert idx0.get_loc(str(p2)) == expected_idx1_p2 - - msg = "Cannot interpret 'foo' as period" - with pytest.raises(KeyError, match=msg): - idx0.get_loc("foo") - with pytest.raises(KeyError, match=r"^1\.1$"): - idx0.get_loc(1.1) - - with pytest.raises(InvalidIndexError, match=re.escape(str(idx0))): - idx0.get_loc(idx0) - - # get the location of p1/p2 from - # monotonic increasing PeriodIndex with duplicate - idx1 = PeriodIndex([p1, p1, p2]) - expected_idx1_p1 = slice(0, 2) - expected_idx1_p2 = 2 - - assert idx1.get_loc(p1) == expected_idx1_p1 - assert idx1.get_loc(str(p1)) == expected_idx1_p1 - assert idx1.get_loc(p2) == expected_idx1_p2 - assert idx1.get_loc(str(p2)) == expected_idx1_p2 - - msg = "Cannot interpret 'foo' as period" - with pytest.raises(KeyError, match=msg): - idx1.get_loc("foo") - - with pytest.raises(KeyError, match=r"^1\.1$"): - idx1.get_loc(1.1) - - with pytest.raises(InvalidIndexError, match=re.escape(str(idx1))): - idx1.get_loc(idx1) - - # get the location of p1/p2 from - # non-monotonic increasing/decreasing PeriodIndex with duplicate - idx2 = PeriodIndex([p2, p1, p2]) - expected_idx2_p1 = 1 - expected_idx2_p2 = np.array([True, False, True]) - - assert idx2.get_loc(p1) == expected_idx2_p1 - assert idx2.get_loc(str(p1)) == expected_idx2_p1 - tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2) - tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2) - - def test_get_loc_integer(self): - dti 
= date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - with pytest.raises(KeyError, match="16801"): - pi.get_loc(16801) - - pi2 = dti.to_period("Y") # duplicates, ordinals are all 46 - with pytest.raises(KeyError, match="46"): - pi2.get_loc(46) - - def test_get_loc_invalid_string_raises_keyerror(self): - # GH#34240 - pi = period_range("2000", periods=3, name="A") - with pytest.raises(KeyError, match="A"): - pi.get_loc("A") - - ser = Series([1, 2, 3], index=pi) - with pytest.raises(KeyError, match="A"): - ser.loc["A"] - - with pytest.raises(KeyError, match="A"): - ser["A"] - - assert "A" not in ser - assert "A" not in pi - - def test_get_loc_mismatched_freq(self): - # see also test_get_indexer_mismatched_dtype testing we get analogous - # behavior for get_loc - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - pi2 = dti.to_period("W") - pi3 = pi.view(pi2.dtype) # i.e. matching i8 representations - - with pytest.raises(KeyError, match="W-SUN"): - pi.get_loc(pi2[0]) - - with pytest.raises(KeyError, match="W-SUN"): - # even though we have matching i8 values - pi.get_loc(pi3[0]) - - -class TestGetIndexer: - def test_get_indexer(self): - # GH 17717 - p1 = Period("2017-09-01") - p2 = Period("2017-09-04") - p3 = Period("2017-09-07") - - tp0 = Period("2017-08-31") - tp1 = Period("2017-09-02") - tp2 = Period("2017-09-05") - tp3 = Period("2017-09-09") - - idx = PeriodIndex([p1, p2, p3]) - - tm.assert_numpy_array_equal( - idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) - ) - - target = PeriodIndex([tp0, tp1, tp2, tp3]) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "pad"), np.array([-1, 0, 1, 2], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "backfill"), np.array([0, 1, 2, -1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest"), np.array([0, 0, 1, 2], dtype=np.intp) - ) - - res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 day")) - tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp)) - - def test_get_indexer_mismatched_dtype(self): - # Check that we return all -1s and do not raise or cast incorrectly - - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - pi2 = dti.to_period("W") - - expected = np.array([-1, -1, -1], dtype=np.intp) - - result = pi.get_indexer(dti) - tm.assert_numpy_array_equal(result, expected) - - # This should work in both directions - result = dti.get_indexer(pi) - tm.assert_numpy_array_equal(result, expected) - - result = pi.get_indexer(pi2) - tm.assert_numpy_array_equal(result, expected) - - # We expect the same from get_indexer_non_unique - result = pi.get_indexer_non_unique(dti)[0] - tm.assert_numpy_array_equal(result, expected) - - result = dti.get_indexer_non_unique(pi)[0] - tm.assert_numpy_array_equal(result, expected) - - result = pi.get_indexer_non_unique(pi2)[0] - tm.assert_numpy_array_equal(result, expected) - - def test_get_indexer_mismatched_dtype_different_length(self, non_comparable_idx): - # without method we aren't checking inequalities, so get all-missing - # but do not raise - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - - other = non_comparable_idx - - res = pi[:-1].get_indexer(other) - expected = -np.ones(other.shape, dtype=np.intp) - tm.assert_numpy_array_equal(res, expected) - - @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"]) - def test_get_indexer_mismatched_dtype_with_method(self, non_comparable_idx, method): - dti = date_range("2016-01-01", 
periods=3) - pi = dti.to_period("D") - - other = non_comparable_idx - - msg = re.escape(f"Cannot compare dtypes {pi.dtype} and {other.dtype}") - with pytest.raises(TypeError, match=msg): - pi.get_indexer(other, method=method) - - for dtype in ["object", "category"]: - other2 = other.astype(dtype) - if dtype == "object" and isinstance(other, PeriodIndex): - continue - # Two different error message patterns depending on dtypes - msg = "|".join( - [ - re.escape(msg) - for msg in ( - f"Cannot compare dtypes {pi.dtype} and {other.dtype}", - " not supported between instances of ", - ) - ] - ) - with pytest.raises(TypeError, match=msg): - pi.get_indexer(other2, method=method) - - def test_get_indexer_non_unique(self): - # GH 17717 - p1 = Period("2017-09-02") - p2 = Period("2017-09-03") - p3 = Period("2017-09-04") - p4 = Period("2017-09-05") - - idx1 = PeriodIndex([p1, p2, p1]) - idx2 = PeriodIndex([p2, p1, p3, p4]) - - result = idx1.get_indexer_non_unique(idx2) - expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp) - expected_missing = np.array([2, 3], dtype=np.intp) - - tm.assert_numpy_array_equal(result[0], expected_indexer) - tm.assert_numpy_array_equal(result[1], expected_missing) - - # TODO: This method came from test_period; de-dup with version above - def test_get_indexer2(self): - idx = period_range("2000-01-01", periods=3).asfreq("H", how="start") - tm.assert_numpy_array_equal( - idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) - ) - - target = PeriodIndex( - ["1999-12-31T23", "2000-01-01T12", "2000-01-02T01"], freq="H" - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest", tolerance="1 hour"), - np.array([0, -1, 1], dtype=np.intp), - ) - - msg = "Input has different freq=None from PeriodArray\\(freq=H\\)" - with pytest.raises(ValueError, match=msg): - idx.get_indexer(target, "nearest", tolerance="1 minute") - - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest", tolerance="1 day"), - np.array([0, 1, 1], dtype=np.intp), - ) - tol_raw = [ - Timedelta("1 hour"), - Timedelta("1 hour"), - np.timedelta64(1, "D"), - ] - tm.assert_numpy_array_equal( - idx.get_indexer( - target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw] - ), - np.array([0, -1, 1], dtype=np.intp), - ) - tol_bad = [ - Timedelta("2 hour").to_timedelta64(), - Timedelta("1 hour").to_timedelta64(), - np.timedelta64(1, "M"), - ] - with pytest.raises( - libperiod.IncompatibleFrequency, match="Input has different freq=None from" - ): - idx.get_indexer(target, "nearest", tolerance=tol_bad) - - -class TestWhere: - def test_where(self, listlike_box): - i = period_range("20130101", periods=5, freq="D") - cond = [True] * len(i) - expected = i - result = i.where(listlike_box(cond)) - tm.assert_index_equal(result, expected) - - cond = [False] + [True] * (len(i) - 1) - expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D") - result = i.where(listlike_box(cond)) - tm.assert_index_equal(result, expected) - - def test_where_other(self): - i = period_range("20130101", periods=5, freq="D") - for arr in [np.nan, NaT]: - result = i.where(notna(i), other=arr) - expected = i - tm.assert_index_equal(result, expected) - - i2 = i.copy() - i2 = PeriodIndex([NaT, NaT] + 
i[2:].tolist(), freq="D") - result = i.where(notna(i2), i2) - tm.assert_index_equal(result, i2) - - i2 = i.copy() - i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D") - result = i.where(notna(i2), i2.values) - tm.assert_index_equal(result, i2) - - def test_where_invalid_dtypes(self): - pi = period_range("20130101", periods=5, freq="D") - - tail = pi[2:].tolist() - i2 = PeriodIndex([NaT, NaT] + tail, freq="D") - mask = notna(i2) - - result = pi.where(mask, i2.asi8) - expected = pd.Index([NaT._value, NaT._value] + tail, dtype=object) - assert isinstance(expected[0], int) - tm.assert_index_equal(result, expected) - - tdi = i2.asi8.view("timedelta64[ns]") - expected = pd.Index([tdi[0], tdi[1]] + tail, dtype=object) - assert isinstance(expected[0], np.timedelta64) - result = pi.where(mask, tdi) - tm.assert_index_equal(result, expected) - - dti = i2.to_timestamp("S") - expected = pd.Index([dti[0], dti[1]] + tail, dtype=object) - assert expected[0] is NaT - result = pi.where(mask, dti) - tm.assert_index_equal(result, expected) - - td = Timedelta(days=4) - expected = pd.Index([td, td] + tail, dtype=object) - assert expected[0] == td - result = pi.where(mask, td) - tm.assert_index_equal(result, expected) - - def test_where_mismatched_nat(self): - pi = period_range("20130101", periods=5, freq="D") - cond = np.array([True, False, True, True, False]) - - tdnat = np.timedelta64("NaT", "ns") - expected = pd.Index([pi[0], tdnat, pi[2], pi[3], tdnat], dtype=object) - assert expected[1] is tdnat - result = pi.where(cond, tdnat) - tm.assert_index_equal(result, expected) - - -class TestTake: - def test_take(self): - # GH#10295 - idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx") - - for idx in [idx1]: - result = idx.take([0]) - assert result == Period("2011-01-01", freq="D") - - result = idx.take([5]) - assert result == Period("2011-01-06", freq="D") - - result = idx.take([0, 1, 2]) - expected = period_range("2011-01-01", "2011-01-03", freq="D", name="idx") - tm.assert_index_equal(result, expected) - assert result.freq == "D" - assert result.freq == expected.freq - - result = idx.take([0, 2, 4]) - expected = PeriodIndex( - ["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx" - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - result = idx.take([7, 4, 1]) - expected = PeriodIndex( - ["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx" - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - result = idx.take([3, 2, 5]) - expected = PeriodIndex( - ["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx" - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - result = idx.take([-3, 2, 5]) - expected = PeriodIndex( - ["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx" - ) - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - assert result.freq == "D" - - def test_take_misc(self): - index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx") - expected = PeriodIndex( - [ - datetime(2010, 1, 6), - datetime(2010, 1, 7), - datetime(2010, 1, 9), - datetime(2010, 1, 13), - ], - freq="D", - name="idx", - ) - - taken1 = index.take([5, 6, 8, 12]) - taken2 = index[[5, 6, 8, 12]] - - for taken in [taken1, taken2]: - tm.assert_index_equal(taken, expected) - assert isinstance(taken, PeriodIndex) - assert taken.freq == index.freq - 
assert taken.name == expected.name - - def test_take_fill_value(self): - # GH#12631 - idx = PeriodIndex( - ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D" - ) - result = idx.take(np.array([1, 0, -1])) - expected = PeriodIndex( - ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D" - ) - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = PeriodIndex( - ["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D" - ) - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = PeriodIndex( - ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D" - ) - tm.assert_index_equal(result, expected) - - msg = ( - "When allow_fill=True and fill_value is not None, " - "all indices must be >= -1" - ) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - msg = "index -5 is out of bounds for( axis 0 with)? size 3" - with pytest.raises(IndexError, match=msg): - idx.take(np.array([1, -5])) - - -class TestGetValue: - @pytest.mark.parametrize("freq", ["H", "D"]) - def test_get_value_datetime_hourly(self, freq): - # get_loc and get_value should treat datetime objects symmetrically - # TODO: this test used to test get_value, which is removed in 2.0. - # should this test be moved somewhere, or is what's left redundant? - dti = date_range("2016-01-01", periods=3, freq="MS") - pi = dti.to_period(freq) - ser = Series(range(7, 10), index=pi) - - ts = dti[0] - - assert pi.get_loc(ts) == 0 - assert ser[ts] == 7 - assert ser.loc[ts] == 7 - - ts2 = ts + Timedelta(hours=3) - if freq == "H": - with pytest.raises(KeyError, match="2016-01-01 03:00"): - pi.get_loc(ts2) - with pytest.raises(KeyError, match="2016-01-01 03:00"): - ser[ts2] - with pytest.raises(KeyError, match="2016-01-01 03:00"): - ser.loc[ts2] - else: - assert pi.get_loc(ts2) == 0 - assert ser[ts2] == 7 - assert ser.loc[ts2] == 7 - - -class TestContains: - def test_contains(self): - # GH 17717 - p0 = Period("2017-09-01") - p1 = Period("2017-09-02") - p2 = Period("2017-09-03") - p3 = Period("2017-09-04") - - ps0 = [p0, p1, p2] - idx0 = PeriodIndex(ps0) - - for p in ps0: - assert p in idx0 - assert str(p) in idx0 - - # GH#31172 - # Higher-resolution period-like are _not_ considered as contained - key = "2017-09-01 00:00:01" - assert key not in idx0 - with pytest.raises(KeyError, match=key): - idx0.get_loc(key) - - assert "2017-09" in idx0 - - assert p3 not in idx0 - - def test_contains_freq_mismatch(self): - rng = period_range("2007-01", freq="M", periods=10) - - assert Period("2007-01", freq="M") in rng - assert Period("2007-01", freq="D") not in rng - assert Period("2007-01", freq="2M") not in rng - - def test_contains_nat(self): - # see gh-13582 - idx = period_range("2007-01", freq="M", periods=10) - assert NaT not in idx - assert None not in idx - assert float("nan") not in idx - assert np.nan not in idx - - idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M") - assert NaT in idx - assert None in idx - assert float("nan") in idx - assert np.nan in idx - - -class TestAsOfLocs: - def test_asof_locs_mismatched_type(self): - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - pi2 = dti.to_period("H") - - mask = np.array([0, 1, 0], dtype=bool) - - msg = "must be DatetimeIndex or PeriodIndex" - with 
pytest.raises(TypeError, match=msg): - pi.asof_locs(pd.Index(pi.asi8, dtype=np.int64), mask) - - with pytest.raises(TypeError, match=msg): - pi.asof_locs(pd.Index(pi.asi8, dtype=np.float64), mask) - - with pytest.raises(TypeError, match=msg): - # TimedeltaIndex - pi.asof_locs(dti - dti, mask) - - msg = "Input has different freq=H" - with pytest.raises(libperiod.IncompatibleFrequency, match=msg): - pi.asof_locs(pi2, mask) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/idna/compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/idna/compat.py deleted file mode 100644 index 786e6bda63699b72d588ba91dd73df017570aee5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/idna/compat.py +++ /dev/null @@ -1,13 +0,0 @@ -from .core import * -from .codec import * -from typing import Any, Union - -def ToASCII(label: str) -> bytes: - return encode(label) - -def ToUnicode(label: Union[bytes, bytearray]) -> str: - return decode(label) - -def nameprep(s: Any) -> None: - raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol') - diff --git a/spaces/pustozerov/poc-handwriting-ocr/modules/ocr_model_en/characters.py b/spaces/pustozerov/poc-handwriting-ocr/modules/ocr_model_en/characters.py deleted file mode 100644 index 4764634c588a34b5c3924247865ed1983b96259f..0000000000000000000000000000000000000000 --- a/spaces/pustozerov/poc-handwriting-ocr/modules/ocr_model_en/characters.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import cv2 -import numpy as np - -# Preloading trained model with activation function -# Loading is slow -> prevent multiple loads -from modules.ocr_model_en.tfhelpers import Model - -print("Loading segmentation models...") -location = os.path.dirname(os.path.abspath(__file__)) -CNN_model = Model('model/char_classifier/char_classifier') -CNN_slider = (60, 30) -RNN_model = Model('model/gap_classifier/gap_classifier', 'prediction') -RNN_slider = (60, 60) - - -def _classify(img, step=2, RNN=False, slider=(60, 60)): - """Slice the image and return raw output of classifier.""" - length = (img.shape[1] - slider[1]) // 2 + 1 - if RNN: - input_seq = np.zeros((1, length, slider[0] * slider[1]), dtype=np.float32) - input_seq[0][:] = [img[:, loc * step: loc * step + slider[1]].flatten() - for loc in range(length)] - pred = RNN_model.eval_feed({'inputs:0': input_seq, - 'length:0': [length], - 'keep_prob:0': 1})[0] - else: - input_seq = np.zeros((length, slider[0] * slider[1]), dtype=np.float32) - input_seq[:] = [img[:, loc * step: loc * step + slider[1]].flatten() - for loc in range(length)] - pred = CNN_model.run(input_seq) - - return pred - - -def segment(img, step=2, RNN=False, debug=False): - """Take preprocessed image of word and - returns array of positions separating characters. 
- """ - slider = CNN_slider - if RNN: - slider = RNN_slider - - # Run the classifier - pred = _classify(img, step=step, RNN=RNN, slider=slider) - - # Finalize the gap positions from raw prediction - gaps = [] - last_gap = 0 - gap_count = 1 - gap_position_sum = slider[1] / 2 - first_gap = True - gap_block_first = 0 - gap_block_last = slider[1] / 2 - - for i, p in enumerate(pred): - if p == 1: - gap_position_sum += i * step + slider[1] / 2 - gap_block_last = i * step + slider[1] / 2 - gap_count += 1 - last_gap = 0 - if gap_block_first == 0: - gap_block_first = i * step + slider[1] / 2 - else: - if gap_count != 0 and last_gap >= 1: - if first_gap: - gaps.append(int(gap_block_last)) - first_gap = False - else: - gaps.append(int(gap_position_sum // gap_count)) - gap_position_sum = 0 - gap_count = 0 - gap_block_first = 0 - last_gap += 1 - - # Adding final gap position - if gap_block_first != 0: - gaps.append(int(gap_block_first)) - else: - gap_position_sum += (len(pred) - 1) * 2 + slider[1] / 2 - gaps.append(int(gap_position_sum / (gap_count + 1))) - - if debug: - # Drawing lines - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) - for gap in gaps: - cv2.line(img, - (int(gap), 0), - (int(gap), slider[0]), - (0, 255, 0), 1) - # implt(img, t="Separated characters") - - return gaps diff --git a/spaces/pxiaoer/ChatGPT/README.md b/spaces/pxiaoer/ChatGPT/README.md deleted file mode 100644 index c0691f0f20cd2859456c4ba80e3ec7955cbfe726..0000000000000000000000000000000000000000 --- a/spaces/pxiaoer/ChatGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatGPT -emoji: 🌖 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git "a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" "b/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" deleted file mode 100644 index 505086455af8d2676055ab084cf97058b954c7d5..0000000000000000000000000000000000000000 --- "a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/\347\220\206\350\247\243PDF\346\226\207\346\241\243\345\206\205\345\256\271.py" +++ /dev/null @@ -1,112 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption -from .crazy_utils import read_and_clean_pdf_text -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import tiktoken - print('begin analysis on:', file_name) - - ############################## <第 0 步,切割PDF> ################################## - # 递归地切割PDF文件,每一块(尽量是完整的一个section,比如introduction,experiment等,必要时再进行切割) - # 的长度必须小于 2500 个 Token - file_content, page_one = read_and_clean_pdf_text(file_name) # (尝试)按照章节切割PDF - - TOKEN_LIMIT_PER_FRAGMENT = 2500 - - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( - txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT) - page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( - txt=str(page_one), 
get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4) - # 为了更好的效果,我们剥离Introduction之后的部分(如果有) - paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0] - - ############################## <第 1 步,从摘要中提取高价值信息,放到history中> ################################## - final_results = [] - final_results.append(paper_meta) - - ############################## <第 2 步,迭代地历遍整个文章,提取精炼信息> ################################## - i_say_show_user = f'首先你在英文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI - - iteration_results = [] - last_iteration_result = paper_meta # 初始值是摘要 - MAX_WORD_TOTAL = 4096 - n_fragment = len(paper_fragments) - if n_fragment >= 20: print('文章极长,不能达到预期效果') - for i in range(n_fragment): - NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment - i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}" - i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 - llm_kwargs, chatbot, - history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果 - sys_prompt="Extract the main idea of this section." # 提示 - ) - iteration_results.append(gpt_say) - last_iteration_result = gpt_say - - ############################## <第 3 步,整理history> ################################## - final_results.extend(iteration_results) - final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。') - # 接下来两句话只显示在界面上,不起实际作用 - i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。" - chatbot.append([i_say_show_user, gpt_say]) - - ############################## <第 4 步,设置一个token上限,防止回答时Token溢出> ################################## - from .crazy_utils import input_clipping - _, final_results = input_clipping("", final_results, max_token_limit=3200) - yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了 - - -@CatchException -def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe, binary-husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": - txt = '空空如也的输入栏' - report_execption(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, - a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - txt = file_manifest[0] - # 开始正式执行任务 - yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, 
system_prompt) diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Anyview Microscope Software.epub.md b/spaces/quidiaMuxgu/Expedit-SAM/Anyview Microscope Software.epub.md deleted file mode 100644 index 731117b4c313523a8489ddcc3502546744fbe226..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Anyview Microscope Software.epub.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Anyview Microscope Software.epub


        Download File ⇒⇒⇒ https://geags.com/2uCpYJ



        - -We present a software package called Microscopy Image Segmentation Tool (MIST). ... Publishers; For any queries, please email at epub@benthamscience.org. ... separate paper and (2)'MirrorScope', explained as follows: for any view plane ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Bhalobashar Rong Movie Video Song Free Download.md b/spaces/quidiaMuxgu/Expedit-SAM/Bhalobashar Rong Movie Video Song Free Download.md deleted file mode 100644 index 3d0d18e61173705b8f96345ea81d326595d82aca..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Bhalobashar Rong Movie Video Song Free Download.md +++ /dev/null @@ -1,14 +0,0 @@ -

        bhalobashar rong movie video song free download


        DOWNLOAD ✪✪✪ https://geags.com/2uCpZW



        -
        -August 22, 2017 - Film: Bhalobashar Rong Cast: Bappy & Mahi Directed by: Shaheen Sumon Produced and distributed by Jaaz Multimedia Click to share on . Release date: 2017 -Movie description: -Bhalobashar Rong is a touching love story that tells the story of a man who, like many other men, falls head over heels in love with a new girl. -Like all other men, he forgets that his father is not as good as he could be. -Like everyone else, he marries a girl who has her own boyfriend. -Like everyone else, he loves his wife. -Like everyone else, he forgets that his wife's father was a cruel man who kept humiliating and disgracing him all the time. (official video) from the movie song valobashar rong Watch video (Note: default video playback is HD. â–¶ Watch video - Bhalobashar Rong - Bhalobashar Cheye Ektu movie (official video) -Bhalobashar Rong - Film Bhalobashar Cheye Ektu (official video) -Bhalobashar Cheye Ektu Beshi _ Bappy _ Mahi _ Nancy _ Bhalobashar Rong Bengali movie 2012 - Bhalobashar Rong movie (official video) from movie song valobashar rong Watch video (Note: Default video playback is HD. â–¶ Watch video - Bhalobashar Rong - Film Bhalobashar Cheye Ektu (official video) 8a78ff9644
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Duaeganjularshpdfdownload _TOP_.md b/spaces/quidiaMuxgu/Expedit-SAM/Duaeganjularshpdfdownload _TOP_.md deleted file mode 100644 index c7f27f71a0362a41f0cc686d147a2bb54ae95726..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Duaeganjularshpdfdownload _TOP_.md +++ /dev/null @@ -1,46 +0,0 @@ -

        duaeganjularshpdfdownload


        DOWNLOADhttps://geags.com/2uCrUE



        -
        -[Version translated by The Translator in the shape of the dialect of the original Arabic - -Many may regard that it is from a new origin but this du’a is of the greatest importance to the children of the Prophet pbuh (صلى الله عليه وسلم) - -Allah knows the best: - -Muhammad sallallaahu alaihi wa sallam - -“Allah forgive us and our children” - -[English Translation] - -(The Prophet – Peace Be Upon Him) - -In the Name of Allah, the most gracious, the most merciful. - -Allah has given us many times mercy and forgiveness and we can not seek for it any more. - -Allah says: - -(Surely I am) the most forgiving and merciful to the Muslims –) - -We did not seek any more. Allah said: - -(Then surely) Allah is the most forgiving and merciful to the believers - -(Surely Allah is) the most forgiving and merciful to the believers - -We did not seek any more. Allah says: - -(He says) to His slave: “Remember Me, I will remember you.” - -(He says) to His slave: “I will show you the Home of Eternity.” - -(He says) to His slave: “Do not ask Me concerning what you could not find with your intellect.” - -(He says) to His slave: “Know that I am with you wherever you are.” - -(And know that) My slave is the slave of the slaves of Allah. - -(And know that) My slave is the slave of the slaves 4fefd39f24
        -
        -
        -

        diff --git a/spaces/r3gm/RVC_HF/lib/uvr5_pack/utils.py b/spaces/r3gm/RVC_HF/lib/uvr5_pack/utils.py deleted file mode 100644 index 0fafe8793b0d539fa58dd024342250b24b6187a9..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/lib/uvr5_pack/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm -import json - - -def load_data(file_name: str = "./lib/uvr5_pack/name_params.json") -> dict: - with open(file_name, "r") as f: - data = json.load(f) - - return data - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def inference(X_spec, device, model, aggressiveness, data): - """ - data : dic configs - """ - - def _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True - ): - model.eval() - with torch.no_grad(): - preds = [] - - iterations = [n_window] - - total_iterations = sum(iterations) - for i in tqdm(range(n_window)): - start = i * roi_size - X_mag_window = X_mag_pad[ - None, :, :, start : start + data["window_size"] - ] - X_mag_window = torch.from_numpy(X_mag_window) - if is_half: - X_mag_window = X_mag_window.half() - X_mag_window = X_mag_window.to(device) - - pred = model.predict(X_mag_window, aggressiveness) - - pred = pred.detach().cpu().numpy() - preds.append(pred[0]) - - pred = np.concatenate(preds, axis=2) - return pred - - def preprocess(X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - X_mag, X_phase = preprocess(X_spec) - - coef = X_mag.max() - X_mag_pre = X_mag / coef - - n_frame = X_mag_pre.shape[2] - pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) - n_window = int(np.ceil(n_frame / roi_size)) - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - if list(model.state_dict().values())[0].dtype == torch.float16: - is_half = True - else: - is_half = False - pred = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred = pred[:, :, :n_frame] - - if data["tta"]: - pad_l += roi_size // 2 - pad_r += roi_size // 2 - n_window += 1 - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - pred_tta = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred_tta = pred_tta[:, :, roi_size // 2 :] - pred_tta = pred_tta[:, :, :n_frame] - - return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) - else: - return pred * coef, X_mag, np.exp(1.0j * X_phase) - - -def _get_name_params(model_path, model_hash): - data = load_data() - flag = False - ModelName = model_path - for type in list(data): - for model in list(data[type][0]): - for i in range(len(data[type][0][model])): - if str(data[type][0][model][i]["hash_name"]) == model_hash: - flag = True - elif str(data[type][0][model][i]["hash_name"]) in ModelName: - flag = True - - if flag: - model_params_auto = data[type][0][model][i]["model_params"] - param_name_auto = data[type][0][model][i]["param_name"] - if type == "equivalent": - return param_name_auto, model_params_auto - else: - flag = False - return param_name_auto, model_params_auto diff --git a/spaces/raedeXanto/academic-chatgpt-beta/3D Album Commercial Suite 329 Full Crack A Complete Guide to 3D Album Design and Production.md b/spaces/raedeXanto/academic-chatgpt-beta/3D Album Commercial Suite 329 Full 
Crack A Complete Guide to 3D Album Design and Production.md deleted file mode 100644 index 2feecced88c40e1a12667d61723fd23c7b1cc065..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/3D Album Commercial Suite 329 Full Crack A Complete Guide to 3D Album Design and Production.md +++ /dev/null @@ -1,146 +0,0 @@ - -

        What is 3D Album Commercial Suite 329?

        -

If you are looking for software that can help you create amazing digital albums from your photos, then you may want to check out 3D Album Commercial Suite 329. It is a program that can transform your images into stunning photo slideshows and videos in a 3D environment. You can also edit your images, add audio clips, choose from different backgrounds, and apply various transition effects and creative templates. Whether you want to make a personal album, a wedding album, a business presentation, or a video advertisement, 3D Album Commercial Suite 329 can help you achieve your goals.

        -

        Why do you need 3D Album Commercial Suite 329?

        -

        There are many reasons why you may need 3D Album Commercial Suite 329 for your projects. Here are some of them:

        -

        3d Album Commercial Suite 329 Full Crack


        Download Zip ->>->>->> https://tinourl.com/2uKZrS



        -
          -
        • Easy to use: You don't need any special skills or experience to use this software. All you need to do is import your images, choose a template, and customize it as you wish. You can also preview your work before saving it.
        • -
        • Predefined creative backgrounds: You can choose from a wide range of predefined backgrounds that suit different themes and occasions. You can also add your own images as backgrounds if you want.
        • -
        • Convert images into video: You can easily convert your images into a video file that can be edited further or shared online. You can also adjust the duration, speed, and quality of your video.
        • -
        • Stunning transition effects: You can make your photo slideshows more dynamic and attractive by applying various transition effects such as zoom, rotate, flip, fade, etc.
        • -
        • Image editor: You can edit your images with this software by using tools such as paint brush, crop, resize, rotate, etc. You can also add text, stickers, frames, and other elements to your images.
        • -
        -

        How to download and install 3D Album Commercial Suite 329 Full Crack?

        -

        If you want to get 3D Album Commercial Suite 329 for free with a crack file, then you need to follow these steps:

        -
          -
        1. Download the software from this link. The file name is 3D_Album_329.rar and the size is 728 MB.
        2. -
        3. Extract the file using WinRAR or any other software that can handle RAR files.
        4. -
        5. Run the setup file 3d-album-cs-3.29.exe and follow the instructions to install the software.
        6. -
        7. Copy the crack file Album.exe from the folder C:\Program Files\Visviva Software\Album\Album.exe and paste it into the installation directory C:\Program Files\Visviva Software\Album\Album.exe.
        8. -
        9. Run the software and enter any serial number when prompted. You can use this one: A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4A99-A4A9-9A44-4AA0.
        10. -
        11. Congratulations! You have successfully installed 3D Album Commercial Suite 329 Full Crack.
        12. -
        -

        How to use 3D Album Commercial Suite 329?

        -

        To use 3D Album Commercial Suite 329 for creating photo slideshows and videos, you need to follow these steps:

        -


        -
        1. Launch the software and click on New Project.
        2. Select a template from the list, or click on Browse Templates to see more options.
        3. Click on Add Photos to import your images from your computer or other devices.
        4. Drag and drop your images into the timeline at the bottom of the screen. You can rearrange them as you like.
        5. Click on Edit Photo to edit your images with tools such as paint brush, crop, resize, and rotate. You can also add text, stickers, frames, and other elements to your images.
        6. Click on Add Background to choose a background for your slideshow or video. You can select from predefined backgrounds or add your own images as backgrounds.
        7. Click on Add Music to add audio clips to your slideshow or video. You can import audio files from your computer or record your own voice.
        8. Click on Add Effect to apply transition effects between your images, such as zoom, rotate, flip, and fade.
        9. Click on Create Video to convert your slideshow into a video file. You can adjust the duration, speed, and quality of your video.
        10. Click on Publish Video to save your video file on your computer or share it online via email or social media.
        -
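For readers who prefer a scriptable route, the same pipeline (import photos, apply transitions, add background music, export a video) can be sketched in a few lines of Python. This is an illustration only and is not part of 3D Album Commercial Suite: it assumes the third-party moviepy 1.x library, and the photo and music file names are placeholders.

```python
# Minimal slideshow sketch with moviepy 1.x (assumed installed); file names are placeholders.
from moviepy.editor import AudioFileClip, ImageClip, concatenate_videoclips

photos = ["photo1.jpg", "photo2.jpg", "photo3.jpg"]   # step: Add Photos

# step: Add Effect -- 3 seconds per image with a 1-second cross-fade between them
clips = [ImageClip(p).set_duration(3).crossfadein(1) for p in photos]
slideshow = concatenate_videoclips(clips, method="compose", padding=-1)

# step: Add Music -- trim the background track to the slideshow length
music = AudioFileClip("background.mp3").subclip(0, slideshow.duration)
slideshow = slideshow.set_audio(music)

# step: Create Video / Publish Video
slideshow.write_videofile("slideshow.mp4", fps=24)
```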

        What are some of the best templates and effects in 3D Album Commercial Suite 329?

        -

        The software offers many templates and effects that can make your photo slideshows and videos more creative and stunning. Here are some of them:


        What are some of the drawbacks and risks of using 3D Album Commercial Suite 329 Full Crack?

        -

        While 3D Album Commercial Suite 329 Full Crack may seem like a great deal, it also comes with some drawbacks and risks that you should be aware of. Here are some of them:

        -
        • Legal issues: Using a cracked version of the software is illegal and violates the terms and conditions of the original software. You may face legal consequences such as fines or lawsuits if you are caught using or distributing it.
        • Ethical issues: Using a cracked version is unfair to the developers who spent time and money creating the software, depriving them of their rightful income and recognition for their work.
        • Technical issues: A cracked version may cause errors, bugs, crashes, viruses, or malware. You may lose your data or damage your device, and you will not have access to the updates, support, or features available in the original software.
        -

        What are some of the alternatives to 3D Album Commercial Suite 329?

        -

        If you are looking for some alternatives to 3D Album Commercial Suite 329 that are legal, ethical, and reliable, then you may want to check out these software:

        -
        | Template/Effect Name | Description | Example Image |
        | --- | --- | --- |
        | Classic Style | This template gives your slideshow a classic look with elegant frames and backgrounds. | Classic Style |
        | Wedding Style | This template is perfect for creating a wedding album with romantic themes and decorations. | |

        Conclusion

        -

        In conclusion, 3D Album Commercial Suite 329 is a program that can help you create impressive digital albums from your photos. You can also edit your images, add audio clips, choose from different backgrounds, and apply various transition effects and creative templates. However, using a cracked version of the software is illegal, unethical, and risky: you may face legal consequences, technical problems, or ethical dilemmas if you use or distribute it. It is therefore better to use legal, ethical, and reliable alternatives such as Movavi Slideshow Maker, Wondershare DVD Slideshow Builder Deluxe, PhotoStage Slideshow Software, or SmartSHOW 3D, which can also help you create photo slideshows and videos with ease.

        -

        FAQs

        -

        What is the difference between 3D Album Commercial Suite 329 and 3D Album PicturePro Platinum?

        -

        3D Album Commercial Suite 329 is an upgraded version of 3D Album PicturePro Platinum: it offers more templates, effects, and features, and it supports more formats and devices.

        -

        How can I update 3D Album Commercial Suite 329?

        -

        If you have a legal version of 3D Album Commercial Suite 329, you can update it by downloading the latest version from the official website. If you have a cracked version of 3D Album Commercial Suite 329, you cannot update it because it may cause errors or crashes.

        -

        How can I get support for 3D Album Commercial Suite 329?

        -

        If you have a legal version of 3D Album Commercial Suite 329, you can get support by contacting the customer service or visiting the online forum. If you have a cracked version of 3D Album Commercial Suite 329, you cannot get support because it is illegal and violates the terms and conditions of the original software.

        -

        How can I uninstall 3D Album Commercial Suite 329?

        -

        You can uninstall 3D Album Commercial Suite 329 by following these steps:

        -
        1. Go to Start > Control Panel > Add or Remove Programs.
        2. Select 3D Album Commercial Suite from the list and click on Remove.
        3. Follow the instructions to complete the uninstallation process.
        4. Delete any leftover files or folders related to the software from your computer.
        -

        How can I learn more about 3D Album Commercial Suite 329?

        -

        You can learn more about 3D Album Commercial Suite 329 by visiting the official website, watching the video tutorials, or reading the user manual. You can also search for online reviews or articles about the software.

        -

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Creative Media Toolbox 6 Keygen Idm.md b/spaces/raedeXanto/academic-chatgpt-beta/Creative Media Toolbox 6 Keygen Idm.md deleted file mode 100644 index bf26eb0e48f7540e99a347b152354085651237db..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Creative Media Toolbox 6 Keygen Idm.md +++ /dev/null @@ -1,5 +0,0 @@ - -

        - Use headings and subheadings. Use HTML heading tags to create headings and subheadings, and include your keywords in them. You should also use transition words and phrases to connect your paragraphs and sentences smoothly.
        - Write informative and original content. The body of your article is where you provide the information, evidence, examples, or arguments that support your thesis statement. Write in a clear, concise, and conversational style, using short sentences and paragraphs, active voice, and simple words. Avoid plagiarism, grammar errors, spelling mistakes, and factual inaccuracies; tools like Grammarly or Hemingway Editor can help you check and improve your writing.
        - Use rhetorical questions and analogies. Rhetorical questions are questions you ask without expecting an answer, to make a point or provoke a thought. Analogies are comparisons between two things that are similar in some way but different in others. Both can engage the reader's interest, illustrate your points, or explain complex concepts simply. For example:
          - Rhetorical question: Do you want to rank higher on Google and drive more traffic to your website? Of course you do! That's why you need to write SEO-friendly articles.
          - Analogy: Writing an SEO article is like baking a cake. You need the right ingredients (keywords), the right recipe (structure), and the right oven (platform) to make it delicious (engaging) and appealing (optimized).
        - Create a table or a list. Tables and lists are useful ways to present data or information in a structured and visual way. They can help you highlight important points, compare or contrast items, or lay out the steps of a process. You can use HTML table and list tags to create tables or lists in your article.

        | Software Name | Description | Price | Website |
        | --- | --- | --- | --- |
        | Movavi Slideshow Maker | A program for creating photo slideshows and videos with ease. You can add music, transitions, effects, stickers, captions, and more, and edit your photos and videos with tools such as crop, rotate, and enhance. | $39.95 for a lifetime license | https://www.movavi.com/photo-to-dvd-slideshow/ |
        | Wondershare DVD Slideshow Builder Deluxe | A program for creating photo slideshows and videos with over 480 templates and effects. You can add music, voiceovers, animations, and transitions, and edit your photos and videos with tools such as crop, rotate, and red-eye removal. | $69.95 for a lifetime license | https://www.wondershare.com/pro/dvd-slideshow-builder-deluxe.html |
        | PhotoStage Slideshow Software | A program for creating photo slideshows and videos with ease. You can add music, transitions, effects, narration, and captions, and edit your photos and videos with tools such as crop, rotate, brightness, and contrast. | $49.99 for a lifetime license | https://www.nchsoftware.com/slideshow/index.html |
        | SmartSHOW 3D | | | |
        For example:

        - Table:

          | Keyword | Search Volume | Competition |
          |---------|---------------|-------------|
          | SEO writing | 5,400 | Low |
          | SEO article | 2,900 | Medium |
          | SEO content | 1,600 | High |

        - List:
          - Choose your keywords wisely
          - Craft a catchy title and meta description
          - Write an engaging introduction
          - Use headings and subheadings
          - Write informative and original content
          - Use rhetorical questions and analogies
          - Create a table or a list
        - Write a strong conclusion. The conclusion is where you wrap up your article and leave the reader with a lasting impression. Restate your thesis statement in different words, summarize your main points or findings, suggest the key takeaways from your article, and end with a call to action or a question that encourages further discussion or research. For example:

        In conclusion, writing a good SEO article is not as hard as it may seem. You just need to follow some basic steps: choose your keywords wisely, craft a catchy title and meta description, write an engaging introduction, use headings and subheadings, write informative and original content, use rhetorical questions and analogies, create a table or a list, and write a strong conclusion. By following these steps, you can create content that is both relevant and engaging for your target audience, and also optimized for search engines to rank it higher in the search results. If you want to learn more about SEO writing, you can check out some of the resources below. Happy writing!

        Resources

        - [SEO Writing: How to Write SEO Content 2021](https://backlinko.com/seo-writing)
        - [How to Write SEO Content: 6 Steps (with Pictures) - wikiHow](https://www.wikihow.com/Write-SEO-Content)
        - [SEO Copywriting: The Definitive Guide](https://neilpatel.com/blog/seo-copywriting-how-to-write-content-for-people-and-optimize-for-google-2/)

        FAQs

        - What is SEO writing?
          SEO writing is the process of creating content that is both relevant and engaging for your target audience, and also optimized for search engines to rank it higher in the search results.
        - Why is SEO writing important?
          SEO writing can help you attract more organic traffic to your website, increase your brand awareness, authority, and credibility, and generate more leads, conversions, and sales.
        - How do I choose keywords for my SEO article?
          You can use tools like Google Keyword Planner, SEMrush, or Ubersuggest to find and analyze keywords. Choose keywords that are relevant to your topic, have a high search volume, and have low competition.
        - How do I optimize my title and meta description for SEO?
          You can use tools like CoSchedule Headline Analyzer or Yoast SEO. Include your main keyword in both, and make them clear, concise, and compelling.
        - How do I use headings and subheadings in my SEO article?
          Use HTML heading tags to create headings and subheadings, include your keywords in them, and organize your article into logical sections.
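To make the table-and-list tip concrete, here is a minimal Python sketch that renders the sample keyword table and the step checklist above as HTML snippets ready to paste into an article. The keyword figures are the illustrative values already quoted, not real search statistics.

```python
# Render the sample keyword table and step checklist as HTML (illustrative data only).
keywords = [
    ("SEO writing", "5,400", "Low"),
    ("SEO article", "2,900", "Medium"),
    ("SEO content", "1,600", "High"),
]

header = "<tr><th>Keyword</th><th>Search Volume</th><th>Competition</th></tr>"
rows = "\n".join(
    f"<tr><td>{kw}</td><td>{vol}</td><td>{comp}</td></tr>"
    for kw, vol, comp in keywords
)
table_html = f"<table>\n{header}\n{rows}\n</table>"

steps = [
    "Choose your keywords wisely",
    "Craft a catchy title and meta description",
    "Write an engaging introduction",
    "Use headings and subheadings",
    "Write informative and original content",
    "Use rhetorical questions and analogies",
    "Create a table or a list",
    "Write a strong conclusion",
]
list_html = "<ul>\n" + "\n".join(f"<li>{s}</li>" for s in steps) + "\n</ul>"

print(table_html)
print(list_html)
```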

            -

            creative media toolbox 6 keygen idm


            Download Zip: https://tinourl.com/2uL3DB



            b2dd77e56b
            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/HD Online Player (Korean Movie White Night 2012 Eng Su) Stream the Award-Winning Film Now.md b/spaces/raedeXanto/academic-chatgpt-beta/HD Online Player (Korean Movie White Night 2012 Eng Su) Stream the Award-Winning Film Now.md deleted file mode 100644 index 18c66a2cd7ff8aa996081d77e41b154e7e7b65f2..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/HD Online Player (Korean Movie White Night 2012 Eng Su) Stream the Award-Winning Film Now.md +++ /dev/null @@ -1,131 +0,0 @@ - -

            HD Online Player (Korean Movie White Night 2012 Eng Sub)

            -

            If you are looking for a Korean movie that explores the themes of homosexuality, trauma, and redemption, you might want to check out White Night. This film, also known as Baek-Ya, is a 2012 drama directed and written by Lee-Song Hee-il. It tells the story of a flight attendant who returns to Korea after two years and meets a delivery man who helps him confront his past. In this article, we will tell you more about this movie and how you can watch it online with English subtitles.

            -

            HD Online Player (Korean Movie White Night 2012 Eng Su)


            Download File ✵✵✵ https://tinourl.com/2uL3uU



            -

            Introduction

            -

            What is White Night?

            -

            White Night is a Korean film that premiered at the 2012 Jeonju International Film Festival. It is part of a trilogy of films by Lee-Song Hee-il that deal with gay issues in Korea, along with Suddenly Last Summer and Going South. The film has a running time of 75 minutes and stars Kim Hyun-sung, Lee Yi-kyung, and Won Tae-hee.

            -

            Why watch White Night online?

            -

            There are many reasons why you might want to watch White Night online. Here are some of them:

            -
            • You are interested in Korean cinema and culture.
            • You are curious about the representation of homosexuality in Korean media.
            • You are looking for a realistic and emotional drama that tackles social issues.
            • You are a fan of the actors or the director.
            • You want to enjoy the beautiful cinematography and music of the film.
            -

            Plot summary

            -

            A painful reunion

            -

            The film begins with Won-Gyu (Kim Hyun-sung), a flight attendant who left Korea two years ago after a traumatic incident. He comes back for one night and meets his ex-lover Do-Yoon (Won Tae-hee) at a cafe. Do-Yoon is still hurt by Won-Gyu's departure and reveals that he is seeing someone else. Won-Gyu also claims that he has a new partner in Germany, but he seems to be lying. He leaves the cafe abruptly when Do-Yoon goes to order another drink.

            -

            Watch White Night 2012 Korean film online
            -White Night 2012 HD 720p KOR Eng Sub Bilibili
            -White Night 2012 Leesong Hee-il film streaming
            -White Night Baek-ya 2012 Korean Film Biz Zone
            -White Night 2012 Korean queer film inspired by true story
            -How to download White Night 2012 Korean movie with subtitles
            -White Night 2012 Won-gyu and Tae-jun romance movie
            -White Night 2012 Korean film review and rating
            -White Night 2012 cast and crew information
            -White Night 2012 trailer and poster
            -Where to watch White Night 2012 online for free
            -White Night 2012 Korean film festival awards and nominations
            -White Night 2012 plot summary and analysis
            -White Night 2012 behind the scenes and interviews
            -White Night 2012 soundtrack and songs
            -White Night 2012 DVD and Blu-ray release date
            -White Night 2012 fan art and fan fiction
            -White Night 2012 trivia and facts
            -White Night 2012 quotes and dialogues
            -White Night 2012 best scenes and moments
            -White Night 2012 comparison with other Korean queer films
            -White Night 2012 social media and hashtags
            -White Night 2012 box office and revenue
            -White Night 2012 critical reception and feedback
            -White Night 2012 themes and messages
            -Watch Korean movie White Night 2012 with English subtitles online
            -Bilibili video White Night 2012 HD 720p KOR Eng Sub
            -Streaming Leesong Hee-il's film White Night 2012 online
            -Korean Film Biz Zone's directory of White Night Baek-ya 2012
            -Website article on White Night 2012 Korean queer film based on true story
            -Download link for Korean movie White Night 2012 with Eng Subs
            -Romance movie White Night 2012 featuring Won-gyu and Tae-jun
            -Review and rating of Korean film White Night 2012 online
            -Cast and crew information of White Night 2012 movie
            -Trailer and poster of Korean film White Night 2012 online
            -Free online streaming of White Night 2012 movie
            -Awards and nominations of Korean film festival for White Night 2012
            -Plot summary and analysis of Korean movie White Night 2012
            -Behind the scenes and interviews of White Night 2012 film
            -Soundtrack and songs of Korean movie White Night 2012 online
            -DVD and Blu-ray release date of White Night 2012 film
            -Fan art and fan fiction of Korean movie White Night 2012
            -Trivia and facts of Korean film White Night 2012 online
            -Quotes and dialogues of White Night 2012 movie
            -Best scenes and moments of Korean film White Night 2012 online
            -Comparison with other Korean queer films of White Night 2012 movie
            -Social media and hashtags of Korean film White Night 2012 online
            -Box office and revenue of Korean movie White Night 2012
            -Critical reception and feedback of Korean film White Night 2012 online
            -Themes and messages of Korean movie White Night 2012

            -

            A new encounter

            -

            Later that night, Won-Gyu is approached by Tae-Joon (Lee Yi-kyung), a delivery man who contacted him online. They flirt and go to a motel, where Tae-Joon notices a scar on Won-Gyu's shoulder. Won-Gyu becomes defensive and refuses to have sex with Tae-Joon, but he still wants his company. They go to a bar, where Won-Gyu recognizes a man who was involved in his past attack. He chases him down the streets, followed by Tae-Joon.

            -

            A night of revenge and healing

            -

            Won-Gyu explains to Tae-Joon that he was stabbed by a homophobic gang two years ago, along with Do-Yoon. That incident changed his life and forced him to leave Korea. He wants to find the man who stabbed him and get revenge. Tae-Joon reluctantly agrees to help him and they go to a pool hall where the attacker is hiding. They beat him up and escape. Afterwards, they go to a park and talk about their lives. Won-Gyu admits that he is not seeing anyone in Germany, while Tae-Joon reveals his name and his dreams. They share a tender moment and have sex. Before parting ways, Tae-Joon gives Won-Gyu his phone number.

            -

            Cast and crew

            -

            The main actors

            -

            The film features three main actors who deliver convincing performances:

            | Name | Role | Other works |
            | --- | --- | --- |
            | Kim Hyun-sung | Won-Gyu | The King's Case Note, The Divine Fury, Vincenzo |
            | Lee Yi-kyung | Tae-Joon | Mirror of the Witch, Welcome to Waikiki, Royal Secret Agent |
            | Won Tae-hee | Do-Yoon | The King Loves, The Smile Has Left Your Eyes, Vagabond |
            -

            The director and writer

            -

            The film was directed and written by Lee-Song Hee-il, who is one of the most prominent filmmakers in Korean queer cinema. He made his debut with the short film Good Romance in 2001, which won the Teddy Award at the Berlin International Film Festival. He also directed other acclaimed films such as No Regret, Night Flight, A Distant Place, and Courier Boy Kissing Club 2020: The Movie.

            -

            The music composer

            -

            The film's music was composed by Chris Garneau, an American singer-songwriter who is known for his indie folk style. He has released four albums: Music for Tourists, El Radio, Winter Games, and Yours. He also contributed songs to other films such as C.R.A.Z.Y., The Smell of Us, and Kiss Me Again: The Series 2 - PeteKao Cut.

            -

            Reviews and ratings

            -

            Critics' opinions

            -

            The film received mostly positive reviews from critics who praised its realism, sensitivity, and aesthetics. Here are some excerpts from their reviews:

            -
            • "White Night is an intimate portrait of two men who find each other in the darkness of Seoul...The film captures the loneliness, fear, and longing of its characters with honesty and grace." - James Marsh, Screen Anarchy.
            • "White Night is a quiet but powerful film that explores the effects of homophobia on individuals and society...The film's strength lies in its subtle storytelling, naturalistic acting, and beautiful cinematography." - Rowena Santos Aquino, VCinema.
            • "White Night is a compelling drama that tackles complex issues with sensitivity and nuance...The film's minimalist style allows the audience to focus on the emotions and motivations of its characters." - Pierce Conran, Korean Film Council.
            • "White Night is a disappointing film that fails to deliver on its potential...The film's plot is thin, its pace is slow, and its characters are underdeveloped." - Darcy Paquet, Koreanfilm.org.
            • "White Night is a mediocre film that tries too hard to be profound...The film's dialogue is clichéd, its acting is wooden, and its music is intrusive." - Mark Adams, Screen Daily.
            -

            Audience's feedback

            -

            The film also received mixed feedback from the audience who watched it online or at festivals. Here are some examples of their comments:

            -
            • "I loved this movie so much. It was so realistic and touching. The actors did a great job portraying their characters' emotions. The ending was bittersweet but hopeful." - user A on MyDramaList.
            • "I hated this movie so much. It was so boring and depressing. The actors had no chemistry at all. The ending was abrupt and unsatisfying." - user B on IMDb.
            • "I liked this movie but I felt it could have been better. It was interesting but not very engaging. The actors were good but not memorable. The ending was ambiguous but not intriguing." - user C on Rotten Tomatoes.
            -

              Awards and nominations

              -

              The film was nominated for several awards at various film festivals and events. Here are some of them:

              | Award | Category | Result |
              | --- | --- | --- |
              | Jeonju International Film Festival | Korean Feature Film Competition | Nominated |
              | Toronto Reel Asian International Film Festival | Best Feature Film | Nominated |
              | Asian Film Awards | Best Newcomer (Lee Yi-kyung) | Nominated |
              | Korean Association of Film Critics Awards | Best New Director (Lee-Song Hee-il) | Won |
              | Busan International Film Festival | NETPAC Award (Lee-Song Hee-il) | Won |
              -

              Conclusion

              -

              Final thoughts

              -

              In conclusion, White Night is a Korean movie that explores the themes of homosexuality, trauma, and redemption. It tells the story of a flight attendant who returns to Korea after two years and meets a delivery man who helps him confront his past. The film features realistic and emotional performances by the main actors, as well as a minimalist and aesthetic style by the director and writer. The film received mostly positive reviews from critics and mixed feedback from the audience. The film was also nominated for several awards at various film festivals and events.

              -

              FAQs

              -
              • Q: Where can I watch White Night online with English subtitles?
                A: You can watch White Night online with English subtitles on streaming platforms such as Viki, Kocowa, OnDemandKorea, and AsianCrush.
              • Q: Is White Night based on a true story?
                A: No. It is a fictional story created by the director and writer Lee-Song Hee-il.
              • Q: What is the meaning of the title White Night?
                A: The title refers to the night that Won-Gyu and Tae-Joon spend together. It also symbolizes the contrast between the darkness of their pasts and the light of their futures.
              • Q: What is the song that plays at the end of the movie?
                A: The closing song is "Dirty Night Clowns" by Chris Garneau, which is also the main theme of the film.
              • Q: Is there a sequel to White Night?
                A: No. However, two other films by Lee-Song Hee-il form a trilogy with White Night: Suddenly Last Summer and Going South.
              -

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/rahul-pandey-ct/kinship-llm/app.py b/spaces/rahul-pandey-ct/kinship-llm/app.py deleted file mode 100644 index d58faf7af78af22d8f50376425404534d5d04eab..0000000000000000000000000000000000000000 --- a/spaces/rahul-pandey-ct/kinship-llm/app.py +++ /dev/null @@ -1,705 +0,0 @@ - -## required lib, required "pip install" -# import transformers -# import accelerate -import openai -import torch -import cryptography -import cryptography.fernet -## interface libs, required "pip install" -import gradio -import huggingface_hub -import huggingface_hub.hf_api -## standard libs, no need to install -import json -import requests -import time -import os -import random -import re -import sys -import psutil -import threading -import socket -# import PIL -# import pandas -import matplotlib -class HFace_Pluto(object): - # - # initialize the object - def __init__(self, name="Pluto",*args, **kwargs): - super(HFace_Pluto, self).__init__(*args, **kwargs) - self.author = "Duc Haba" - self.name = name - self._ph() - self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__)) - self._pp("Code name", self.name) - self._pp("Author is", self.author) - self._ph() - # - # define class var for stable division - self._device = 'cuda' - self._steps = [3,8,21,55,89,144] - self._guidances = [1.1,3.0,5.0,8.0,13.0,21.0] - self._xkeyfile = '.xoxo' - self._models = [] - self._seed = 667 # sum of walnut in ascii (or Angle 667) - self._width = 512 - self._height = 512 - self._step = 50 - self._guidances = 7.5 - #self._generator = torch.Generator(device='cuda') - self.pipes = [] - self.prompts = [] - self.images = [] - self.seeds = [] - self.fname_id = 0 - self.dname_img = "img_colab/" - self._huggingface_key="gAAAAABkduT-XeiYtD41bzjLtwsLCe9y1FbHH6wZkOZwvLwCrgmOtNsFUPWVqMVG8MumazFhiUZy91mWEnLDLCFw3eKNWtOboIyON6yu4lctn6RCQ4Y9nJvx8wPyOnkzt7dm5OISgFcm" - self._gpt_key="gAAAAABlJDzTj1q2suhncmXH9EKSQhzQznR1PDmFJgzhFM5k4hlrN13nae2XZ22GKhPa_6RHmnCdGkix-NWK67HEkJrnjp_Eno6t_Ge9WTAI_FNmHsb2vP8f_TRXDFQ6Poq0zYqxoguWdJQ1VTH66KX2Huw6CVoJCw==" - self._fkey="fes_f8Im569hYnI1Tn6FqP-6hS4rdmNOJ6DWcRPOsvc=" - self._color_primary = '#2780e3' #blue - self._color_secondary = '#373a3c' #dark gray - self._color_success = '#3fb618' #green - self._color_info = '#9954bb' #purple - self._color_warning = '#ff7518' #orange - self._color_danger = '#ff0039' #red - self._color_mid_gray = '#495057' - return - # - # pretty print output name-value line - def _pp(self, a, b,is_print=True): - # print("%34s : %s" % (str(a), str(b))) - x = f'{"%34s" % str(a)} : {str(b)}' - y = None - if (is_print): - print(x) - else: - y = x - return y - # - # pretty print the header or footer lines - def _ph(self,is_print=True): - x = f'{"-"*34} : {"-"*34}' - y = None - if (is_print): - print(x) - else: - y = x - return y - # - # fetch huggingface file - def fetch_hface_files(self, - hf_names, - hf_space="duchaba/monty", - local_dir="/content/"): - f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names)) - try: - for f in hf_names: - lo = local_dir + f - huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f, - use_auth_token=True,repo_type=huggingface_hub.REPO_TYPE_SPACE, - force_filename=lo) - except: - self._pp("*Error", f) - return - # - # - def push_hface_files(self, - hf_names, - hf_space="duchaba/skin_cancer_diagnose", - local_dir="/content/"): - f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names)) - try: - for f in hf_names: - lo = local_dir + f - 
huggingface_hub.upload_file( - path_or_fileobj=lo, - path_in_repo=f, - repo_id=hf_space, - repo_type=huggingface_hub.REPO_TYPE_SPACE) - except Exception as e: - self._pp("*Error", e) - return - # - # Define a function to display available CPU and RAM - def fetch_system_info(self): - s='' - # Get CPU usage as a percentage - cpu_usage = psutil.cpu_percent() - # Get available memory in bytes - mem = psutil.virtual_memory() - # Convert bytes to gigabytes - mem_total_gb = mem.total / (1024 ** 3) - mem_available_gb = mem.available / (1024 ** 3) - mem_used_gb = mem.used / (1024 ** 3) - # Print the results - s += f"CPU usage: {cpu_usage}%\n" - s += f"Total memory: {mem_total_gb:.2f} GB\n" - s += f"Available memory: {mem_available_gb:.2f} GB\n" - # print(f"Used memory: {mem_used_gb:.2f} GB") - s += f"Memory usage: {mem_used_gb/mem_total_gb:.2f}%\n" - return s - # - def restart_script_periodically(self): - while True: - #random_time = random.randint(540, 600) - random_time = random.randint(15800, 21600) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - return - # - def write_file(self,fname, txt): - f = open(fname, "w") - f.writelines("\n".join(txt)) - f.close() - return - # - def fetch_gpu_info(self): - s='' - try: - s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n' - s += f'GPU ready staus {torch.cuda.is_available()}\n' - s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n' - s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n' - except Exception as e: - s += f'**Warning, No GPU: {e}' - return s - # - def _fetch_crypt(self,is_generate=False): - s=self._fkey - if (is_generate): - s=open(self._xkeyfile, "rb").read() - return s - # - def _gen_key(self): - key = cryptography.fernet.Fernet.generate_key() - with open(self._xkeyfile, "wb") as key_file: - key_file.write(key) - return - # - def _decrypt_it(self, x): - y = self._fetch_crypt() - f = cryptography.fernet.Fernet(y) - m = f.decrypt(x) - return m.decode() - # from cryptography.fernet import Fernet - def _encrypt_it(self, x): - key = self._fetch_crypt() - p = x.encode() - f = cryptography.fernet.Fernet(key) - y = f.encrypt(p) - return y - # - def _login_hface(self): - try: - huggingface_hub.login(self._decrypt_it(self._huggingface_key), - add_to_git_credential=True) # non-blocking login - openai.api_key = self._decrypt_it(self._gpt_key) - except Exception as e: - print(f'Error: {e}') - self._ph() - return - # - def _fetch_version(self): - s = '' - print(f"{'torch: 2.0.1':<25} Actual: {torch.__version__}") - # print(f"{'transformers: 4.29.2':<25} Actual: {transformers.__version__}") - s += f"{'openai: 0.27.7,':<28} Actual: {openai.__version__}\n" - s += f"{'huggingface_hub: 0.14.1,':<28} Actual: {huggingface_hub.__version__}\n" - s += f"{'gradio: 3.32.0,':<28} Actual: {gradio.__version__}\n" - s += f"{'cryptography: 3.0.2,':<28} cryptography: {gradio.__version__}\n" - - return s - # - def _fetch_host_ip(self): - s='' - hostname = socket.gethostname() - ip_address = socket.gethostbyname(hostname) - s += f"Hostname: {hostname}\n" - s += f"IP Address: {ip_address}\n" - return s - # parse the answer - def get_answer(self, resp, index=0): - return resp.get('choices')[index].get('text') - # print out the answer - def print_answer(self, resp, index=0,is_print_json=False): - print('----------') - print('The Answer') - print('----------') - rdata = self.get_answer(resp, index) - # print(textwrap.fill(rdata, width=72, replace_whitespace=False)) - print(rdata) - if 
(is_print_json): - print('----------') - print('JSON Response') - print('----------') - print(resp) - return - # - # ask me function - def ask_me(self, prompt, - model="text-davinci-003", - suffix=None, - max_tokens=128, # length of output, max=2048 - temperature=1.0, # randomness: 0 to 2.0, higher (2.0) is a lot of random - top_p=1.0, # accurate: 0 to 1.0 - n=1, # number of output - stream=False, # partial progress return - logprobs=None, # log properbility of token - echo=False, # include the prompt in the response - stop=None, # stop process on this character - presence_penalty=0, # likelyhood of new topic: -2.0 to 2.0 - frequency_penalty=0,# llikelyhood of repeat: -2.0 to 2.0 - best_of=1, # best of choices from "n" above - logit_bias=None, # do not use this word - user='None', # user name for reporting back to OpenAI - is_print_json=False, - is_return_val=False - ): - try: - response = openai.Completion.create( - prompt=prompt, - model=model, - suffix=suffix, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - n=n, - stream=stream, - logprobs=logprobs, - echo=echo, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - best_of=best_of, - #logit_bias=logit_bias, - user=user - ) - return_val = None - if (is_return_val): - return_val = response - else: - self.print_answer(response,is_print_json=is_print_json) - return return_val - except Exception as e: - print(f'Error on model {model}. {e}') - # - def talk_to_me(self, prompt, - model='gpt-3.5-turbo', # model defaut to gpt-3.5-turbo - role='user', # role can be either "system", "user", or "assistant" - # # -- below params are fewer then ask_me() - max_tokens=128, # length of output, max=2048 - temperature=1.0, # randomness: 0 to 2.0, higher (2.0) is a lot of random - top_p=1.0, # accurate: 0 to 1.0 - n=1, # number of output - stream=False, # partial progress return - stop=None, # stop process on this character - presence_penalty=0, # likelyhood of new topic: -2.0 to 2.0 - frequency_penalty=0, # llikelyhood of repeat: -2.0 to 2.0 - logit_bias=None, # do not use this word - user='None', # user name for reporting back to OpenAI - is_print_json=False, - is_return_val=False, - is_return_conversation=False - ): - try: - if (self.data_chat is None): - self.data_chat = [{'role': 'system','content':'It is a wonderful day.'}, - {'role': role,'content':prompt}] - else: - self.data_chat.append({'role': role,'content':prompt}) - # - response = openai.ChatCompletion.create(model=model, - messages=self.data_chat, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - n=n, - stream=stream, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - #logit_bias=logit_bias, - user=user - ) - return_msg = response.choices[0].message.content - except Exception as e: - err = f'Error: {e}' - response = err - # - try: - r = response.choices[0].message.role - self.data_chat.append({'role': r,'content':return_msg}) - except Exception as e: - print(f'Error: {e}') - # - return_val = None - if (is_return_val): - return_val = response - elif (is_return_conversation): - return_val = self.data_chat - else: - print(return_msg) - if (is_print_json): - print(response) - return return_val - # -# add module/method -# -import functools -def add_method(cls): - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - setattr(cls, func.__name__, wrapper) - return func # returning func means func can still be used normally - 
return decorator -# -monty = HFace_Pluto("Monty") -monty._login_hface() -print(monty._fetch_version()) -monty._ph() -print(monty.fetch_system_info()) -monty._ph() -print(monty.fetch_gpu_info()) -monty._ph() -print(monty._fetch_host_ip()) -monty._ph() - -monty.session_1 = """ -Title: A Concerned Husky Owner and Understanding Veterinarian - -[Setting: A brightly lit veterinarian clinic. A woman, Jane, stands by the door with a huskie named Luna by her side. Dr. Samuel, the veterinarian, sits behind the desk. His medical certificate hangs on the wall, next to a small plaque indicating his condition: epilepsy.] - -Jane: [holding Luna’s leash tightly] "Hello, Dr. Samuel. I've heard so much about you. I hope you can help Luna; she’s been acting a bit off lately." - -Dr. Samuel: [smiling warmly] "Hello Jane, and hello to you too, Luna! I'll do my best. Can you describe what's been going on?" - -Jane: "Well, over the past week, she’s been very lethargic and doesn’t seem to respond as quickly as she used to. At first, I thought it might be her age, but it seems too sudden." - -Dr. Samuel: "Hmm. Any changes in her diet, environment, or daily routine?" - -Jane: "No, not at all. Everything has been consistent." - -Dr. Samuel: "I see. We'll certainly look into it. But before we proceed, I should inform you—I have epilepsy. I'm well-managed, but I believe in transparency with my clients. If at any moment you're uncomfortable, please let me know." - -Jane: [looking surprised but appreciative] "Oh, thank you for sharing that with me. I think it's brave of you to work in this field with that condition. But Luna and I trust you." - -Dr. Samuel: "Thank you, Jane. It's important for me to advocate for those with conditions like mine and show that we can lead successful lives. Now, back to Luna. Any recent incidents that might have caused stress?" - -Jane: "Not that I can think of. But I did notice she had what looked like a short tremor a couple of days ago. It lasted only a few seconds, and then she seemed fine." - -Dr. Samuel: [leaning forward with interest] "That's an important detail. Seizures can manifest in various ways in dogs, much like in humans. They can range from those tiny tremors you mentioned to more severe forms. It's possible Luna experienced a minor seizure." - -Jane: "Oh my! That's concerning. Is it something like your epilepsy?" - -Dr. Samuel: "It could be. Epilepsy in dogs isn't uncommon, and huskies are among the breeds that can be predisposed to it. However, it’s essential not to jump to conclusions. We'll need to run some tests to determine the cause." - -Jane: "Of course. I just want what's best for Luna." - -Dr. Samuel: "I understand. And I'm here to help. Let's start with a complete check-up and blood tests. Depending on the results, we might need to look into further neurological evaluations." - -Jane: "Thank you, Dr. Samuel. I'm glad we're in capable hands." - -Dr. Samuel: "It's my pleasure, Jane. And remember, even if it turns out to be epilepsy, many dogs live full, happy lives with the right treatment and care. Just like humans." - -Jane: [smiling] "Thank you. That’s comforting to hear." - -[End Scene.] -""" -monty.session_2 = """ -[Setting: A cozy veterinarian clinic, filled with the soft hum of fluorescent lights overhead. In the center of the room, Mrs. Collins holds a fluffy Persian cat named Muffin in her arms. Dr. Martinez, the veterinarian, is seated behind a desk, looking over Muffin's previous records.] - -Mrs. Collins: [stroking Muffin's fur gently] "Dr. 
Martinez, I'm so worried. Muffin has always been such a good eater, but she's barely touched her food for days." - -Dr. Martinez: [looking up with a reassuring smile] "Hello Mrs. Collins. It's always a pleasure to see Muffin, even under these circumstances. Can you tell me when she last ate properly?" - -Mrs. Collins: "It's been about four days. At first, I thought she might be bored with her food, so I tried giving her a new brand, but she didn't seem interested in that either." - -Dr. Martinez: "Have there been any other changes? Vomiting, diarrhea, lethargy, or behavioral shifts?" - -Mrs. Collins: "She's been more withdrawn than usual. I found her hiding under the bed more often, which isn't like her at all." - -Dr. Martinez: "I see. Persians, with their distinct facial structures, can sometimes develop dental or oral issues that might deter them from eating. But behavioral changes can also indicate discomfort or illness elsewhere. I'd like to conduct a full examination to rule out any potential problems. With your permission, of course." - -Mrs. Collins: "Of course, Dr. Martinez. I just want to know what's wrong and how I can help her." - -Dr. Martinez: [gently lifting Muffin from Mrs. Collins's arms and placing her on the examination table] "Let's start by checking her teeth and mouth." - -[After a few moments of examining Muffin's mouth] - -Dr. Martinez: "Her teeth seem to be in good shape, no apparent dental issues. Let's check her abdomen." - -[Muffin gives a low growl as Dr. Martinez gently presses on her abdomen] - -Dr. Martinez: "She seems a bit tender here. It's possible she could have an obstruction or some gastrointestinal discomfort. We might need to take an X-ray to be sure." - -Mrs. Collins: [looking worried] "Oh dear! Is it serious?" - -Dr. Martinez: "It's too early to say, Mrs. Collins. But it's good that you brought her in when you did. Early detection can make all the difference." - -Mrs. Collins: "Please do whatever you need to do. I just want her to be okay." - -Dr. Martinez: "We'll do our best. Let's get those X-rays and see if we can pinpoint the issue. And don't worry, Mrs. Collins, Muffin is in good hands." - -Mrs. Collins: [sighing with relief] "Thank you, Dr. Martinez. I knew bringing her here was the right choice." - -[End Scene.] -""" -monty.session_3 = """ -Mr. Barkley: "Dr. Whiskerstein! Emergency! Rover thinks he's a cat!" - -Dr. Whiskerstein: [peering through the giant magnifying glass at Rover, causing the dog's eyes to comically enlarge] "Hmm... indeed! Very cat-like symptoms! Does he also hate water and climb trees?" - -Mr. Barkley: "No, but he's NOT eating his food! Instead, he's been batting at this feathered cat toy for hours. Yesterday, he tried to fit into a cardboard box! It was... a spectacle." - -Dr. Whiskerstein: "Oh my! Did he also try to conquer the world with his cuteness and indifference?" - -Mr. Barkley: "Well, he did knock a vase off the table and then looked at me like it was my fault." - -Dr. Whiskerstein: [gasping dramatically] "The transformation is complete! Tell me, has he been hanging around any feline influencers lately?" - -Mr. Barkley: "Well, my neighbor did get a new cat, Sir Purr-a-Lot, who's always wearing those trendy cat sunglasses. They've been peering at each other across the fence." - -Dr. Whiskerstein: "Ah-ha! Sir Purr-a-Lot strikes again! He's been teaching dogs the way of the cat. Last week, a bulldog came in here trying to use a litter box!" - -Mr. Barkley: "Goodness! What do we do, Doc? 
I miss my dog who, you know, acted like a dog." - -Dr. Whiskerstein: "Fear not! I prescribe... a week of doggy activities! Fetch, tug of war, and absolutely NO cat videos on YouTube." - -Mr. Barkley: "And what about his diet?" - -Dr. Whiskerstein: "Swap the feathered toys with bacon treats. If that doesn't bring the dog out in him, I don’t know what will!" - -Mr. Barkley: "Thank you, Dr. Whiskerstein! Come on, Rover. Let’s reclaim your canine pride." - -Dr. Whiskerstein: [saluting with the oversized magnifying glass] "Godspeed, Mr. Barkley! And beware of those feline influencers." - -[End Scene.] -""" - -@add_method(HFace_Pluto) -def talk_to_me(self, prompt, - model='gpt-3.5-turbo', # model defaut to gpt-3.5-turbo - role='user', # role can be either "system", "user", or "assistant" - # # -- below params are fewer then ask_me() - max_tokens=1050, # length of output, max=2048 - temperature=0.7, # randomness: 0 to 2.0, higher (2.0) is a lot of random - top_p=1.0, # accurate: 0 to 1.0 - n=1, # number of output - stream=False, # partial progress return - stop=None, # stop process on this character - presence_penalty=0, # likelyhood of new topic: -2.0 to 2.0 - frequency_penalty=0, # llikelyhood of repeat: -2.0 to 2.0 - logit_bias=None, # do not use this word - user='None', # user name for reporting back to OpenAI - is_print_json=False, - is_return_val=False, - is_return_conversation=False, - data_chat=None - ): - try: - if (data_chat is None): - self.data_chat = [{'role': 'system','content':'It is a wonderful day.'}, - {'role': role,'content':prompt}] - else: - self.data_chat = data_chat - # - response = openai.ChatCompletion.create(model=model, - messages=self.data_chat, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - n=n, - stream=stream, - stop=stop, - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - #logit_bias=logit_bias, - user=user - ) - #return_msg = response.choices[0].message.content - except Exception as e: - # use older model syntax - # - try: - response = openai.Completion.create( - model=model, - prompt = prompt, - max_tokens=max_tokens, - n=1, - stop=None, - temperature=temperature) - # answer = response.choices[0].text.strip() - except Exception as e: - response = e - # - return response -# -@add_method(HFace_Pluto) -def _fetch_response_msg(self, resp): - try: - msg = resp.choices[0].message.content - except Exception as e: - try: - msg = resp.choices[0].text.strip() - except Exception as e: - msg = f'{resp}: and : {e}' - return msg -# -@add_method(HFace_Pluto) -def _fetch_response_cost(self, resp): - try: - m = str(resp.model) - if m.startswith('gpt-4'): - rate = 0.0675 / 1000 - elif m.startswith('gpt-3'): - rate = 0.002625 / 1000 - else: - rate = 0.0008 / 1000 - cost = round((resp.usage.total_tokens * rate), 4) - if cost == 0.0: - cost_str = f'${cost} (less then 1/100th of penny)' - else: - cost_str = f'${cost}' - except Exception as e: - cost = 0.00001 - cost_str ='$0.0000 (less then 1/100th of penny)' - return cost, cost_str - -@add_method(HFace_Pluto) -def _fetch_answer(self, px, model='gpt-4',is_json_output=False, session=None): - if (session is None): - session = self.session_1 - # - start = time.process_time() - p = f'{px} {session}' - resp = monty.talk_to_me(p, model=model, max_tokens=1000) - end = time.process_time() - cost, cost_str = monty._fetch_response_cost(resp) - try: - resp["est_cost"] = cost - resp["est_cpu_sec"] = end - start - val = f'LLM 
Response:\n------------\n{monty._fetch_response_msg(resp)}\n------------\nCost: {cost_str}\nLLM CPU Time Sec: {round(resp["est_cpu_sec"],4)}' - except Exception as e: - val = f'Warning: {e}: {resp}' - if is_json_output: - return resp - else: - return val - -@add_method(HFace_Pluto) -def fetch_summary(self, model='gpt-4',is_json_output=False, session=None): - px = 'Summarize the following conversation in a professional tone:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_topic(self, model='gpt-4',is_json_output=False, session=None): - px = 'Write a list of key topics for the following:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_freeform_tag(self, model='gpt-4',is_json_output=False, session=None): - px = 'Write a list of five top hashtag based on the following:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_target_tag(self, model='gpt-4',is_json_output=False, session=None): - px = 'Kinship-tags: cat, cat food, cat nutrition, cat products, dog, dog food, health, wellness, nutrition, dog products, kitten food, sustainable, vitamins, recipes, disease: Select the top 5 Kinship-tags for the following chat session:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_diagnose(self, model='gpt-4',is_json_output=False, session=None): - px = 'What is the diagnose for the pet based on the following:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_treatment(self, model='gpt-4',is_json_output=False, session=None): - px = 'What is the treatment for the pet based on the following:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# -@add_method(HFace_Pluto) -def fetch_article(self, model='gpt-4',is_json_output=False, session=None): - px = 'Write a list of recomendation article to read on the web with link based on the following chat session:' - val = self._fetch_answer(px, model=model,is_json_output=is_json_output, session=session) - return val -# - -in_box = [gradio.Textbox(lines=20, label="Vet Chat Session:", placeholder=''), - gradio.Radio(["Summary", "Topic", "Freeform Tags", "Target Tags","Diagnose", "Treatment", "Articles", "Pattern", "*JSON"], label="POC Function"),] -out_box = [gradio.Textbox(lines=4, label="LLM Response, Cost, and Time:")] -# -title = "Kinship with Code and Theory Presenting Multiple LLMs for Pets" -desc = '*(1) Enter the vet chat session (OR click on the example below), (2) Select the POC function, (3) Click on Submit Button.' -arti = ''' -
              • Ping Duc Haba if you have question on these multiple POCs.
              • -
              • Notices on the $Cost and LLM inference time.
              • -
              • The API description link is at the bottom of the page.
              • -
              • Chose the POC function "*JSON" to view the API JSON reponse.
              • -
              • ✋ WARNING: (Not done yet)
                1. - -There is NO LLM selection yet. Use Duc's default choice.
                2. - -There is NO QA
                3. - -There is NO comparison to CSR or HELM benchmarks
                4. - -There is NO data management
                5. - -There is NO bias or inclusion target
                6. - -There is NO effort in UI and UIx
                7. - -There is NO network security
                8. - -There is NO data privatecy
                9. - -There is NO Devopt deployment monitor
                10. - -BUT there IS an API available
                -
              -''' -exp = [ - [monty.session_1,'Summary'], - [monty.session_2,'Summary'] - ] - -@add_method(HFace_Pluto) -def talk_to_kinship(self, chat, fn): - if fn == 'Summary': - val = self.fetch_summary(session=chat) - elif fn == 'Topic': - val = self.fetch_topic(session=chat) - elif fn == 'Freeform Tags': - val = self.fetch_freeform_tag(session=chat) - elif fn == 'Target Tags': - val = self.fetch_target_tag(session=chat) - elif fn == 'Diagnose': - val = self.fetch_diagnose(session=chat) - elif fn == 'Treatment': - val = self.fetch_treatment(session=chat) - elif fn == 'Articles': - val = self.fetch_article(session=chat) - elif fn == '*JSON': - val = self.fetch_summary(session=chat, is_json_output=True) - else: - val = f'{fn} is not yet implement.' - return val - -gradio.Interface(fn=monty.talk_to_kinship, - inputs=in_box, - outputs=out_box, - examples=exp, - title=title, - description=desc, - allow_flagging="manual", - flagging_options=["GOOD", "BAD"], - article=arti).launch() diff --git a/spaces/rainy3/chatgpt_academic/theme.py b/spaces/rainy3/chatgpt_academic/theme.py deleted file mode 100644 index 1cc26b06d994eba6d37aa86f3bbfc12fc164731c..0000000000000000000000000000000000000000 --- a/spaces/rainy3/chatgpt_academic/theme.py +++ /dev/null @@ -1,231 +0,0 @@ -import gradio as gr -from toolbox import get_conf -CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT') -# gradio可用颜色列表 -# gr.themes.utils.colors.slate (石板色) -# gr.themes.utils.colors.gray (灰色) -# gr.themes.utils.colors.zinc (锌色) -# gr.themes.utils.colors.neutral (中性色) -# gr.themes.utils.colors.stone (石头色) -# gr.themes.utils.colors.red (红色) -# gr.themes.utils.colors.orange (橙色) -# gr.themes.utils.colors.amber (琥珀色) -# gr.themes.utils.colors.yellow (黄色) -# gr.themes.utils.colors.lime (酸橙色) -# gr.themes.utils.colors.green (绿色) -# gr.themes.utils.colors.emerald (祖母绿) -# gr.themes.utils.colors.teal (青蓝色) -# gr.themes.utils.colors.cyan (青色) -# gr.themes.utils.colors.sky (天蓝色) -# gr.themes.utils.colors.blue (蓝色) -# gr.themes.utils.colors.indigo (靛蓝色) -# gr.themes.utils.colors.violet (紫罗兰色) -# gr.themes.utils.colors.purple (紫色) -# gr.themes.utils.colors.fuchsia (洋红色) -# gr.themes.utils.colors.pink (粉红色) -# gr.themes.utils.colors.rose (玫瑰色) - - -def adjust_theme(): - try: - color_er = gr.themes.utils.colors.fuchsia - set_theme = gr.themes.Default( - primary_hue=gr.themes.utils.colors.orange, - neutral_hue=gr.themes.utils.colors.gray, - font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui", - "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")], - font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")]) - set_theme.set( - # Colors - input_background_fill_dark="*neutral_800", - # Transition - button_transition="none", - # Shadows - button_shadow="*shadow_drop", - button_shadow_hover="*shadow_drop_lg", - button_shadow_active="*shadow_inset", - input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset", - input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset", - input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset", - checkbox_label_shadow="*shadow_drop", - block_shadow="*shadow_drop", - form_gap_width="1px", - # Button borders - input_border_width="1px", - input_background_fill="white", - # Gradients - stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)", - stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)", - error_background_fill=f"linear-gradient(to right, {color_er.c100}, 
*background_fill_secondary)", - error_background_fill_dark="*background_fill_primary", - checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)", - checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)", - checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)", - checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)", - button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)", - button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)", - button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)", - button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)", - button_primary_border_color_dark="*primary_500", - button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)", - button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)", - button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)", - button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)", - button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})", - button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})", - button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})", - button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})", - button_cancel_border_color=color_er.c200, - button_cancel_border_color_dark=color_er.c600, - button_cancel_text_color=color_er.c600, - button_cancel_text_color_dark="white", - ) - except: - set_theme = None - print('gradio版本较旧, 不能自定义字体和颜色') - return set_theme - - -advanced_css = """ -/* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */ -.markdown-body table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} - -/* 设置表格单元格的内边距为5px,边框粗细为1.2px,颜色为--border-color-primary. */ -.markdown-body th, .markdown-body td { - border: 1.2px solid var(--border-color-primary); - padding: 5px; -} - -/* 设置表头背景颜色为rgba(175,184,193,0.2),透明度为0.2. */ -.markdown-body thead { - background-color: rgba(175,184,193,0.2); -} - -/* 设置表头单元格的内边距为0.5em和0.2em. */ -.markdown-body thead th { - padding: .5em .2em; -} - -/* 去掉列表前缀的默认间距,使其与文本线对齐. */ -.markdown-body ol, .markdown-body ul { - padding-inline-start: 2em !important; -} - -/* 设定聊天气泡的样式,包括圆角、最大宽度和阴影等. */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - /* padding: var(--spacing-xl) !important; */ - /* font-size: var(--text-md) !important; */ - /* line-height: var(--line-md) !important; */ - /* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */ - /* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */ -} -[data-testid = "bot"] { - max-width: 95%; - /* width: auto !important; */ - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 100%; - /* width: auto !important; */ - border-bottom-right-radius: 0 !important; -} - -/* 行内代码的背景设为淡灰色,设定圆角和间距. 
*/ -.markdown-body code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 设定代码块的样式,包括背景颜色、内、外边距、圆角。 */ -.markdown-body pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: rgba(175,184,193,0.2); - border-radius: 10px; - padding: 1em; - margin: 1em 2em 1em 0.5em; -} - -""" - -if CODE_HIGHLIGHT: - advanced_css += """ - -.hll { background-color: #ffffcc } -.c { color: #3D7B7B; font-style: italic } /* Comment */ -.err { border: 1px solid #FF0000 } /* Error */ -.k { color: hsl(197, 94%, 51%); font-weight: bold } /* Keyword */ -.o { color: #666666 } /* Operator */ -.ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ -.cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ -.cp { color: #9C6500 } /* Comment.Preproc */ -.cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ -.c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ -.cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ -.gd { color: #A00000 } /* Generic.Deleted */ -.ge { font-style: italic } /* Generic.Emph */ -.gr { color: #E40000 } /* Generic.Error */ -.gh { color: #000080; font-weight: bold } /* Generic.Heading */ -.gi { color: #008400 } /* Generic.Inserted */ -.go { color: #717171 } /* Generic.Output */ -.gp { color: #000080; font-weight: bold } /* Generic.Prompt */ -.gs { font-weight: bold } /* Generic.Strong */ -.gu { color: #800080; font-weight: bold } /* Generic.Subheading */ -.gt { color: #a9dd00 } /* Generic.Traceback */ -.kc { color: #008000; font-weight: bold } /* Keyword.Constant */ -.kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ -.kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ -.kp { color: #008000 } /* Keyword.Pseudo */ -.kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ -.kt { color: #B00040 } /* Keyword.Type */ -.m { color: #666666 } /* Literal.Number */ -.s { color: #BA2121 } /* Literal.String */ -.na { color: #687822 } /* Name.Attribute */ -.nb { color: #e5f8c3 } /* Name.Builtin */ -.nc { color: #ffad65; font-weight: bold } /* Name.Class */ -.no { color: #880000 } /* Name.Constant */ -.nd { color: #AA22FF } /* Name.Decorator */ -.ni { color: #717171; font-weight: bold } /* Name.Entity */ -.ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ -.nf { color: #f9f978 } /* Name.Function */ -.nl { color: #767600 } /* Name.Label */ -.nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ -.nt { color: #008000; font-weight: bold } /* Name.Tag */ -.nv { color: #19177C } /* Name.Variable */ -.ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ -.w { color: #bbbbbb } /* Text.Whitespace */ -.mb { color: #666666 } /* Literal.Number.Bin */ -.mf { color: #666666 } /* Literal.Number.Float */ -.mh { color: #666666 } /* Literal.Number.Hex */ -.mi { color: #666666 } /* Literal.Number.Integer */ -.mo { color: #666666 } /* Literal.Number.Oct */ -.sa { color: #BA2121 } /* Literal.String.Affix */ -.sb { color: #BA2121 } /* Literal.String.Backtick */ -.sc { color: #BA2121 } /* Literal.String.Char */ -.dl { color: #BA2121 } /* Literal.String.Delimiter */ -.sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ -.s2 { color: #2bf840 } /* Literal.String.Double */ -.se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ -.sh { color: #BA2121 } /* Literal.String.Heredoc */ -.si { color: #A45A77; font-weight: bold } /* 
Literal.String.Interpol */ -.sx { color: #008000 } /* Literal.String.Other */ -.sr { color: #A45A77 } /* Literal.String.Regex */ -.s1 { color: #BA2121 } /* Literal.String.Single */ -.ss { color: #19177C } /* Literal.String.Symbol */ -.bp { color: #008000 } /* Name.Builtin.Pseudo */ -.fm { color: #0000FF } /* Name.Function.Magic */ -.vc { color: #19177C } /* Name.Variable.Class */ -.vg { color: #19177C } /* Name.Variable.Global */ -.vi { color: #19177C } /* Name.Variable.Instance */ -.vm { color: #19177C } /* Name.Variable.Magic */ -.il { color: #666666 } /* Literal.Number.Integer.Long */ -""" diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/Client.js b/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/Client.js deleted file mode 100644 index 7331f5d4f9f3115843f1a8c18cb52f1894a67a03..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@notionhq/client/build/src/Client.js +++ /dev/null @@ -1,403 +0,0 @@ -"use strict"; -var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) { - if (kind === "m") throw new TypeError("Private method is not writable"); - if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter"); - if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it"); - return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value; -}; -var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) { - if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter"); - if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it"); - return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver); -}; -var _Client_auth, _Client_logLevel, _Client_logger, _Client_prefixUrl, _Client_timeoutMs, _Client_notionVersion, _Client_fetch, _Client_agent, _Client_userAgent; -Object.defineProperty(exports, "__esModule", { value: true }); -const logging_1 = require("./logging"); -const errors_1 = require("./errors"); -const utils_1 = require("./utils"); -const api_endpoints_1 = require("./api-endpoints"); -const node_fetch_1 = require("node-fetch"); -const package_json_1 = require("../package.json"); -class Client { - constructor(options) { - var _a, _b, _c, _d, _e, _f; - _Client_auth.set(this, void 0); - _Client_logLevel.set(this, void 0); - _Client_logger.set(this, void 0); - _Client_prefixUrl.set(this, void 0); - _Client_timeoutMs.set(this, void 0); - _Client_notionVersion.set(this, void 0); - _Client_fetch.set(this, void 0); - _Client_agent.set(this, void 0); - _Client_userAgent.set(this, void 0); - /* - * Notion API endpoints - */ - this.blocks = { - /** - * Retrieve block - */ - retrieve: (args) => { - return this.request({ - path: api_endpoints_1.getBlock.path(args), - method: api_endpoints_1.getBlock.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getBlock.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getBlock.bodyParams), - auth: args === null || args === void 0 ? 
void 0 : args.auth, - }); - }, - /** - * Update block - */ - update: (args) => { - return this.request({ - path: api_endpoints_1.updateBlock.path(args), - method: api_endpoints_1.updateBlock.method, - query: (0, utils_1.pick)(args, api_endpoints_1.updateBlock.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.updateBlock.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Delete block - */ - delete: (args) => { - return this.request({ - path: api_endpoints_1.deleteBlock.path(args), - method: api_endpoints_1.deleteBlock.method, - query: (0, utils_1.pick)(args, api_endpoints_1.deleteBlock.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.deleteBlock.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - children: { - /** - * Append block children - */ - append: (args) => { - return this.request({ - path: api_endpoints_1.appendBlockChildren.path(args), - method: api_endpoints_1.appendBlockChildren.method, - query: (0, utils_1.pick)(args, api_endpoints_1.appendBlockChildren.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.appendBlockChildren.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Retrieve block children - */ - list: (args) => { - return this.request({ - path: api_endpoints_1.listBlockChildren.path(args), - method: api_endpoints_1.listBlockChildren.method, - query: (0, utils_1.pick)(args, api_endpoints_1.listBlockChildren.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.listBlockChildren.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - }, - }; - this.databases = { - /** - * List databases - * - * @deprecated Please use `search` - */ - list: (args) => { - return this.request({ - path: api_endpoints_1.listDatabases.path(), - method: api_endpoints_1.listDatabases.method, - query: (0, utils_1.pick)(args, api_endpoints_1.listDatabases.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.listDatabases.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Retrieve a database - */ - retrieve: (args) => { - return this.request({ - path: api_endpoints_1.getDatabase.path(args), - method: api_endpoints_1.getDatabase.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getDatabase.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getDatabase.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Query a database - */ - query: (args) => { - return this.request({ - path: api_endpoints_1.queryDatabase.path(args), - method: api_endpoints_1.queryDatabase.method, - query: (0, utils_1.pick)(args, api_endpoints_1.queryDatabase.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.queryDatabase.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Create a database - */ - create: (args) => { - return this.request({ - path: api_endpoints_1.createDatabase.path(), - method: api_endpoints_1.createDatabase.method, - query: (0, utils_1.pick)(args, api_endpoints_1.createDatabase.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.createDatabase.bodyParams), - auth: args === null || args === void 0 ? 
void 0 : args.auth, - }); - }, - /** - * Update a database - */ - update: (args) => { - return this.request({ - path: api_endpoints_1.updateDatabase.path(args), - method: api_endpoints_1.updateDatabase.method, - query: (0, utils_1.pick)(args, api_endpoints_1.updateDatabase.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.updateDatabase.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - }; - this.pages = { - /** - * Create a page - */ - create: (args) => { - return this.request({ - path: api_endpoints_1.createPage.path(), - method: api_endpoints_1.createPage.method, - query: (0, utils_1.pick)(args, api_endpoints_1.createPage.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.createPage.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Retrieve a page - */ - retrieve: (args) => { - return this.request({ - path: api_endpoints_1.getPage.path(args), - method: api_endpoints_1.getPage.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getPage.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getPage.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Update page properties - */ - update: (args) => { - return this.request({ - path: api_endpoints_1.updatePage.path(args), - method: api_endpoints_1.updatePage.method, - query: (0, utils_1.pick)(args, api_endpoints_1.updatePage.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.updatePage.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - properties: { - /** - * Retrieve page property - */ - retrieve: (args) => { - return this.request({ - path: api_endpoints_1.getPageProperty.path(args), - method: api_endpoints_1.getPageProperty.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getPageProperty.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getPageProperty.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - }, - }; - this.users = { - /** - * Retrieve a user - */ - retrieve: (args) => { - return this.request({ - path: api_endpoints_1.getUser.path(args), - method: api_endpoints_1.getUser.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getUser.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getUser.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * List all users - */ - list: (args) => { - return this.request({ - path: api_endpoints_1.listUsers.path(), - method: api_endpoints_1.listUsers.method, - query: (0, utils_1.pick)(args, api_endpoints_1.listUsers.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.listUsers.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * Get details about bot - */ - me: (args) => { - return this.request({ - path: api_endpoints_1.getSelf.path(), - method: api_endpoints_1.getSelf.method, - query: (0, utils_1.pick)(args, api_endpoints_1.getSelf.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.getSelf.bodyParams), - auth: args === null || args === void 0 ? 
void 0 : args.auth, - }); - }, - }; - this.comments = { - /** - * Create a comment - */ - create: (args) => { - return this.request({ - path: api_endpoints_1.createComment.path(), - method: api_endpoints_1.createComment.method, - query: (0, utils_1.pick)(args, api_endpoints_1.createComment.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.createComment.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - /** - * List comments - */ - list: (args) => { - return this.request({ - path: api_endpoints_1.listComments.path(), - method: api_endpoints_1.listComments.method, - query: (0, utils_1.pick)(args, api_endpoints_1.listComments.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.listComments.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }, - }; - /** - * Search - */ - this.search = (args) => { - return this.request({ - path: api_endpoints_1.search.path(), - method: api_endpoints_1.search.method, - query: (0, utils_1.pick)(args, api_endpoints_1.search.queryParams), - body: (0, utils_1.pick)(args, api_endpoints_1.search.bodyParams), - auth: args === null || args === void 0 ? void 0 : args.auth, - }); - }; - __classPrivateFieldSet(this, _Client_auth, options === null || options === void 0 ? void 0 : options.auth, "f"); - __classPrivateFieldSet(this, _Client_logLevel, (_a = options === null || options === void 0 ? void 0 : options.logLevel) !== null && _a !== void 0 ? _a : logging_1.LogLevel.WARN, "f"); - __classPrivateFieldSet(this, _Client_logger, (_b = options === null || options === void 0 ? void 0 : options.logger) !== null && _b !== void 0 ? _b : (0, logging_1.makeConsoleLogger)(package_json_1.name), "f"); - __classPrivateFieldSet(this, _Client_prefixUrl, ((_c = options === null || options === void 0 ? void 0 : options.baseUrl) !== null && _c !== void 0 ? _c : "https://api.notion.com") + "/v1/", "f"); - __classPrivateFieldSet(this, _Client_timeoutMs, (_d = options === null || options === void 0 ? void 0 : options.timeoutMs) !== null && _d !== void 0 ? _d : 60000, "f"); - __classPrivateFieldSet(this, _Client_notionVersion, (_e = options === null || options === void 0 ? void 0 : options.notionVersion) !== null && _e !== void 0 ? _e : Client.defaultNotionVersion, "f"); - __classPrivateFieldSet(this, _Client_fetch, (_f = options === null || options === void 0 ? void 0 : options.fetch) !== null && _f !== void 0 ? _f : node_fetch_1.default, "f"); - __classPrivateFieldSet(this, _Client_agent, options === null || options === void 0 ? void 0 : options.agent, "f"); - __classPrivateFieldSet(this, _Client_userAgent, `notionhq-client/${package_json_1.version}`, "f"); - } - /** - * Sends a request. - * - * @param path - * @param method - * @param query - * @param body - * @returns - */ - async request({ path, method, query, body, auth, }) { - this.log(logging_1.LogLevel.INFO, "request start", { method, path }); - // If the body is empty, don't send the body in the HTTP request - const bodyAsJsonString = !body || Object.entries(body).length === 0 - ? 
undefined - : JSON.stringify(body); - const url = new URL(`${__classPrivateFieldGet(this, _Client_prefixUrl, "f")}${path}`); - if (query) { - for (const [key, value] of Object.entries(query)) { - if (value !== undefined) { - if (Array.isArray(value)) { - value.forEach(val => url.searchParams.append(key, String(val))); - } - else { - url.searchParams.append(key, String(value)); - } - } - } - } - const headers = { - ...this.authAsHeaders(auth), - "Notion-Version": __classPrivateFieldGet(this, _Client_notionVersion, "f"), - "user-agent": __classPrivateFieldGet(this, _Client_userAgent, "f"), - }; - if (bodyAsJsonString !== undefined) { - headers["content-type"] = "application/json"; - } - try { - const response = await errors_1.RequestTimeoutError.rejectAfterTimeout(__classPrivateFieldGet(this, _Client_fetch, "f").call(this, url.toString(), { - method: method.toUpperCase(), - headers, - body: bodyAsJsonString, - agent: __classPrivateFieldGet(this, _Client_agent, "f"), - }), __classPrivateFieldGet(this, _Client_timeoutMs, "f")); - const responseText = await response.text(); - if (!response.ok) { - throw (0, errors_1.buildRequestError)(response, responseText); - } - const responseJson = JSON.parse(responseText); - this.log(logging_1.LogLevel.INFO, `request success`, { method, path }); - return responseJson; - } - catch (error) { - if (!(0, errors_1.isNotionClientError)(error)) { - throw error; - } - // Log the error if it's one of our known error types - this.log(logging_1.LogLevel.WARN, `request fail`, { - code: error.code, - message: error.message, - }); - if ((0, errors_1.isHTTPResponseError)(error)) { - // The response body may contain sensitive information so it is logged separately at the DEBUG level - this.log(logging_1.LogLevel.DEBUG, `failed response body`, { - body: error.body, - }); - } - throw error; - } - } - /** - * Emits a log message to the console. - * - * @param level The level for this message - * @param args Arguments to send to the console - */ - log(level, message, extraInfo) { - if ((0, logging_1.logLevelSeverity)(level) >= (0, logging_1.logLevelSeverity)(__classPrivateFieldGet(this, _Client_logLevel, "f"))) { - __classPrivateFieldGet(this, _Client_logger, "f").call(this, level, message, extraInfo); - } - } - /** - * Transforms an API key or access token into a headers object suitable for an HTTP request. - * - * This method uses the instance's value as the default when the input is undefined. If neither are defined, it returns - * an empty object - * - * @param auth API key or access token - * @returns headers key-value object - */ - authAsHeaders(auth) { - const headers = {}; - const authHeaderValue = auth !== null && auth !== void 0 ? 
auth : __classPrivateFieldGet(this, _Client_auth, "f"); - if (authHeaderValue !== undefined) { - headers["authorization"] = `Bearer ${authHeaderValue}`; - } - return headers; - } -} -exports.default = Client; -_Client_auth = new WeakMap(), _Client_logLevel = new WeakMap(), _Client_logger = new WeakMap(), _Client_prefixUrl = new WeakMap(), _Client_timeoutMs = new WeakMap(), _Client_notionVersion = new WeakMap(), _Client_fetch = new WeakMap(), _Client_agent = new WeakMap(), _Client_userAgent = new WeakMap(); -Client.defaultNotionVersion = "2022-06-28"; -//# sourceMappingURL=Client.js.map \ No newline at end of file diff --git a/spaces/realambuj/Image_Classifier_using_RESNET50/app.py b/spaces/realambuj/Image_Classifier_using_RESNET50/app.py deleted file mode 100644 index fdf19317f271b359acf4fa5e82b14bd147e02ec8..0000000000000000000000000000000000000000 --- a/spaces/realambuj/Image_Classifier_using_RESNET50/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import streamlit as st -from PIL import Image -from transformers import AutoImageProcessor -import torch -import joblib - -with st.sidebar: - st.subheader('Image Classifier using ResNet50') - st.write('This is a image classification app using ResNet50. It is a state of the art model for image classification. It is a pretrained model which is trained on a large dataset of images. It can be used for classifying any image. It is a very powerful model and is very fast. It is also very accurate.') - image = Image.open('resnet_architecture.png') - st.image(image, caption='Bert Model') - st.code('App Built by Ambuj Raj',language='python') - - -st.title('Image Classifier using ResNet50') - -uploaded_file = st.file_uploader("Choose a image", type=['png', 'jpeg', 'jpg']) -if uploaded_file is not None: - st.image(uploaded_file, width=300) - raw_image = Image.open(uploaded_file).convert('RGB') - -if st.button('Classify Image'): - with st.spinner('Classifying Image...'): - processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") - loaded_model = joblib.load("model.sav") - inputs = processor(raw_image, return_tensors="pt") - with torch.no_grad(): - logits = loaded_model(**inputs).logits - # model predicts one of the 1000 ImageNet classes - predicted_label = logits.argmax(-1).item() - st.success('Image Classified!') - st.write('Predicted Label is: ',loaded_model.config.id2label[predicted_label]) diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AIDA64 Extreme Edition 5.80.4000 KeyGen 64 Bit.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AIDA64 Extreme Edition 5.80.4000 KeyGen 64 Bit.md deleted file mode 100644 index a93237358a9cb113457ce4523ef5da26f5f99222..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AIDA64 Extreme Edition 5.80.4000 KeyGen 64 Bit.md +++ /dev/null @@ -1,10 +0,0 @@ -
              -

AIDA64 is a Windows tool that can be used for system diagnostics and device tuning. It has an advanced system diagnostics feature that provides detailed information about your hardware components and their performance.

              -

              AIDA64 Extreme Edition 5.80.4000 KeyGen 64 bit


              Download ··· https://urlgoal.com/2uCM6V



              -

AIDA64 is a system diagnostics and hardware monitoring tool that is efficient and easy to use. It provides a detailed hardware analysis and lets you quickly see all the information about the system and its components.

              -

AIDA64 is an advanced diagnostic tool that is compatible with all 32-bit and 64-bit editions of Windows XP, Windows Vista, Windows 7, Windows 8, and Windows 8.1. The program can be used to measure and compare the performance of your entire system, including your CPUs, memory, hard disks, network cards, and video card. It can also be used to tune your system so that it runs at optimal levels.

              -

AIDA64 is a system diagnostics and performance analysis application for Windows with a user-friendly interface. It includes hardware detection and test functions that automatically test your PC and report detailed information about it. It can measure the performance of your CPU, memory, hard disk drives and other components, and the same detailed reports can be collected through command-line automation, so you can measure and compare the performance of entire systems.

              -

              -

AIDA64 is a Windows diagnostic tool with a user-friendly interface. The program comes with a setup wizard that lets you select the test and measurement parameters you need, and its detailed interface presents all the measurement results quickly and clearly.

              -
              -
              \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cm Relief Fund Telangana Application Form.pdf.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cm Relief Fund Telangana Application Form.pdf.md deleted file mode 100644 index 0a9375c6ae1253b4a425c5602e8155098ab55955..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cm Relief Fund Telangana Application Form.pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Cm Relief Fund Telangana Application Form.pdf


              DOWNLOAD ❤❤❤ https://urlgoal.com/2uCM55



              - -Telangana Chief Minister Relief Fund is gathered by voluntary ... Flood Relief Application PDF form to apply CM Chief Minister relief fund. 1fdad05405
              -
              -
              -

              diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Electricalmeasurementsandmeasuringinstrumentsbygolding.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Electricalmeasurementsandmeasuringinstrumentsbygolding.md deleted file mode 100644 index d9ae5cf52b38ef09c1856879678de381a8eba41c..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Electricalmeasurementsandmeasuringinstrumentsbygolding.md +++ /dev/null @@ -1,13 +0,0 @@ -

              electricalmeasurementsandmeasuringinstrumentsbygolding


              DOWNLOAD ✑ ✑ ✑ https://urlgoal.com/2uCLR9



              -
electricalmeasurementsandmeasuringinstrumentsbygolding (CSI) - Differential measurements of the pressure of an earthquake at the upper stratosphere: comparison of the results from the Earth Sounding Project (ESP) and the European Space Agency's Earthquake TEST. The paper analyzes the earthquake pressure measurement data, which have been obtained by the ESP at two different geophysical sites: Delfin (Russia), where the Earth Sounding Project has been initiated, and the Gaza Strip in the Arabian Gulf. The paper also compares the results obtained
              -
              -
              -

              diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/glm.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/glm.py deleted file mode 100644 index 8be14b50f0d7edcde6328f1f805b392c8e3ab7e2..0000000000000000000000000000000000000000 --- a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/glm.py +++ /dev/null @@ -1,125 +0,0 @@ -import numpy as np - - -def vec3(x, y, z): - return np.array([x, y, z], dtype=np.float32) - - -def radians(v): - return np.radians(v) - - -def identity(): - return np.identity(4, dtype=np.float32) - - -def empty(): - return np.zeros([4, 4], dtype=np.float32) - - -def magnitude(v): - return np.linalg.norm(v) - - -def normalize(v): - m = magnitude(v) - return v if m == 0 else v / m - - -def dot(u, v): - return np.sum(u * v) - - -def cross(u, v): - res = vec3(0, 0, 0) - res[0] = u[1] * v[2] - u[2] * v[1] - res[1] = u[2] * v[0] - u[0] * v[2] - res[2] = u[0] * v[1] - u[1] * v[0] - return res - - -# below functions can be optimized - -def translate(m, v): - res = np.copy(m) - res[:, 3] = m[:, 0] * v[0] + m[:, 1] * v[1] + m[:, 2] * v[2] + m[:, 3] - return res - - -def rotate(m, angle, v): - a = angle - c = np.cos(a) - s = np.sin(a) - - axis = normalize(v) - temp = (1 - c) * axis - - rot = empty() - rot[0][0] = c + temp[0] * axis[0] - rot[0][1] = temp[0] * axis[1] + s * axis[2] - rot[0][2] = temp[0] * axis[2] - s * axis[1] - - rot[1][0] = temp[1] * axis[0] - s * axis[2] - rot[1][1] = c + temp[1] * axis[1] - rot[1][2] = temp[1] * axis[2] + s * axis[0] - - rot[2][0] = temp[2] * axis[0] + s * axis[1] - rot[2][1] = temp[2] * axis[1] - s * axis[0] - rot[2][2] = c + temp[2] * axis[2] - - res = empty() - res[:, 0] = m[:, 0] * rot[0][0] + m[:, 1] * rot[0][1] + m[:, 2] * rot[0][2] - res[:, 1] = m[:, 0] * rot[1][0] + m[:, 1] * rot[1][1] + m[:, 2] * rot[1][2] - res[:, 2] = m[:, 0] * rot[2][0] + m[:, 1] * rot[2][1] + m[:, 2] * rot[2][2] - res[:, 3] = m[:, 3] - return res - - -def perspective(fovy, aspect, zNear, zFar): - tanHalfFovy = np.tan(fovy / 2) - - res = empty() - res[0][0] = 1 / (aspect * tanHalfFovy) - res[1][1] = 1 / (tanHalfFovy) - res[2][3] = -1 - res[2][2] = - (zFar + zNear) / (zFar - zNear) - res[3][2] = -(2 * zFar * zNear) / (zFar - zNear) - - return res.T - - -def ortho(left, right, bottom, top, zNear, zFar): - # res = np.ones([4, 4], dtype=np.float32) - res = identity() - res[0][0] = 2 / (right - left) - res[1][1] = 2 / (top - bottom) - res[2][2] = - 2 / (zFar - zNear) - res[3][0] = - (right + left) / (right - left) - res[3][1] = - (top + bottom) / (top - bottom) - res[3][2] = - (zFar + zNear) / (zFar - zNear) - return res.T - - -def lookat(eye, center, up): - f = normalize(center - eye) - s = normalize(cross(f, up)) - u = cross(s, f) - - res = identity() - res[0][0] = s[0] - res[1][0] = s[1] - res[2][0] = s[2] - res[0][1] = u[0] - res[1][1] = u[1] - res[2][1] = u[2] - res[0][2] = -f[0] - res[1][2] = -f[1] - res[2][2] = -f[2] - res[3][0] = -dot(s, eye) - res[3][1] = -dot(u, eye) - res[3][2] = -dot(f, eye) - return res.T - - -def transform(d, m): - return np.dot(m, d.T).T diff --git a/spaces/robin0307/MMOCR/configs/_base_/det_pipelines/dbnet_pipeline.py b/spaces/robin0307/MMOCR/configs/_base_/det_pipelines/dbnet_pipeline.py deleted file mode 100644 index 40eee02db3b68d5682841532d1122c92bdca2a65..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/_base_/det_pipelines/dbnet_pipeline.py +++ /dev/null @@ -1,88 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 
57.12, 57.375], to_rgb=True) - -train_pipeline_r18 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='ImgAug', - args=[['Fliplr', 0.5], - dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]), - dict(type='EastRandomCrop', target_size=(640, 640)), - dict(type='DBNetTargets', shrink_ratio=0.4), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'], - visualize=dict(flag=False, boundary_key='gt_shrink')), - dict( - type='Collect', - keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask']) -] - -test_pipeline_1333_736 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 736), # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# for dbnet_r50dcnv2_fpnc -img_norm_cfg_r50dcnv2 = dict( - mean=[122.67891434, 116.66876762, 104.00698793], - std=[58.395, 57.12, 57.375], - to_rgb=True) - -train_pipeline_r50dcnv2 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg_r50dcnv2), - dict( - type='ImgAug', - args=[['Fliplr', 0.5], - dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]), - dict(type='EastRandomCrop', target_size=(640, 640)), - dict(type='DBNetTargets', shrink_ratio=0.4), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'], - visualize=dict(flag=False, boundary_key='gt_shrink')), - dict( - type='Collect', - keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask']) -] - -test_pipeline_4068_1024 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=(4068, 1024), # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg_r50dcnv2), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] diff --git a/spaces/rorallitri/biomedical-language-models/Rpg-Metanoia-720p-Or-1080p-PORTABLE.md b/spaces/rorallitri/biomedical-language-models/Rpg-Metanoia-720p-Or-1080p-PORTABLE.md deleted file mode 100644 index 6acb64249dd6cd4af263ade79e5fdbef7eacb683..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/Rpg-Metanoia-720p-Or-1080p-PORTABLE.md +++ /dev/null @@ -1,80 +0,0 @@ -## Rpg Metanoia 720p Or 1080p - - - - - - ![Rpg Metanoia 720p Or 1080p PORTABLE](https://whatismymovie.com/t/images/200752.jpg) - - - - - -**Download ✸✸✸ [https://denirade.blogspot.com/?download=2txosT](https://denirade.blogspot.com/?download=2txosT)** - - - - - - - - - - - - - -# Rpg Metanoia: A Filipino Animated Film Worth Watching in HD - - - -Rpg Metanoia is a 2010 Filipino animated film that tells the story of Nico, a young boy who loves 
playing online games with his friends. When a mysterious virus infects their favorite game, Metanoia, they have to use their skills and teamwork to save the virtual world and themselves. - - - -The film is the first full-length 3D animated movie produced in the Philippines, and it showcases the talent and creativity of Filipino animators. It also features the voices of popular actors such as Aga Muhlach, Vhong Navarro, Eugene Domingo, and Zaijian Jaranilla. - - - -Rpg Metanoia is not only a fun and exciting adventure, but also a heartwarming and inspiring story that celebrates Filipino culture, values, and identity. It explores themes such as friendship, family, courage, and imagination. - - - -If you are looking for a quality animated film that you can enjoy with your family and friends, you should definitely watch Rpg Metanoia. And if you want to experience the stunning visuals and sound effects of the film, you should watch it in HD quality. - - - -Fortunately, you can find Rpg Metanoia in 720p or 1080p resolution online. You can download it from various sources such as Archive[^1^], Wixsite[^2^], SoundCloud[^3^] [^4^], or other websites that offer high-definition movies. Just make sure that you have a reliable internet connection and enough storage space on your device. - - - -Rpg Metanoia is a film that will make you proud to be a Filipino and a gamer. It will also make you appreciate the beauty and power of animation. So don't miss this opportunity to watch Rpg Metanoia in 720p or 1080p quality. You won't regret it! - - - -But don't just take our word for it. Rpg Metanoia has received positive reviews from critics and audiences alike, who praised its originality, humor, emotion, and cultural relevance. It was also nominated for several awards, including Best Picture at the 2010 Metro Manila Film Festival. - - - -Here are some of the reviews that Rpg Metanoia has received: - - - -- "It's an endearingly winning, creative piece of effort in a time when those qualities don't even seem to matter." - Jay Exiomo, IMDb[^1^] - -- "It is an adept social commentary on the emerging role of technology in changing and shaping the modern Filipino middle-class family, and it manages to depict the nuances of the often misrepresented subculture of computer gaming with such newfound clarity and wit." - pyronix2002, IMDb[^1^] - -- "The film is a visual treat with really well-choreographed fights and an ambitious world built from the ground up, which is something I respect." - alex (trentson), Letterboxd[^2^] - -- "86% Audience Score" - Rotten Tomatoes[^3^] - - - -As you can see, Rpg Metanoia is a film that deserves your attention and appreciation. It is not only a milestone for Philippine animation, but also a masterpiece of storytelling and entertainment. So what are you waiting for? Download Rpg Metanoia in 720p or 1080p quality today and enjoy this amazing film! - - 1b8d091108 - - - - - diff --git a/spaces/rorallitri/biomedical-language-models/logs/Cabaret Hindi Movies How They Achieved the Holy Grail of Fusion Dance.md b/spaces/rorallitri/biomedical-language-models/logs/Cabaret Hindi Movies How They Achieved the Holy Grail of Fusion Dance.md deleted file mode 100644 index a528f8b6a0d3d7f8bdbbd1fb8c0ddde69a03bee3..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Cabaret Hindi Movies How They Achieved the Holy Grail of Fusion Dance.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Cabaret Hindi Movies


              Download ⚙⚙⚙ https://tinurll.com/2uzlry



              -
              -
              -

              diff --git a/spaces/rorallitri/biomedical-language-models/logs/June 2008 Cumulative Update Issue Everything You Need to Know About the Latest Windows Updates.md b/spaces/rorallitri/biomedical-language-models/logs/June 2008 Cumulative Update Issue Everything You Need to Know About the Latest Windows Updates.md deleted file mode 100644 index a1c81fb7ac3b135192299aacd134ace313306be9..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/June 2008 Cumulative Update Issue Everything You Need to Know About the Latest Windows Updates.md +++ /dev/null @@ -1,22 +0,0 @@ -
              -

              To learn what a specific version number of SQL Server maps to, or to find the KB article information for a specific cumulative update package or a service pack, search for the version number in the SQL Server Complete Version list tables.

              -

IMPORTANT Windows Server 2008 Service Pack 2 (SP2) has reached the end of mainstream support and is now in extended support. Starting in July 2020, there will no longer be optional, non-security releases (known as "C" releases) for this operating system. Operating systems in extended support have only cumulative monthly security updates (known as the "B" or Update Tuesday release).

              -

              June 2008 Cumulative Update Issue


              DOWNLOAD ---> https://tinurll.com/2uznmu



              -

              If you have purchased an ESU key and have encountered this issue, please verify you have applied all prerequisites and that your key is activated. For information on activation, please see this blog post. For information on the prerequisites, see the "How to get this update" section of this article.

              -

              You must install the updates listed below and restart your device before installing the latest Rollup. Installing these updates improves the reliability of the update process and mitigates potential issues while installing the Rollup and applying Microsoft security fixes.
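As a quick illustration of that prerequisite check, here is a minimal Python sketch that shells out to the standard PowerShell Get-HotFix cmdlet and compares the installed updates against a list of KB numbers. The KB identifiers in the sketch are placeholders, not the actual prerequisites, which are only listed in the linked Microsoft article.

```python
import subprocess

# Placeholder KB numbers; replace them with the prerequisite updates named
# in the Microsoft article this page refers to.
REQUIRED_KBS = ["KB0000001", "KB0000002"]

def installed_hotfixes():
    # Get-HotFix is a built-in PowerShell cmdlet that lists installed updates.
    result = subprocess.run(
        ["powershell", "-NoProfile", "-Command",
         "Get-HotFix | Select-Object -ExpandProperty HotFixID"],
        capture_output=True, text=True, check=True,
    )
    return {line.strip() for line in result.stdout.splitlines() if line.strip()}

def missing_prerequisites():
    present = installed_hotfixes()
    return [kb for kb in REQUIRED_KBS if kb not in present]

if __name__ == "__main__":
    missing = missing_prerequisites()
    if missing:
        print("Install these updates and restart before applying the Rollup:", ", ".join(missing))
    else:
        print("All listed prerequisite updates appear to be installed.")
```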

              -

              REMINDER Windows Server 2008 Service Pack 2 (SP2) has reached the end of mainstream support and is now in extended support. Starting in July 2020, there will no longer be optional, non-security releases (known as "C" releases) for this operating system. Operating systems in extended support have only cumulative monthly security updates (known as the "B" or Update Tuesday release).

              -

              If your organization did not purchase the third year of ESU coverage, you must purchase Year 1, Year 2, and Year 3 ESU for your applicable Windows Server 2008 SP2 devices before you install and activate the Year 3 MAK keys to receive updates. The steps to install, activate, and deploy ESUs are the same for first, second, and third year coverage. For more information, see Obtaining Extended Security Updates for eligible Windows devices for the Volume Licensing process and Purchasing Windows 7 ESUs as a Cloud Solution Provider for the CSP process. For embedded devices, contact your original equipment manufacturer (OEM).
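For the install-and-activate step, the sketch below shows one hedged way to drive the built-in slmgr.vbs licensing script from Python. The MAK key and ESU Activation ID values are placeholders that must be replaced with the key you purchased and the Activation ID reported by slmgr /dlv on the device.

```python
import subprocess

# Placeholders only: substitute your purchased ESU MAK key and the ESU
# Activation ID that "slmgr /dlv" reports for this device.
ESU_MAK_KEY = "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
ESU_ACTIVATION_ID = "00000000-0000-0000-0000-000000000000"

def run_slmgr(*args):
    # slmgr.vbs is the built-in Windows licensing script; cscript //Nologo
    # runs it on the console instead of showing dialog boxes.
    result = subprocess.run(
        ["cscript", "//Nologo", r"C:\Windows\System32\slmgr.vbs", *args],
        capture_output=True, text=True, check=True,
    )
    return result.stdout

if __name__ == "__main__":
    print(run_slmgr("/ipk", ESU_MAK_KEY))        # install the ESU key
    print(run_slmgr("/dlv"))                     # verify the key is listed
    print(run_slmgr("/ato", ESU_ACTIVATION_ID))  # activate the ESU add-on
```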

              -

              Note For information about the various types of Windows updates, such as critical, security, driver, service packs, and so on, please see the following article. To view other notes and messages for Windows Server 2008 SP2, see the following update history home page.

              -

              -

              After installing Windows updates released on or after November 8, 2022 on Windows Servers that use the Domain Controller role, you might have issues with Kerberos authentication. This issue might affect any Kerberos authentication in your environment. Some scenarios which might be affected:

              -

              Note This issue is not an expected part of the security hardening for Netlogon and Kerberos starting with November 2022 security update. You will still have to follow the guidance in these articles even after this issue is resolved.

              -

              Description:
              A security issue has been identified in a Microsoft software product that could affect your system. You can help protect your system by installing this update from Microsoft. For a complete listing of the issues that are included in this update, see the associated Microsoft Knowledge Base article. After you install this update, you may have to restart your system.

              -

              A security issue has been identified in a Microsoft software product that could affect your system. You can help protect your system by installing this update from Microsoft. For a complete listing of the issues that are included in this update, see the associated Microsoft Knowledge Base article. After you install this update, you may have to restart your system.

              -

              Description:
              ComponentUpdate: A security issue has been identified in a Microsoft software product that could affect your system. You can help protect your system by installing this update from Microsoft. For a complete listing of the issues that are included in this update, see the associated Microsoft Knowledge Base article. After you install this update, you may have to restart your system.

              -

              Description:
              Install this update to resolve issues in Windows. For a complete listing of the issues that are included in this update, see the associated Microsoft Knowledge Base article for more information. After you install this item, you may have to restart your computer.

              -

              Learn about the security and non-security updates that are published for Windows 7 SP1 and Windows Server 2008 R2 SP1 through Windows Update. These updates address issues and improve the overall reliability of the operating system.

              -

Microsoft has issued an out-of-band emergency update to patch an authentication issue that was caused by the November 9th cumulative update for Windows Server. The bug affects Windows Server 2008 SP2 through Windows Server 2019.

              -

The November 9th Patch Tuesday cumulative update (CU) for Windows Server introduced a problem that can cause authentication failures on Active Directory (AD) domain controllers (DCs). Microsoft says the following about the new out-of-band update:

              -

              Addresses a known issue that might cause authentication failures related to Kerberos tickets you acquired from Service for User to Self (S4U2self). This issue occurs after you install the November 9, 2021 security updates on domain controllers (DC) that are running Windows Server.

              -
              -
              \ No newline at end of file diff --git a/spaces/rushankg/test-streamlit/README.md b/spaces/rushankg/test-streamlit/README.md deleted file mode 100644 index 970c290de517c18ae7ee8979d3e613a7e6b78f43..0000000000000000000000000000000000000000 --- a/spaces/rushankg/test-streamlit/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Test Streamlit -emoji: 🦀 -colorFrom: green -colorTo: red -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false -license: cc-by-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/scedlatioru/img-to-music/example/Hide ALL IP 2017.02.01.170201 Final Crack - [Softhound] Utorrent.md b/spaces/scedlatioru/img-to-music/example/Hide ALL IP 2017.02.01.170201 Final Crack - [Softhound] Utorrent.md deleted file mode 100644 index 511cc2355bc7f0fbc3097a5f7d6012fed56fc8eb..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Hide ALL IP 2017.02.01.170201 Final Crack - [Softhound] Utorrent.md +++ /dev/null @@ -1,89 +0,0 @@ - -

              How to Download Once Upon Ay Time In Mumbai Dobaara! in Hindi

              - -

              Once Upon Ay Time In Mumbai Dobaara! is a 2013 Bollywood crime thriller film directed by Milan Luthria and starring Akshay Kumar, Imran Khan, Sonakshi Sinha and Sonali Bendre. The film is a sequel to the 2010 hit Once Upon A Time In Mumbaai, which chronicled the rise of underworld dons in Mumbai during the 1970s and 1980s.

              -

              Hide ALL IP 2017.02.01.170201 Final Crack - [Softhound] utorrent


              Downloadhttps://gohhs.com/2uEyXW



              - -

              The sequel follows the story of Shoaib Khan (Akshay Kumar), who has become the undisputed king of the Mumbai underworld after killing his mentor Sultan Mirza (Ajay Devgn). Shoaib is challenged by his former associate Aslam (Imran Khan), who falls in love with Shoaib's girlfriend Jasmine (Sonakshi Sinha). The film is a mix of action, romance, drama and comedy, with a soundtrack composed by Pritam.

              - -

              If you are a fan of this genre and want to watch Once Upon Ay Time In Mumbai Dobaara! in Hindi, you might be wondering how to download it from the internet. There are many websites that offer torrent links for downloading movies, but not all of them are safe and legal. Some of them might contain viruses, malware or spyware that can harm your device or compromise your privacy. Some of them might also be blocked by your internet service provider or government authorities due to copyright infringement issues.

              - -

              Therefore, you need to be careful and choose a reliable and trustworthy source for downloading Once Upon Ay Time In Mumbai Dobaara! in Hindi. Here are some tips that can help you find and download the movie safely and legally:

              - -
                -
• Use a VPN service: A VPN (Virtual Private Network) is a service that creates a secure and encrypted connection between your device and a server located in another country. This way, you can hide your IP address and location from prying eyes and bypass any geo-restrictions or censorship that might prevent you from accessing certain websites. A VPN also protects your data from hackers, ISPs and government agencies that might monitor your online activity. There are many VPN services available online, but some of them might be slow, expensive or unreliable. You can check out some of the best VPNs for torrenting here.
              • -
              • Use a reputable torrent site: A torrent site is a website that hosts torrent files, which are small pieces of data that contain information about the files you want to download. You need a torrent client, such as uTorrent or BitTorrent, to open these files and connect to other peers who have the same files. This way, you can download the files from multiple sources simultaneously, which makes the process faster and more efficient. However, not all torrent sites are safe and legal. Some of them might host fake, corrupted or infected files that can damage your device or expose you to legal risks. Some of them might also be banned or blocked by your ISP or government authorities due to copyright infringement issues. You can check out some of the best torrent sites for movies here.
              • -
• Use a magnet link: A magnet link is a type of URL that contains a hash code that identifies a file or a group of files on the peer-to-peer network. Unlike a torrent file, a magnet link does not require you to download anything before you can start downloading the file. You just need to copy and paste the magnet link into your torrent client and it will automatically find and download the file from other peers. A magnet link is often more convenient than a torrent file, because you do not have to download and open a separate file before the transfer can start. You can usually find magnet links on most torrent sites, next to the torrent files.
              • -
              - -

              Now that you know how to download Once Upon Ay Time In Mumbai Dobaara! in Hindi using a VPN, a torrent site and a magnet link, you can enjoy watching this entertaining movie on your device. However, you should also keep in mind that downloading copyrighted content without permission is illegal in many countries and regions. Therefore, you should always respect the rights of the creators and distributors of the movie and only download it for personal use. You should also avoid sharing or uploading the movie on any platform that might violate its copyright.

              - -

              We hope this article has helped you learn how to download Once Upon Ay Time In Mumbai Dobaara! in Hindi using a VPN, a torrent site and a magnet link. If you have any questions or suggestions, feel free to leave a comment below.

              -

              Why You Should Watch Once Upon Ay Time In Mumbai Dobaara! in Hindi

              - -

              Once Upon Ay Time In Mumbai Dobaara! is not just a typical gangster movie. It is also a tribute to the golden era of Bollywood cinema, when movies were full of melodious songs, colorful costumes, larger-than-life characters and memorable dialogues. The film pays homage to some of the iconic movies and stars of the past, such as Sholay, Deewaar, Amitabh Bachchan, Shatrughan Sinha and Rekha.

              - -

              The film also showcases the rich culture and history of Mumbai, the city of dreams. The film captures the essence of the city, from its crowded streets and markets, to its majestic monuments and landmarks, to its vibrant nightlife and festivals. The film also depicts the contrast between the glamorous world of Bollywood and the dark underworld of crime and violence.

              -

              - -

              The film also boasts of a stellar cast and crew, who have delivered some of their best performances and work. Akshay Kumar plays the role of Shoaib Khan with charisma and intensity, while Imran Khan portrays the innocent and loyal Aslam with sincerity and charm. Sonakshi Sinha looks stunning as the aspiring actress Jasmine, who is caught between two men who love her. Sonali Bendre makes a special appearance as Mumtaz, Shoaib's first love and confidante.

              - -

              The film is directed by Milan Luthria, who has previously helmed successful movies like The Dirty Picture and Once Upon A Time In Mumbaai. The film is written by Rajat Arora, who has penned some of the most witty and powerful dialogues in recent times. The film is produced by Ekta Kapoor and Shobha Kapoor, who are known for their quality and diverse content.

              - -

              How to Enjoy Once Upon Ay Time In Mumbai Dobaara! in Hindi

              - -

              If you want to watch Once Upon Ay Time In Mumbai Dobaara! in Hindi, you have several options to choose from. You can either watch it online or download it from a torrent site. However, before you do that, you should make sure that you have a good internet connection and a compatible device.

              - -

              Watching online: If you want to watch Once Upon Ay Time In Mumbai Dobaara! in Hindi online, you can visit some of the popular streaming platforms that offer this movie. Some of them are Hungama.com, Vofomovies.in, Soundcloud.com and Archive.org. These platforms allow you to stream the movie in high quality and with subtitles. However, you might have to pay a subscription fee or register an account to access these platforms.

              - -

              Downloading from a torrent site: If you want to download Once Upon Ay Time In Mumbai Dobaara! in Hindi from a torrent site, you can use some of the reliable and trustworthy sites that offer this movie. Some of them are Limetorrents.lol, Torrentz2.eu, 1337x.to and YTS.mx. These sites allow you to download the movie in various formats and resolutions. However, you might have to use a VPN service or a magnet link to access these sites.

              - -

              Whichever option you choose, you should always be careful and responsible when downloading or streaming movies online. You should respect the rights of the creators and distributors of the movie and only watch it for personal use. You should also avoid sharing or uploading the movie on any platform that might violate its copyright.

              - -

              We hope this article has helped you learn how to watch Once Upon Ay Time In Mumbai Dobaara! in Hindi online or download it from a torrent site. If you have any questions or suggestions, feel free to leave a comment below.

              -

              What to Expect from Once Upon Ay Time In Mumbai Dobaara! in Hindi

              - -

              Once Upon Ay Time In Mumbai Dobaara! is a movie that will keep you hooked from start to finish. The movie has a gripping plot, with twists and turns that will keep you guessing till the end. The movie also has some of the most memorable dialogues in Bollywood history, delivered by the talented actors with flair and style. The movie also has some of the most catchy and melodious songs, composed by Pritam and sung by some of the best singers in the industry.

              - -

              The movie is a visual treat, with stunning cinematography, art direction and editing. The movie recreates the look and feel of the 1980s Mumbai, with its retro fashion, vintage cars and old-school charm. The movie also has some of the most spectacular action sequences, choreographed by Javed-Ejaz and executed by Akshay Kumar and Imran Khan with finesse and skill. The movie also has some of the most romantic scenes, featuring Sonakshi Sinha and Imran Khan, who share a sizzling chemistry on screen.

              - -

              The movie is a complete entertainer, with something for everyone. Whether you are a fan of action, romance, comedy or drama, you will find something to enjoy in this movie. The movie is a perfect blend of masala and class, of realism and fantasy, of nostalgia and novelty. The movie is a must-watch for anyone who loves Bollywood cinema and wants to experience a roller-coaster ride of emotions.

              - -

              How to Review Once Upon Ay Time In Mumbai Dobaara! in Hindi

              - -

              If you have watched Once Upon Ay Time In Mumbai Dobaara! in Hindi and want to share your opinion about it, you can write a review for it. A review is a personal and critical evaluation of a movie, based on your own experience and perspective. A review can help other people decide whether they want to watch the movie or not. A review can also help the makers of the movie to improve their work in the future.

              - -

              Writing a review is not difficult, if you follow some simple steps. Here are some tips that can help you write a good review for Once Upon Ay Time In Mumbai Dobaara! in Hindi:

              - -
                -
              • Start with an introduction: In the introduction, you should give some basic information about the movie, such as its title, genre, director, cast, release date and plot summary. You should also mention your overall impression of the movie, whether you liked it or not, and why.
              • -
              • Write about the strengths and weaknesses: In the main body of your review, you should discuss the strengths and weaknesses of the movie in detail. You should focus on specific aspects of the movie, such as its story, characters, performances, music, direction, cinematography, editing etc. You should give examples from the movie to support your points. You should also compare and contrast the movie with other similar movies or its predecessor.
              • -
              • End with a conclusion: In the conclusion, you should summarize your main points and give your final verdict on the movie. You should also rate the movie on a scale of 1 to 5 stars or any other criteria that you prefer. You should also recommend or warn the readers about watching the movie or not.
              • -
              - -

              Here is an example of a possible review for Once Upon Ay Time In Mumbai Dobaara! in Hindi:

              - -

              Review: Once Upon Ay Time In Mumbai Dobaara! (2013)

              - -

              Once Upon Ay Time In Mumbai Dobaara! is a 2013 Bollywood crime thriller film directed by Milan Luthria and starring Akshay Kumar, Imran Khan, Sonakshi Sinha and Sonali Bendre. The film is a sequel to the 2010 hit Once Upon A Time In Mumbaai, which chronicled the rise of underworld dons in Mumbai during the 1970s and 1980s.

              - -

              The sequel follows the story of Shoaib Khan (Akshay Kumar), who has become the undisputed king of the Mumbai underworld after killing his mentor Sultan Mirza (Ajay Devgn). Shoaib is challenged by his former associate Aslam (Imran Khan), who falls in love with Shoaib's girlfriend Jasmine (Sonakshi Sinha).

              - -

              I watched this movie online using a VPN service and a torrent site. I was curious to see how this movie would compare to its predecessor and how it would portray the underworld saga. I was not disappointed by this movie. It was an entertaining and engaging watch that kept me hooked till the end.

              - -

              The strengths of this movie are its plot, dialogues, music and performances. The plot is gripping and unpredictable, with twists and turns that keep you guessing till the end. The dialogues are witty and powerful, delivered by the actors with flair and style. The music is catchy and melodious, composed by Pritam and sung by some of the best singers in the industry. The performances are stellar, especially by Akshay Kumar who plays Shoaib Khan with charisma and intensity.

              - -

The weaknesses of this movie are its length, editing and climax. The movie is too long and could have been trimmed by at least 20 minutes. The editing is choppy and inconsistent in places. The climax is abrupt and unsatisfying, leaving many questions unanswered.

              - -

              In conclusion, Once Upon Ay Time In Mumbai Dobaara! is a movie that will appeal to anyone who loves Bollywood cinema and wants to experience a roller-coaster ride of emotions. It is a perfect blend of masala and class, of realism and fantasy, of nostalgia and novelty. It is a must-watch for fans of Akshay Kumar and Milan Luthria.

              - -

              I would rate this movie 4 out of 5 stars.

              -

              Conclusion

              - -

Downloading the Hindi torrent of Once Upon Ay Time In Mumbai Dobaara! is a popular and easy way to watch this entertaining and engaging movie online or offline. The movie is a sequel to the 2010 hit Once Upon A Time In Mumbaai, which chronicled the rise of underworld dons in Mumbai during the 1970s and 1980s. The movie has a gripping plot, witty dialogues, catchy music and stellar performances by Akshay Kumar, Imran Khan, Sonakshi Sinha and Sonali Bendre.

              - -

In this article, we have explained how to download Once Upon Ay Time In Mumbai Dobaara! in Hindi using a VPN, a torrent site and a magnet link. We have also explained how to watch Once Upon Ay Time In Mumbai Dobaara! in Hindi online using some of the popular streaming platforms. We have also given some tips on how to write a review for Once Upon Ay Time In Mumbai Dobaara! in Hindi and shared an example of a possible review.

              - -

We hope this article has helped you learn how to download and enjoy Once Upon Ay Time In Mumbai Dobaara! in Hindi. If you have any questions or suggestions, feel free to leave a comment below.

              -
              -
              \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Win XP AiO 36 In 1 OEM Torrent.md b/spaces/scedlatioru/img-to-music/example/Win XP AiO 36 In 1 OEM Torrent.md deleted file mode 100644 index d29dba1da959a288d4d682196e60ff9cc1182087..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Win XP AiO 36 In 1 OEM Torrent.md +++ /dev/null @@ -1,11 +0,0 @@ -
              -

Marsh McLennan provides clients with a single software license (excluding mobile device software licensing) and monthly software service fees under a software and services pricing plan. This pricing model is ideal for insurance-based businesses: they license the software for a specified period and renew the software service contract for as long as they need it.

              -

              Win XP AiO 36 In 1 OEM Torrent


              DOWNLOAD ››› https://gohhs.com/2uEAGi



              -

Also, Marsh McLennan has established a software license service group to help customers with their software purchasing. This group is independent and can assist businesses with the purchase and installation of software.

              -

In addition to our continuous service offering, Marsh McLennan provides a robust and strategic lifecycle management services solution that supports programs and communities for companies' internal IT resources.

              -

These services are highly specialized and align with the specific needs of insurance companies. If you are an insurance company interested in a strong service partner, please contact us. We will be happy to discuss the services we provide and how they can benefit you and your business.

              -

To learn more about Marsh McLennan insurance technology consulting, please visit our website at http://marshmclennan.com/insurance-technology-consulting/ or email us at technologyconsulting@marshmclennan.com.

              -

              -

Next, go into the BIOS on your AiO. You can access it by pressing F2 on your keyboard. In the BIOS, enter Setup and scroll down to the BIOS memory settings. Choose the following two items:

• Enable Debug: set this to On.
• Write Up Code: set this to Yes.
              -
              -
              \ No newline at end of file diff --git a/spaces/sdutta28/AggDetectApp/Dockerfile b/spaces/sdutta28/AggDetectApp/Dockerfile deleted file mode 100644 index 3d004cab0e329ff78ad9a51620675f935e366662..0000000000000000000000000000000000000000 --- a/spaces/sdutta28/AggDetectApp/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM python:3.10 - -WORKDIR /code - -COPY ./requirements.txt requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN useradd -m -u 1000 user - -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH\ - NLTK_DATA=/home/user/app/static/nltk - -WORKDIR $HOME/app - -COPY --chown=user . $HOME/app - -RUN ls -alt - -CMD ["python", "app.py"] \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/arguments.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/arguments.py deleted file mode 100644 index 96f9fda4a096c2ca4bfb61e4faded3960a7ee401..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transducer/arguments.py +++ /dev/null @@ -1,321 +0,0 @@ -"""Transducer model arguments.""" - -import ast -from distutils.util import strtobool - - -def add_encoder_general_arguments(group): - """Define general arguments for encoder.""" - group.add_argument( - "--etype", - default="blstmp", - type=str, - choices=[ - "custom", - "lstm", - "blstm", - "lstmp", - "blstmp", - "vgglstmp", - "vggblstmp", - "vgglstm", - "vggblstm", - "gru", - "bgru", - "grup", - "bgrup", - "vgggrup", - "vggbgrup", - "vgggru", - "vggbgru", - ], - help="Type of encoder network architecture", - ) - group.add_argument( - "--dropout-rate", - default=0.0, - type=float, - help="Dropout rate for the encoder", - ) - - return group - - -def add_rnn_encoder_arguments(group): - """Define arguments for RNN encoder.""" - group.add_argument( - "--elayers", - default=4, - type=int, - help="Number of encoder layers (for shared recognition part " - "in multi-speaker asr mode)", - ) - group.add_argument( - "--eunits", - "-u", - default=300, - type=int, - help="Number of encoder hidden units", - ) - group.add_argument( - "--eprojs", default=320, type=int, help="Number of encoder projection units" - ) - group.add_argument( - "--subsample", - default="1", - type=str, - help="Subsample input frames x_y_z means subsample every x frame " - "at 1st layer, every y frame at 2nd layer etc.", - ) - - return group - - -def add_custom_encoder_arguments(group): - """Define arguments for Custom encoder.""" - group.add_argument( - "--enc-block-arch", - type=eval, - action="append", - default=None, - help="Encoder architecture definition by blocks", - ) - group.add_argument( - "--enc-block-repeat", - default=0, - type=int, - help="Repeat N times the provided encoder blocks if N > 1", - ) - group.add_argument( - "--custom-enc-input-layer", - type=str, - default="conv2d", - choices=["conv2d", "vgg2l", "linear", "embed"], - help="Custom encoder input layer type", - ) - group.add_argument( - "--custom-enc-positional-encoding-type", - type=str, - default="abs_pos", - choices=["abs_pos", "scaled_abs_pos", "rel_pos"], - help="Custom encoder positional encoding layer type", - ) - group.add_argument( - "--custom-enc-self-attn-type", - type=str, - default="self_attn", - choices=["self_attn", "rel_self_attn"], - help="Custom encoder self-attention type", - ) - group.add_argument( - "--custom-enc-pw-activation-type", - type=str, - 
default="relu", - choices=["relu", "hardtanh", "selu", "swish"], - help="Custom encoder pointwise activation type", - ) - group.add_argument( - "--custom-enc-conv-mod-activation-type", - type=str, - default="swish", - choices=["relu", "hardtanh", "selu", "swish"], - help="Custom encoder convolutional module activation type", - ) - - return group - - -def add_decoder_general_arguments(group): - """Define general arguments for encoder.""" - group.add_argument( - "--dtype", - default="lstm", - type=str, - choices=["lstm", "gru", "custom"], - help="Type of decoder to use", - ) - group.add_argument( - "--dropout-rate-decoder", - default=0.0, - type=float, - help="Dropout rate for the decoder", - ) - group.add_argument( - "--dropout-rate-embed-decoder", - default=0.0, - type=float, - help="Dropout rate for the decoder embedding layer", - ) - - return group - - -def add_rnn_decoder_arguments(group): - """Define arguments for RNN decoder.""" - group.add_argument( - "--dec-embed-dim", - default=320, - type=int, - help="Number of decoder embeddings dimensions", - ) - group.add_argument( - "--dlayers", default=1, type=int, help="Number of decoder layers" - ) - group.add_argument( - "--dunits", default=320, type=int, help="Number of decoder hidden units" - ) - - return group - - -def add_custom_decoder_arguments(group): - """Define arguments for Custom decoder.""" - group.add_argument( - "--dec-block-arch", - type=eval, - action="append", - default=None, - help="Custom decoder blocks definition", - ) - group.add_argument( - "--dec-block-repeat", - default=1, - type=int, - help="Repeat N times the provided decoder blocks if N > 1", - ) - group.add_argument( - "--custom-dec-input-layer", - type=str, - default="embed", - choices=["linear", "embed"], - help="Custom decoder input layer type", - ) - group.add_argument( - "--custom-dec-pw-activation-type", - type=str, - default="relu", - choices=["relu", "hardtanh", "selu", "swish"], - help="Custom decoder pointwise activation type", - ) - - return group - - -def add_custom_training_arguments(group): - """Define arguments for training with Custom architecture.""" - group.add_argument( - "--transformer-warmup-steps", - default=25000, - type=int, - help="Optimizer warmup steps", - ) - group.add_argument( - "--transformer-lr", - default=10.0, - type=float, - help="Initial value of learning rate", - ) - - return group - - -def add_transducer_arguments(group): - """Define general arguments for transducer model.""" - group.add_argument( - "--trans-type", - default="warp-transducer", - type=str, - choices=["warp-transducer", "warp-rnnt"], - help="Type of transducer implementation to calculate loss.", - ) - group.add_argument( - "--transducer-weight", - default=1.0, - type=float, - help="Weight of transducer loss when auxiliary task is used.", - ) - group.add_argument( - "--joint-dim", - default=320, - type=int, - help="Number of dimensions in joint space", - ) - group.add_argument( - "--joint-activation-type", - type=str, - default="tanh", - choices=["relu", "tanh", "swish"], - help="Joint network activation type", - ) - group.add_argument( - "--score-norm", - type=strtobool, - nargs="?", - default=True, - help="Normalize transducer scores by length", - ) - - return group - - -def add_auxiliary_task_arguments(group): - """Add arguments for auxiliary task.""" - group.add_argument( - "--aux-task-type", - nargs="?", - default=None, - choices=["default", "symm_kl_div", "both"], - help="Type of auxiliary task.", - ) - group.add_argument( - "--aux-task-layer-list", - 
default=None, - type=ast.literal_eval, - help="List of layers to use for auxiliary task.", - ) - group.add_argument( - "--aux-task-weight", - default=0.3, - type=float, - help="Weight of auxiliary task loss.", - ) - group.add_argument( - "--aux-ctc", - type=strtobool, - nargs="?", - default=False, - help="Whether to use CTC as auxiliary task.", - ) - group.add_argument( - "--aux-ctc-weight", - default=1.0, - type=float, - help="Weight of auxiliary task loss", - ) - group.add_argument( - "--aux-ctc-dropout-rate", - default=0.0, - type=float, - help="Dropout rate for auxiliary CTC", - ) - group.add_argument( - "--aux-cross-entropy", - type=strtobool, - nargs="?", - default=False, - help="Whether to use CE as auxiliary task for the prediction network.", - ) - group.add_argument( - "--aux-cross-entropy-smoothing", - default=0.0, - type=float, - help="Smoothing rate for cross-entropy. If > 0, enables label smoothing loss.", - ) - group.add_argument( - "--aux-cross-entropy-weight", - default=0.5, - type=float, - help="Weight of auxiliary task loss", - ) - - return group diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/go-web.bat b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/go-web.bat deleted file mode 100644 index db1dec52006bc631e4e68bafd619a3a65f202532..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/go-web.bat +++ /dev/null @@ -1,2 +0,0 @@ -runtime\python.exe infer-web.py --pycmd runtime\python.exe --port 7897 -pause diff --git a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/cityscapes_cocoformat.py b/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/cityscapes_cocoformat.py deleted file mode 100644 index 3492c6af30fb6753be2eb95b8d37de75e34a1727..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/cityscapes_cocoformat.py +++ /dev/null @@ -1,28 +0,0 @@ -from detectron2.data.datasets.register_coco import register_coco_instances -import os - -categories = [ - {'id': i, 'name': x} for i, x in enumerate( - ["person", "rider", "car", "truck","bus", "train", \ - "motorcycle", "bicycle"]) -] - -def _get_builtin_metadata(): - thing_dataset_id_to_contiguous_id = { - x['id']: i for i, x in enumerate(sorted(categories, key=lambda x: x['id']))} - thing_classes = [x['name'] for x in sorted(categories, key=lambda x: x['id'])] - return { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes} - -_PREDEFINED_SPLITS_CITYSCAPES = { - "cityscapes_cocoformat_val": ("", "cityscapes/annotations/cityscapes_fine_instance_seg_val_coco_format.json"), -} - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_CITYSCAPES.items(): - register_coco_instances( - key, - _get_builtin_metadata(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join(image_root), - ) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Adobe After Effects 2020 APK The Industry-Standard Motion Graphics and Visual Effects Software for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Adobe After Effects 2020 APK The Industry-Standard Motion Graphics and Visual Effects Software for Android.md deleted file mode 100644 index d91fe9f9300ef14395e73a18844cdbe7b8a0ea33..0000000000000000000000000000000000000000 --- 
a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Adobe After Effects 2020 APK The Industry-Standard Motion Graphics and Visual Effects Software for Android.md +++ /dev/null @@ -1,125 +0,0 @@ - -

              After Effects 2020 APK: What You Need to Know

              -

If you are a fan of creating motion graphics and visual effects for film, TV, video, and web, you might have heard of Adobe After Effects. It is one of the most popular and powerful programs for creating stunning animations and effects.

              -

              But what if you want to use After Effects on your Android device? Is there a way to do that? The answer is yes, thanks to an APK file.

              -

              after effects 2020 apk


              Download ✏ ✏ ✏ https://ssurll.com/2uNZVG



              -

              An APK file is an Android application package that contains all the files and code needed to run an app on an Android device. It is similar to an EXE file for Windows or a DMG file for Mac.

              -

              In this article, we will tell you everything you need to know about downloading and installing After Effects 2020 APK on your Android device. We will also discuss some of the benefits, risks, and legal issues of doing so.

              -

              Why Download After Effects 2020 APK?

              -

              There are many reasons why you might want to download After Effects 2020 APK on your Android device. Here are some of them:

              -
                -
              • You can enjoy the latest features and improvements of After Effects 2020, such as AI-powered rotoscoping, cinemagraphs, video effects, and more.
              • -
              • You can enhance your creativity and productivity by using After Effects 2020 on the go, wherever you are.
              • -
              • You can save money and storage space by downloading After Effects 2020 APK instead of the full version of the software, which requires a subscription and a lot of disk space.
              • -
              • You can bypass the restrictions and limitations of the Google Play Store, which might not have the latest version of After Effects 2020 or might not be compatible with your device.
              • -
              -

              New Features in After Effects 2020

              -

              After Effects 2020 is the latest version of Adobe's software for creating motion graphics and visual effects. It was released in November 2019 and has many new features and improvements that make it easier and faster to create stunning animations and effects. Here are some of them:

              -

              AI-powered Rotoscoping

              -

              Rotoscoping is the process of tracing and masking objects in video clips to isolate them from the background or apply effects to them. It can be a tedious and time-consuming task, especially for complex or moving objects.

              -

              But with After Effects 2020, you can use the new Roto Brush 2 tool, which uses artificial intelligence to automatically track and mask objects in video clips. You just need to draw a rough outline around the object, and the tool will do the rest. You can also refine the mask with the Refine Edge tool, which uses machine learning to detect edges and details.

              -

              Cinemagraphs

              -

              Cinemagraphs are still images with subtle motion, such as a flickering candle, a waving flag, or a flowing waterfall. They can create a sense of life and movement in your images, making them more engaging and eye-catching.

              -

              With After Effects 2020, you can create cinemagraphs easily and quickly. You just need to import a video clip, trim it to the desired length, freeze a frame as the background layer, and mask out the areas that you want to keep moving. You can also adjust the speed, direction, and loop of the motion.

              -

              after effects 2020 apk download
              -after effects 2020 apk free
              -after effects 2020 apk mod
              -after effects 2020 apk cracked
              -after effects 2020 apk for android
              -after effects 2020 apk full version
              -after effects 2020 apk latest
              -after effects 2020 apk offline
              -after effects 2020 apk premium
              -after effects 2020 apk pro
              -adobe after effects 2020 apk
              -adobe after effects 2020 apk download
              -adobe after effects 2020 apk free
              -adobe after effects 2020 apk mod
              -adobe after effects 2020 apk cracked
              -adobe after effects 2020 apk for android
              -adobe after effects 2020 apk full version
              -adobe after effects 2020 apk latest
              -adobe after effects 2020 apk offline
              -adobe after effects 2020 apk premium
              -adobe after effects 2020 apk pro
              -how to download after effects 2020 apk
              -how to install after effects 2020 apk
              -how to use after effects 2020 apk
              -how to get after effects 2020 apk for free
              -how to update after effects 2020 apk
              -how to crack after effects 2020 apk
              -how to mod after effects 2020 apk
              -how to run after effects 2020 apk on android
              -how to activate after effects 2020 apk premium features
              -best alternatives to after effects 2020 apk
              -best apps like after effects 2020 apk
              -best sites to download after effects 2020 apk
              -best tutorials for after effects 2020 apk
              -best tips and tricks for after effects 2020 apk
              -benefits of using after effects 2020 apk
              -features of after effects 2020 apk
              -reviews of after effects 2020 apk
              -ratings of after effects 2020 apk
              -pros and cons of after effects 2020 apk
              -comparison of after effects 2020 apk and other video editing apps
              -difference between after effects 2020 apk and previous versions
              -compatibility of after effects 2020 apk with different devices and operating systems
              -requirements of after effects 2020 apk for installation and usage
              -limitations of after effects 2020 apk in terms of functionality and performance

              -

              Video Effects

              -

After Effects 2020 has a wide range of video effects that you can apply to your clips to enhance their look and feel. You can use color correction tools to adjust the hue, saturation, brightness, contrast, and more. You can use light leak effects to add realistic flares and glows to your clips. You can use glitch effects to create distorted or corrupted footage. You can also use transition effects to create smooth and dynamic changes between clips.

              -

              How to Download and Install After Effects 2020 APK?

              -

              If you want to download and install After Effects 2020 APK on your Android device, you need to follow these steps:

              Download the APK File

              -

              The first step is to download the APK file for After Effects 2020. You can find it on various websites that offer APK files for Android apps, such as APKPure, APKMirror, or APKMonk. However, you need to be careful and choose a reputable and reliable source, as some APK files might contain malware or viruses that can harm your device or steal your data.

              -

              One of the websites that we recommend is APKPure, which is a trusted and verified platform that provides safe and fast downloads of APK files. You can visit their website and search for After Effects 2020 APK, or use this link: https://apkpure.com/adobe-after-effects-2020/com.adobe.aftereffects. You will see a page with the details and reviews of the app, as well as a download button. Click on the download button and wait for the file to be downloaded to your device.

              -

              Enable Unknown Sources

              -

              The next step is to enable unknown sources on your Android device. This is a security setting that allows you to install apps from sources other than the Google Play Store. By default, this setting is disabled, so you need to enable it before you can install After Effects 2020 APK.

              -

              To enable unknown sources, you need to go to your device's settings and look for the security or privacy option. Depending on your device model and Android version, this option might be located in different places. For example, on some devices, you might find it under Settings > Security > Unknown Sources, while on others, you might find it under Settings > Apps > Special Access > Install Unknown Apps.

              -

              Once you find the option, you need to tap on it and toggle it on. You might see a warning message that tells you about the risks of installing apps from unknown sources. Read it carefully and tap on OK or Allow if you agree to proceed.

              -

              Install the APK File

              -

              The final step is to install the APK file for After Effects 2020. To do this, you need to locate the file on your device's storage, either using a file manager app or by accessing the download folder. Once you find the file, tap on it and follow the instructions on the screen. You might see a prompt that asks you to confirm the installation or grant permissions to the app. Tap on Install or Accept if you agree.

              -

              Wait for the installation process to finish, which might take a few minutes depending on your device's speed and performance. Once it is done, you will see a message that says "App installed" or "Installation successful". You can then tap on Open or Done to launch or exit the app.

              -

              Launch the App and Enjoy

              -

              Congratulations! You have successfully downloaded and installed After Effects 2020 APK on your Android device. You can now launch the app and enjoy creating amazing motion graphics and visual effects with it.

              -

              To launch the app, you can either tap on its icon on your home screen or app drawer, or go back to the APKPure website and tap on Open. You will see a splash screen with the Adobe logo and then a welcome screen with some tips and tutorials on how to use the app.

              -

              You can explore the app's interface and features by tapping on the menu button at the top left corner of the screen. You will see options such as New Project, Open Project, Import File, Export File, Preferences, Help, and more. You can also access some tools and panels at the bottom of the screen, such as Timeline, Layers, Effects, Masks, Keyframes, and more.

              -

              You can create a new project by tapping on New Project and choosing a name and a resolution for your project. You can then import files from your device's storage or camera by tapping on Import File and selecting the files you want to use. You can then drag and drop them onto the timeline and start editing them with various effects and animations.

              -

              You can export your project by tapping on Export File and choosing a format and a quality for your output file. You can then save it to your device's storage or share it with others via email, social media, or cloud services.

              -

              If you need any help or guidance on how to use After Effects 2020 APK, you can tap on Help and access some resources such as tutorials, FAQs, forums, feedback, and more.

              Is After Effects 2020 APK Safe and Legal?

              -

              While downloading and installing After Effects 2020 APK might seem like a great idea, you should also be aware of some of the potential risks and legal issues that come with it. Here are some of them:

              -

              Risks

              -

              One of the main risks of downloading and installing APK files from unknown sources is that they might contain malware or viruses that can harm your device or steal your data. Malware or viruses can infect your device, slow it down, drain your battery, display unwanted ads, access your contacts, messages, photos, and other files, or even lock your device and demand a ransom to unlock it.

              -

To avoid this risk, you should always scan the APK file with antivirus software before installing it. You should also check the reviews and ratings of the app and the website that provides it, and look for any signs of suspicious or malicious activity. You should also avoid clicking on any pop-ups or links that might appear during the download or installation process.
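As an extra precaution on top of antivirus scanning, you can also compare the downloaded file against a checksum published by the download site, when one is available. The short Python sketch below shows the general idea; the file name and the expected hash are placeholders for illustration, not values from this article.

```python
import hashlib

# Placeholder values: replace with your downloaded file and the
# checksum published by the download site (if it provides one).
APK_PATH = "after-effects-2020.apk"
EXPECTED_SHA256 = "put-the-published-checksum-here"

def sha256_of(path: str) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(APK_PATH)
print("SHA-256:", actual)
print("Matches published checksum:", actual == EXPECTED_SHA256.lower())
```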

              -

              Legal Issues

              -

              Another risk of downloading and installing APK files from unknown sources is that they might not be authorized by the original developers or publishers of the app. This means that you might be violating their intellectual property rights, such as copyright, trademark, or patent. This can result in legal consequences, such as fines, lawsuits, or even criminal charges.

              -

              To avoid this risk, you should always respect the rights of the original developers or publishers of the app and only download and install APK files that are authorized by them. You should also check the terms and conditions of the app and the website that provides it, and look for any disclaimers or warnings that might indicate that the app is not official or licensed.

              -

              Recommendations

              -

              To minimize the risks and legal issues of downloading and installing APK files from unknown sources, here are some recommendations that you should follow:

              -
                -
              • Use a reputable and reliable website that provides safe and verified APK files for Android apps, such as APKPure.
              • -
              • Use an antivirus software to scan the APK file before installing it.
              • -
              • Check the reviews and ratings of the app and the website that provides it.
              • -
              • Check the terms and conditions of the app and the website that provides it.
              • -
              • Enable unknown sources only when you need to install an APK file, and disable it afterwards.
              • -
              • Update your device's software and security patches regularly.
              • -
              • Backup your device's data regularly.
              • -
              -

              Conclusion

              -

              In conclusion, After Effects 2020 APK is a great way to enjoy the latest features and improvements of Adobe's software for creating motion graphics and visual effects on your Android device. You can download and install it easily and quickly by following our guide above. However, you should also be aware of some of the potential risks and legal issues that come with it, and follow our recommendations to avoid or minimize them.

              -

              If you have any questions or feedback about After Effects 2020 APK, feel free to leave a comment below. We would love to hear from you!

              -

              FAQs

              -

              Here are some frequently asked questions and answers about After Effects 2020 APK:

              -
                -
              1. Is After Effects 2020 APK compatible with my device?
              2. -

                After Effects 2020 APK is compatible with most Android devices that run on Android 5.0 (Lollipop) or higher. However, some devices might not support some features or functions of the app due to hardware limitations or software conflicts. To check if your device is compatible with After Effects 2020 APK, you can visit this link: https://helpx.adobe.com/after-effects/system-requirements.html.

                -
              3. How do I update After Effects 2020 APK?
              4. -

                To update After Effects 2020 APK, you need to download and install the latest version of the APK file from a reputable source, such as APKPure. You can also check for updates within the app by tapping on the menu button at the top left corner of the screen and selecting Check for Updates. If there is a new version available, you will see a notification that prompts you to download and install it.

                -
              5. How do I uninstall After Effects 2020 APK?
              6. -

                To uninstall After Effects 2020 APK, you need to go to your device's settings and look for the apps or applications option. Depending on your device model and Android version, this option might be located in different places. For example, on some devices, you might find it under Settings > Apps & Notifications > See All Apps, while on others, you might find it under Settings > Apps > Manage Apps. Once you find the option, you need to tap on it and look for After Effects 2020 APK in the list of apps. Tap on it and select Uninstall. You might see a confirmation message that asks you to confirm the uninstallation. Tap on OK or Yes if you agree.

                -
              7. How do I get support for After Effects 2020 APK?
              8. -

                If you need any support or assistance for After Effects 2020 APK, you can contact the developers or publishers of the app directly. You can find their contact information on the website that provides the APK file, or within the app by tapping on the menu button at the top left corner of the screen and selecting About or Contact. You can also visit the official website of Adobe After Effects, which is https://www.adobe.com/products/aftereffects.html, and access some resources such as help articles, tutorials, forums, feedback, and more.

                -
              9. Can I use After Effects 2020 APK with other Adobe apps?
              10. -

                Yes, you can use After Effects 2020 APK with other Adobe apps, such as Photoshop, Illustrator, Premiere Pro, and more. You can import and export files between these apps and use them to create amazing projects. You can also use Adobe Creative Cloud to sync your files and settings across your devices and access them anytime and anywhere.

                -

              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Air Fighter Pro APK How to Master the Controls Navigation and Tactics of the Best Flight Simulator and Combat Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Air Fighter Pro APK How to Master the Controls Navigation and Tactics of the Best Flight Simulator and Combat Game.md deleted file mode 100644 index 8d043bbf80883709a0288501b6c77d18fe1dbfe7..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Air Fighter Pro APK How to Master the Controls Navigation and Tactics of the Best Flight Simulator and Combat Game.md +++ /dev/null @@ -1,79 +0,0 @@ - -

              AirFighters Pro APK: A Realistic and Exciting Flight Simulator Game

              -

              If you are a fan of flight simulator games, you might have heard of AirFighters Pro APK. This is a game that lets you experience the thrill of flying different planes in various scenarios and missions. You can explore realistic world maps, navigate through over 500 airports, land on aircraft carriers, and face challenging weather conditions. You can also plan and share your own tactical scenarios, test your skills in the world campaign, and fight against other planes in the dog-fight mode. In this article, we will tell you more about AirFighters Pro APK, how to download and install it, and what are its pros and cons.

              -

              What is AirFighters Pro APK?

              -

              AirFighters Pro APK is an Android game that was developed by RORTOS, a company that specializes in creating realistic flight simulator games. The game was released in 2013 and has received positive reviews from users and critics alike. The game is not available on Google Play Store, but you can download it from other sources for free. The game has a size of about 105 MB and requires Android 4.0 or higher to run.

              -

              air fighter pro apk


              Download File →→→ https://ssurll.com/2uNTyQ



              -

              Features of AirFighters Pro APK

              -

              AirFighters Pro APK has many features that make it one of the most realistic and exciting flight simulator games on Android. Here are some of them:

              -

              Realistic world maps and navigation

              -

              The game has realistic world maps that cover the entire globe. You can fly over different regions, such as Europe, Asia, Africa, America, and Australia. You can also use the GPS navigation system to find your way around the map. The game also has a radar system that shows you the position of your enemies and allies.

              -

              Over 500 actual airports and 1,107 runways

              -

              The game has over 500 actual airports that you can land on or take off from. You can choose from different types of airports, such as civil, military, or private. You can also select from different types of runways, such as asphalt, grass, or dirt. The game also has realistic physics and controls that affect your landing and take-off performance.

              -

Aircraft carriers and real-time weather conditions

              -

              The game has aircraft carriers that you can use as bases for your missions. You can land on or take off from these carriers using special procedures and equipment. You can also face different weather conditions that affect your flight, such as wind, rain, snow, fog, or storms.

              -

              Plan and share your tactical scenarios

              -

              The game has a feature that allows you to create your own tactical scenarios using the map editor. You can set up different objectives, enemies, allies, weather conditions, and other parameters for your scenario. You can also share your scenario with other players online or play their scenarios.

              -

              Test yourself in the world campaign and dog-fight mode

              -

              The game has a world campaign mode that consists of 60 missions that span across different regions and scenarios. You can complete these missions to earn trophies and unlock new planes. The game also has a dog-fight mode that lets you fight against other planes in a one-on-one or team-based combat.

              -

              Choose from 8 different planes

              -

The game has 8 different planes that you can fly. Each plane has its own characteristics, such as speed, maneuverability, weapons, and fuel capacity. The planes are the F/A-18 Super Hornet, F-14 Super Tomcat, A-6 Intruder, F-4E Phantom II, MiG-29K Fulcrum, F-35B Lightning II, AV-8B Harrier II, and Tornado. You can customize the color and the loadout of your plane according to your preferences and mission requirements.

              -

              How to download and install AirFighters Pro APK?

              -

              If you want to download and install AirFighters Pro APK on your Android device, you need to follow these steps:

              -

              air fighter pro apk download
              -air fighter pro apk mod
              -air fighter pro apk free
              -air fighter pro apk full version
              -air fighter pro apk latest
              -air fighter pro apk android
              -air fighter pro apk offline
              -air fighter pro apk obb
              -air fighter pro apk unlimited money
              -air fighter pro apk data
              -air fighter pro apk revdl
              -air fighter pro apk rexdl
              -air fighter pro apk hack
              -air fighter pro apk cracked
              -air fighter pro apk premium
              -air fighter pro apk update
              -air fighter pro apk old version
              -air fighter pro apk 2023
              -air fighter pro apk gameplay
              -air fighter pro apk review
              -air fighter pro apk cheat
              -air fighter pro apk mod menu
              -air fighter pro apk no ads
              -air fighter pro apk for pc
              -air fighter pro apk for ios
              -air fighter pro apk + sd data
              -air fighter pro apk + data download
              -air fighter pro apk + obb download
              -air fighter pro apk + mod download
              -air fighter pro apk + hack download
              -download game air fighter pro apk
              -download game android air fighter pro apk
              -download game mod air fighter pro apk
              -download game offline air fighter pro apk
              -download game online air fighter pro apk
              -how to install air fighter pro apk
              -how to play air fighter pro apk
              -how to download air fighter pro apk for free
              -how to update air fighter pro apk
              -how to hack air fighter pro apk

              -

              Requirements for AirFighters Pro APK

              -

              Before you download and install AirFighters Pro APK, you need to make sure that your device meets these requirements:

• Android 4.0 or higher
• At least 105 MB of free storage space
• A stable internet connection

              Steps to download and install AirFighters Pro APK

              -

              After you have checked the requirements, you can proceed with these steps:

1. Go to a trusted website that offers AirFighters Pro APK for free download, such as [mob.org](^1^).
2. Click on the download button and wait for the APK file to be downloaded to your device.
3. Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install AirFighters Pro APK without any issues.
4. Locate the downloaded APK file on your device and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to be completed.
6. Launch the game and enjoy flying different planes in realistic scenarios.

              Pros and cons of AirFighters Pro APK

              -

              AirFighters Pro APK has many pros and cons that you should consider before downloading and playing it. Here are some of them:

              -

              Pros of AirFighters Pro APK

              -

              Some of the pros of AirFighters Pro APK are:

• It has realistic graphics and sound effects that create an immersive gaming experience.
• It has a variety of planes, airports, missions, and modes that offer a lot of replay value.
• It has a user-friendly interface and easy controls that make it suitable for beginners and experts alike.
• It has a map editor and a sharing feature that allow you to create and play your own scenarios or those of other players.
• It is free to download and play, unlike the original version that costs $4.99 on Google Play Store.

              Cons of AirFighters Pro APK

              -

              Some of the cons of AirFighters Pro APK are:

• It is not available on Google Play Store, which means you have to download it from third-party sources that may not be safe or reliable.
• It may have some bugs or glitches that affect the gameplay or performance of the game.
• It may require a lot of battery power and data usage, which can drain your device quickly.
• It may not be compatible with some devices or Android versions, which can cause crashes or errors.

              Conclusion

              -

              AirFighters Pro APK is a great game for flight simulator enthusiasts who want to experience the thrill of flying different planes in realistic scenarios. The game has many features that make it one of the most realistic and exciting flight simulator games on Android. However, the game also has some drawbacks that you should be aware of before downloading and playing it. If you are interested in trying out AirFighters Pro APK, you can follow the steps we have provided above to download and install it on your device.

              -

              Frequently Asked Questions

              -

              Here are some frequently asked questions about AirFighters Pro APK:

Q: Is AirFighters Pro APK safe to download and install?
A: AirFighters Pro APK is safe to download and install if you get it from a trusted website that offers virus-free and malware-free files. However, you should always be careful when downloading apps from unknown sources, as they may contain harmful or malicious content.

Q: Is AirFighters Pro APK legal to use?
A: AirFighters Pro APK is legal to use if you own the original version of the game or have permission from the developer to use it. However, you should not use AirFighters Pro APK for any illegal or unethical purposes, such as piracy or cheating.

Q: How can I update AirFighters Pro APK?
A: You can update AirFighters Pro APK by downloading the latest version of the game from the same website where you got it. You can also check for updates within the game settings or by visiting the official website of the developer.

Q: How can I uninstall AirFighters Pro APK?
A: You can uninstall AirFighters Pro APK by following these steps:
1. Go to your device settings and tap on the Apps or Applications option.
2. Find and select AirFighters Pro APK from the list of installed apps.
3. Tap on the Uninstall button and confirm your action.
4. Wait for the app to be removed from your device.

Q: How can I contact the developer of AirFighters Pro APK?
A: You can contact the developer of AirFighters Pro APK by visiting their official website at [rortos.com]. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, and YouTube. You can also send them an email at info@rortos.com or use the contact form on their website.

Q: How can I get more tips and tricks for AirFighters Pro APK?
A: You can get more tips and tricks for AirFighters Pro APK by watching video tutorials, reading guides, or joining online forums and communities of other players. You can also check out the game's FAQ section or the help menu within the game.
              -
              -
              \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build Craft The Ultimate 3D Crafting and Building Game - Download for Free.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build Craft The Ultimate 3D Crafting and Building Game - Download for Free.md deleted file mode 100644 index 9f6231ffe08f259d2ac9f4dd2e464abd37efd141..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Build Craft The Ultimate 3D Crafting and Building Game - Download for Free.md +++ /dev/null @@ -1,166 +0,0 @@ - -

              Crafting and Building Download App Store: How to Get the Best Building Game for Free

              -

              Do you love building games? Do you want to create your own world with your own rules? Do you want to have fun with your friends online? If you answered yes to any of these questions, then you should try Crafting and Building, the new free building game that is taking the app store by storm. In this article, we will tell you everything you need to know about this amazing game, how to download it from the app store, how to play it with your friends, and how to unleash your creativity in it. Let's get started!

              -

              crafting and building download app store


              Download Zip ····· https://ssurll.com/2uNVzm



              -

              What is Crafting and Building?

              -

              A brief introduction to the game and its features

              -

              Crafting and Building is a free game for the whole family, from kids to adults. It is a sandbox game, which means that you can do whatever you want in it, without any limitations or goals. You can explore, build, destroy, craft, mine, farm, and more. You can also play with your friends online, or join other players' worlds. The game has stunning graphics, realistic sounds, simple controls, and many game modes. You can choose from different types of blocks, from grass to diamond, and use them to create anything you can imagine. You can also decorate your house with furniture and accessories, or customize your character with skins. The game is constantly updated with new features and improvements.

              -

              Why you should play Crafting and Building

              -

              Crafting and Building is not just a game, it is a way of expressing yourself and having fun. Here are some of the reasons why you should play it:

              -
                -
              • It is free: You don't have to pay anything to download or play the game. You can enjoy it without any ads or in-app purchases.
              • -
              • It is fun: You can do whatever you want in the game, without any rules or restrictions. You can build your own house, castle, or mine, or explore other players' creations. You can also play with your friends online, chat with them, and cooperate with them.
              • -
              • It is creative: You can use your imagination and creativity to create anything you can think of. You can use different blocks, tools, colors, and shapes to make your own unique designs. You can also learn from other players' ideas and get inspired by them.
              • -
              • It is educational: You can learn a lot from playing the game, such as physics, geometry, architecture, art, logic, problem-solving, teamwork, and more. You can also improve your skills in building, crafting, mining, farming, and more.
              • -
              -

              How to download Crafting and Building from the app store

              -

              Step-by-step guide for Android users

              -

              If you have an Android device, such as a smartphone or a tablet, here are the steps you need to follow to download Crafting and Building from the app store:

              -
                -
              1. Open the Google Play Store app on your device.
              2. -
              3. Search for "Crafting and Building" in the search bar.
              4. -
              5. Select the game from the list of results. It has a blue icon with a white hammer on it.
              6. -
              7. Tap on "Install" and wait for the game to download and install on your device.
Step-by-step guide for iOS users

                -

                If you have an iOS device, such as an iPhone or an iPad, here are the steps you need to follow to download Crafting and Building from the app store:

                -
                  -
                1. Open the App Store app on your device.
                2. -
                3. Search for "Crafting and Building" in the search bar.
                4. -
                5. Select the game from the list of results. It has a blue icon with a white hammer on it.
                6. -
                7. Tap on "Get" and wait for the game to download and install on your device.
                8. -
                9. You may need to enter your Apple ID and password, or use Touch ID or Face ID, to confirm the installation.
                10. -
                -

                How to play Crafting and Building with your friends

                -

                How to join or create a multiplayer world

                -

                One of the best features of Crafting and Building is that you can play with your friends online, or join other players' worlds. Here is how you can do that:

                -


                -
                  -
                • To join a multiplayer world, tap on the "Multiplayer" button on the main menu. You will see a list of available worlds that you can join. You can also filter the list by name, mode, or language. Tap on the world that you want to join and wait for it to load.
                • -
                • To create a multiplayer world, tap on the "Create" button on the main menu. You will see a screen where you can customize your world. You can choose the name, mode, seed, difficulty, and privacy of your world. You can also invite your friends to join your world by tapping on the "Invite" button and selecting them from your contacts. Tap on the "Create" button again and wait for your world to load.
                • -
                -

                How to chat and cooperate with other players

                -

                Once you are in a multiplayer world, you can chat and cooperate with other players. Here is how you can do that:

                -
                  -
                • To chat with other players, tap on the "Chat" button on the top right corner of the screen. You will see a chat window where you can type your message and send it. You can also use emojis and stickers to express yourself. You can also mute or report players who are spamming or being rude by tapping on their names and selecting the appropriate option.
                • -
                • To cooperate with other players, you can use the "Share" button on the bottom right corner of the screen. You will see a menu where you can share your items, blocks, tools, or resources with other players. You can also request items from other players by tapping on the "Request" button and selecting what you need. You can also trade items with other players by tapping on the "Trade" button and choosing what you want to offer and what you want to receive.
                • -
                -

                How to unleash your creativity in Crafting and Building

                -

                How to use different blocks and tools

                -

                Crafting and Building gives you access to hundreds of different blocks and tools that you can use to create anything you want. Here is how you can use them:

                -
                  -
                • To select a block or a tool, tap on the "Inventory" button on the bottom left corner of the screen. You will see a grid where you can choose what you want to use. You can also swipe left or right on the screen to switch between different categories of blocks and tools.
                • -
                • To place a block, tap on the screen where you want to put it. You can also drag your finger across the screen to place multiple blocks at once.
                • -
                • To break a block, tap and hold on the block until it breaks. You can also use tools such as pickaxes, axes, shovels, or hammers to break blocks faster.
                • -
                • To craft a block or a tool, tap on the "Crafting" button on the bottom right corner of the screen. You will see a menu where you can choose what you want to make. You will need certain materials and recipes to craft certain items. You can also use crafting tables, furnaces, anvils, or chests to craft more advanced items.
                • -
                -

                How to design and build your own house, castle, or mine

                -

                One of the most fun things to do in Crafting and Building is to design and build your own house, castle, or mine. Here are some tips on how to do that:

                -
                  -
                • Plan ahead: Before you start building, think about what kind of structure you want to make, how big it will be, what materials you will use, and where you will place it. You can also sketch out your design on paper or use online tools such as Plotz or Minecraft Structure Planner to help you with your design.
                • -
                • Start simple: Don't try to build something too complex or ambitious at first. Start with a simple shape, such as a square or a rectangle, and then add more details and features as you go. You can also use templates or blueprints to guide you with your building.
                • -
                • Be creative: Don't be afraid to experiment with different blocks, colors, textures, and styles. You can also use decorations, such as paintings, carpets, flowers, or torches, to make your structure more lively and attractive. You can also use redstone, pistons, levers, or buttons to add some functionality and interactivity to your structure.
                • -
                • Have fun: The most important thing is to have fun while building. Don't worry too much about making mistakes or following rules. You can always undo, redo, or modify your building as you please. You can also ask for feedback or suggestions from other players, or show off your creations to them.
                • -
                -

                Tips and tricks for Crafting and Building

                -

                How to find hidden caves and treasures

                -

                Crafting and Building has a lot of hidden secrets and surprises that you can discover while exploring the world. Here are some tips on how to find hidden caves and treasures:

                -
                  -
                • Look for clues: Sometimes, you can find clues that indicate the presence of a hidden cave or treasure, such as holes, cracks, waterfalls, lava flows, or signs. Follow these clues and see where they lead you.
                • -
                • Listen for sounds: Sometimes, you can hear sounds that indicate the presence of a hidden cave or treasure, such as bats, zombies, skeletons, spiders, or chests. Use your ears and see if you can locate the source of the sound.
                • -
                • Dig around: Sometimes, you can find hidden caves or treasures by digging around randomly or systematically. Use a pickaxe or a shovel and see what you can uncover.
                • -
                • Use maps: Sometimes, you can find maps that show the location of a hidden cave or treasure. You can find maps in chests, villages, shipwrecks, or temples. Use the map and follow the directions to find the treasure.
                • -
                -

                How to tame and ride animals

                -

                Crafting and Building has a lot of animals that you can tame and ride for fun or transportation. Here are some tips on how to tame and ride animals:

                -
                  -
                • Find an animal: The first step is to find an animal that you want to tame and ride. You can find animals in different biomes, such as plains, forests, deserts, or oceans. Some of the animals that you can tame and ride are horses, donkeys, mules, llamas, camels, elephants, zebras, dolphins, sharks, turtles, or dragons.
                • -
                • Tame an animal: The next step is to tame an animal that you want to ride. You can tame an animal by feeding it its favorite food or item. For example, you can tame a horse by feeding it apples or carrots; you can tame a dolphin by feeding it fish; you can tame a dragon by feeding it meat. You will know that an animal is tamed when it shows hearts around it.
                • -
                • Ride an animal: The final step is to ride an animal that you have tamed. You can ride an animal by tapping on it and then tapping on the "Ride" button on the bottom right corner of the screen. You can control the animal by using the directional buttons on the bottom left corner of the screen. You can also jump with the animal by tapping on the "Jump" button on the bottom right corner of the screen.
                • -
                -

                Conclusion

                -

Crafting and Building is one of the best building games that you can download from the app store for free. It lets you create your own world with your own rules, play with your friends online, and use your imagination and creativity to build anything you want, and you can learn a lot while having fun along the way.

                -

                If you are looking for a game that will keep you entertained for hours and days, then Crafting and Building is the game for you. Download it now from the app store and start building your own world today!

                -

                Frequently Asked Questions

                -

                Here are some of the frequently asked questions about Crafting and Building:

                -
                  -
                1. Is Crafting and Building safe for kids?
                2. -


                  Crafting and Building is safe for kids, as long as they play it in a supervised and responsible manner. The game does not contain any violence, gore, or inappropriate content. However, the game does allow players to chat and interact with other players online, which may expose them to some risks or dangers. Therefore, parents should monitor their kids' online activity and set some rules and boundaries for them. Parents should also educate their kids about online safety and etiquette, such as not sharing personal information, not talking to strangers, not clicking on suspicious links, and reporting any abuse or harassment.

                  -
                3. How can I update Crafting and Building?
                4. -

                  Crafting and Building is constantly updated with new features and improvements. To update the game, you need to follow these steps:

                  -
                    -
                  • Open the app store app on your device.
                  • -
                  • Search for "Crafting and Building" in the search bar.
                  • -
                  • Select the game from the list of results.
                  • -
                  • Tap on "Update" and wait for the game to download and install the latest version.
                  • -
                  • You may need to enter your app store password, or use Touch ID or Face ID, to confirm the update.
                  • -
                  -
                5. How can I contact the developers of Crafting and Building?
                6. -

                  If you have any questions, feedback, suggestions, or issues about Crafting and Building, you can contact the developers of the game by using one of these methods:

                  -
                    -
                  • Email: You can send an email to craftingandbuilding@gmail.com and expect a reply within 24 hours.
                  • -
                  • Facebook: You can visit their Facebook page at Crafting and Building and send them a message or leave a comment.
                  • -
                  • Twitter: You can follow them on Twitter at @craftingbuild and tweet them or send them a direct message.
                  • -
                  -
                7. How can I support Crafting and Building?
                8. -

                  If you love Crafting and Building and want to support the game, you can do one of these things:

                  -
                    -
                  • Rate and review: You can rate and review the game on the app store and share your honest opinion and feedback with other players.
                  • -
                  • Share and invite: You can share the game with your friends and family and invite them to play with you online.
                  • -
                  • Donate: You can donate some money to the developers of the game by using the "Donate" button on the main menu. You can choose how much you want to donate and use your preferred payment method. Your donation will help the developers to keep improving the game and adding new features.
                  • -
                  -
                9. What are some alternatives to Crafting and Building?
                10. -

                  If you are looking for some alternatives to Crafting and Building, you can try some of these games:

                  -
                    -
                  • Minecraft: The original sandbox game that inspired Crafting and Building. It has more blocks, items, mobs, biomes, modes, mods, servers, and features than Crafting and Building. However, it is not free and requires a premium account to play online.
                  • -
                  • Roblox: A popular online platform that lets you create and play millions of games made by other players. It has more genres, styles, themes, graphics, sounds, and options than Crafting and Building. However, it is not a pure building game and requires an internet connection to play.
                  • -
                  • Terraria: A 2D sandbox game that combines building, exploration, combat, crafting, mining, and survival. It has more depth, complexity, variety, challenge, content, and fun than Crafting and Building. However, it is not free and has a different perspective than Crafting and Building.
                  • -

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Love Me Like You Do Ringtone by Jassa Dhillon - MP3 Free.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Love Me Like You Do Ringtone by Jassa Dhillon - MP3 Free.md deleted file mode 100644 index 962dd98303474353ca17cba02102bbb9af675800..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Love Me Like You Do Ringtone by Jassa Dhillon - MP3 Free.md +++ /dev/null @@ -1,86 +0,0 @@ -
                  -

                  Love Me Like You Do Ringtone Mp3 Download Jassa Dhillon

                  -

Do you love the song Love Me Like You Do by Jassa Dhillon? Do you want to set it as your phone's ringtone? If yes, then you are in the right place. In this article, we will show you how to download the Love Me Like You Do ringtone mp3 quickly and for free. We will also tell you more about the song and the singer, so you can enjoy it even more.

                  -

                  love me like you do ringtone mp3 download jassa dhillon


                  Download Zip ———>>> https://ssurll.com/2uNVsv



                  -

                  Introduction

                  -

                  What is Love Me Like You Do?

                  -

                  Love Me Like You Do is a Punjabi song by Jassa Dhillon, released in 2021. It is a romantic song that expresses the feelings of a lover who wants his partner to love him like no one else. The song has catchy lyrics, melodious music, and a beautiful video. The song has become very popular among Punjabi music fans and has received millions of views on YouTube.

                  -

                  Who is Jassa Dhillon?

                  -

                  Jassa Dhillon is a Punjabi singer, songwriter, and music producer. He was born in Punjab, India, and moved to Canada at a young age. He started his musical career in 2019 with his debut album Pyaar Mangdi. He has since released many hit songs such as Raule, Talja, Sira E Hou, and Love Me Like You Do. He is known for his unique voice, style, and lyrics.

                  -

                  How to Download Love Me Like You Do Ringtone Mp3?

                  -

                  If you want to download Love Me Like You Do ringtone mp3 for your phone, you have two options. You can either use Zedge or Prokerala. Both are free websites that offer a wide range of ringtones for different devices. Here are the steps to follow for each method:

                  -

                  Method 1: Use Zedge

                  -

                  Step 1: Visit Zedge website

                  -

Zedge is a popular website that provides wallpapers, ringtones, notifications, and games for mobile phones. To use Zedge, you need to visit its website at [Zedge]. You can also download its app from Google Play Store or App Store.

                  -

                  Step 2: Search for Love Me Like You Do ringtone

                  -

                  Once you are on Zedge website or app, you need to search for Love Me Like You Do ringtone. You can use the search bar or browse through the categories. You will see many results related to the song. Choose the one that you like and click on it.

                  -

                  Step 3: Download the ringtone in mp3 format

                  -

                  After you click on the ringtone, you will see a preview of it. You can listen to it and decide if you want to download it or not. If you like it, you can click on the download button and choose mp3 format. The ringtone will be downloaded to your device and you can set it as your phone's ringtone.

                  -


                  -

                  Method 2: Use Prokerala

                  -

                  Step 1: Visit Prokerala website

                  -

Prokerala is another website that offers ringtones, wallpapers, news, astrology, and more for mobile phones. To use Prokerala, you need to visit its website at [Prokerala].

                  -

                  Step 2: Find Love Me Like You Do ringtone

                  -

On the Prokerala website, you need to find the Love Me Like You Do ringtone. You can use the search box or go to the ringtones section. You will see a list of ringtones related to the song. Select the one that you want and click on it.

                  -

                  Step 3: Download the ringtone in mp3 or m4r format

                  -

                  When you click on the ringtone, you will see a page with the details of the ringtone. You can play it and check if you like it or not. If you want to download it, you can choose between mp3 or m4r format. Mp3 is for Android phones and m4r is for iPhone. Click on the download button and save the ringtone to your device. You can then set it as your phone's ringtone.

                  -

                  Conclusion

                  -

                  Summary of the article

                  -

In this article, we have shown you how to download the Love Me Like You Do ringtone mp3 quickly and for free. We have also given you some information about the song and the singer, Jassa Dhillon. We hope you enjoyed this article and found it useful.

                  -

                  Call to action

                  -

                  If you liked this article, please share it with your friends and family who might be interested in downloading Love Me Like You Do ringtone mp3. Also, don't forget to check out Jassa Dhillon's other songs and support his music. Thank you for reading and have a great day!

                  -

                  Frequently Asked Questions

                  -

                  Here are some common questions and answers related to Love Me Like You Do ringtone mp3 download:

                  -
                    -
                  • Q: Where can I watch the video of Love Me Like You Do song?
                  • -
                  • A: You can watch the video of Love Me Like You Do song on YouTube at [this link].
                  • -
                  • Q: How can I contact Jassa Dhillon?
                  • -
                  • A: You can follow Jassa Dhillon on his social media accounts such as Instagram, Facebook, and Twitter. You can also send him an email at jassadhillonofficial@gmail.com.
                  • -
                  • Q: What are some other popular songs by Jassa Dhillon?
                  • -
                  • A: Some other popular songs by Jassa Dhillon are Pyaar Mangdi, Raule, Talja, Sira E Hou, Jatt Te Jawani, and Aahi Gallan Teriyan.
                  • -
                  • Q: How can I make my own custom ringtone?
                  • -
• A: If you want to make your own custom ringtone, you can use online tools such as [Ringtone Maker] or [Audiko]. You can upload any audio file and edit it according to your preferences. You can then download it in mp3 or m4r format and set it as your phone's ringtone. A command-line alternative is sketched right after this FAQ list.
                  • -
                  • Q: What are some other websites that offer free ringtones?
                  • -
                  • A: Some other websites that offer free ringtones are [Mobile9], [Myxer], [Tones7], and [Melofania]. You can browse through their collections and download the ringtones that you like.
                  • -
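As a command-line alternative to the online ringtone editors mentioned in the answers above, you can also trim a clip locally with ffmpeg. This is only a rough sketch: it assumes ffmpeg is installed, song.mp3 stands in for whatever source file you already have, and the timestamps are just examples (ringtones should generally stay around 30 seconds).

ffmpeg -i song.mp3 -ss 00:00:45 -t 30 -c copy ringtone.mp3    # Android: cut a 30-second clip starting at 0:45 without re-encoding
ffmpeg -i song.mp3 -ss 00:00:45 -t 30 -c:a aac ringtone.m4a   # iPhone: re-encode to AAC, then rename the .m4a file to .m4r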

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Weapon Throwing RPG 2 MOD APK with Unlimited Money and Stamina.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Weapon Throwing RPG 2 MOD APK with Unlimited Money and Stamina.md deleted file mode 100644 index 2af8b0978cdd3dc2b58af80899a376c0ae929a85..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Weapon Throwing RPG 2 MOD APK with Unlimited Money and Stamina.md +++ /dev/null @@ -1,92 +0,0 @@ - -

                  Weapon Throwing RPG 2 Mod APK: A Fun and Challenging Game for RPG Fans

                  -

                  If you are looking for a new and exciting role-playing game to play on your Android device, you might want to check out Weapon Throwing RPG 2. This game is a sequel to the popular Weapon Throwing RPG, which has over 1 million downloads on Google Play. In this game, you will play as a Lord who must use his weapon throwing skills to defeat various monsters and enemies in different stages. You will also be able to customize your character, upgrade your weapons, and learn new skills as you progress through the game.

                  -

                  weapon throwing rpg 2 mod apk


Download https://ssurll.com/2uNQqs



                  -

However, if you want to enjoy the game to the fullest, you might need some extra resources and features that are not available in the original version. That's why we recommend downloading Weapon Throwing RPG 2 mod apk, a modified version of the game that gives you access to unlimited coins and gems, all weapons and skills unlocked, no ads, and more. In this article, we will tell you more about what Weapon Throwing RPG 2 mod apk is, why you should download it, what features it offers, and how to download and install it on your device.

                  -

                  Introduction

                  -

                  What is Weapon Throwing RPG 2?

                  -

                  Weapon Throwing RPG 2 is a fun and challenging role-playing game developed by Yuika Project. The game is set in a fantasy world where you will play as a Lord who must use his weapon throwing technique to fight against various monsters and enemies. The game has over 200 stages with different difficulty levels, where you will encounter different types of enemies such as goblins, skeletons, dragons, wyverns, and more. You will also be able to collect and upgrade over 100 weapons such as swords, axes, spears, hammers, daggers, bows, and more. You can also learn over 50 skills that will help you in your battles such as healing, buffing, debuffing, summoning, and more.

                  -

                  What is a mod apk?

                  -

                  A mod apk is a modified version of an original application that has been altered by third-party developers to provide additional features and benefits that are not available in the original version. A mod apk usually requires no root access and can be installed easily on any Android device. However, a mod apk may not be compatible with some devices or may cause some issues such as crashes or bugs. Therefore, it is important to download a mod apk from a trusted source and follow the instructions carefully.

                  -


                  -

                  Why download Weapon Throwing RPG 2 mod apk?

                  -

                  Weapon Throwing RPG 2 mod apk is a great option for those who want to enjoy the game without any limitations or restrictions. By downloading this mod apk, you will be able to get unlimited coins and gems, which are the main currencies in the game. You can use these coins and gems to buy new weapons, upgrade your existing weapons, learn new skills, unlock new stages, and more. You will also be able to unlock all weapons and skills from the start, which will give you an edge over your enemies. Moreover, you will not have to deal with any annoying ads that may interrupt your gameplay or consume your data. And best of all, you will not need to root your device or risk damaging it by installing this mod apk.

                  -

Features of Weapon Throwing RPG 2 mod apk

                  -

                  Weapon Throwing RPG 2 mod apk has many features that will make your gaming experience more enjoyable and satisfying. Here are some of the main features of this mod apk:

                  -

                  Unlimited coins and gems

                  -

                  Coins and gems are the main currencies in Weapon Throwing RPG 2. You can use them to buy new weapons, upgrade your existing weapons, learn new skills, unlock new stages, and more. However, earning coins and gems can be time-consuming and tedious, especially if you want to get the best weapons and skills in the game. That's why Weapon Throwing RPG 2 mod apk gives you unlimited coins and gems, so you can buy anything you want without worrying about running out of resources. You can also use the coins and gems to revive your character if you die in a stage, or to skip a stage if you find it too hard.

                  -

                  All weapons and skills unlocked

                  -

                  Weapon Throwing RPG 2 has over 100 weapons and over 50 skills that you can collect and use in the game. However, not all of them are available from the start. You have to unlock them by completing certain stages, reaching certain levels, or spending coins and gems. This can be frustrating if you want to try out different weapons and skills or if you want to have more variety in your gameplay. That's why Weapon Throwing RPG 2 mod apk unlocks all weapons and skills from the start, so you can choose any weapon or skill you like without having to wait or pay for it. You can also switch between different weapons and skills during a stage, depending on the situation.

                  -

                  No ads and no root required

                  -

                  One of the most annoying things about playing games on your Android device is having to deal with ads that may pop up randomly or frequently during your gameplay. Ads can interrupt your concentration, waste your time, consume your data, or even expose you to malware or viruses. That's why Weapon Throwing RPG 2 mod apk removes all ads from the game, so you can play without any distractions or interruptions. Moreover, Weapon Throwing RPG 2 mod apk does not require root access to work on your device, which means you do not have to risk damaging your device or voiding its warranty by rooting it.

                  -

                  High-quality graphics and sound effects

                  -

                  Weapon Throwing RPG 2 has high-quality graphics and sound effects that will make you feel immersed in the game world. The game has colorful and detailed graphics that show the different environments, characters, weapons, enemies, and effects in the game. The game also has realistic and dynamic sound effects that match the actions and events in the game. You will hear the sounds of weapons being thrown, enemies being hit, skills being activated, coins and gems being collected, and more. You can also adjust the graphics and sound settings according to your preference or device performance.

                  -

                  Easy and intuitive controls

                  -

                  Weapon Throwing RPG 2 has easy and intuitive controls that will make you play the game smoothly and comfortably. The game uses a simple tap-and-drag mechanism to control your character and throw your weapons. You just have to tap on the screen to select a weapon, drag it to aim at an enemy, and release it to throw it. You can also tap on the skill icons at the bottom of the screen to activate them. The game also has an auto-throw feature that will make your character throw weapons automatically at the nearest enemy. You can turn this feature on or off according to your preference.

                  -

                  How to download and install Weapon Throwing RPG 2 mod apk

                  -

                  If you want to download and install Weapon Throwing RPG 2 mod apk on your Android device, you just have to follow these simple steps:

                  -

                  Step 1: Download the mod apk file from a trusted source

                  -

                  The first step is to download the mod apk file from a trusted source that provides safe and working mod apks. You can use this link to download Weapon Throwing RPG 2 mod apk for free. The file size is about 70 MB, so make sure you have enough space on your device before downloading it.

                  -

                  Step 2: Enable unknown sources on your device settings

                  -

                  The second step is to enable unknown sources on your device settings, which will allow you to install applications from sources other than Google Play Store. To do this, go to your device settings > security > unknown sources > enable.

                  -

                  Step 3: Install the mod apk file and launch the game

                  -


                  The third step is to install the mod apk file on your device by locating it in your file manager or downloads folder and tapping on it. Follow the instructions on the screen to complete the installation process. Once the installation is done, you can launch the game and enjoy the mod features.
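If you prefer to sideload the file from a computer instead of tapping through the file manager, the same installation can be done with the Android Debug Bridge. This is only a rough sketch: it assumes adb (part of the Android platform tools) is installed on the computer, USB debugging is enabled on the phone, and the file name below is just a placeholder for whatever the downloaded file is actually called.

adb devices                                # confirm the phone is connected and authorized
adb install WeaponThrowingRPG2-mod.apk     # sideload the downloaded apk (placeholder file name)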

                  -

                  Conclusion

                  -

                  Weapon Throwing RPG 2 is a fun and challenging role-playing game that will test your weapon throwing skills and strategy. You can play as a Lord who must defeat various monsters and enemies in different stages, using over 100 weapons and over 50 skills. You can also customize your character, upgrade your weapons, and learn new skills as you progress through the game.

                  -

                  However, if you want to have more fun and convenience in the game, you should download Weapon Throwing RPG 2 mod apk, which is a modified version of the game that gives you unlimited coins and gems, all weapons and skills unlocked, no ads, and more. You can download this mod apk from this link and install it on your device easily by following the steps we have provided above.

                  -

                  So, what are you waiting for? Download Weapon Throwing RPG 2 mod apk now and enjoy this amazing game with all its features and benefits. You will not regret it!

                  -

                  FAQs

                  -

                  Here are some of the frequently asked questions about Weapon Throwing RPG 2 mod apk:

                  -
                    -
                  • Is Weapon Throwing RPG 2 mod apk safe to use?
                  • -

Yes, Weapon Throwing RPG 2 mod apk is safe to use, as long as you download it from a trusted source that provides virus-free and working mod apks. However, you should always be careful when downloading and installing any mod apk on your device, as some of them may contain malware or viruses that can harm your device or steal your data. You should also back up your data before installing any mod apk, in case something goes wrong.

                    -
                  • Will Weapon Throwing RPG 2 mod apk work on my device?
                  • -

                    Weapon Throwing RPG 2 mod apk should work on most Android devices that meet the minimum requirements of the game. The game requires Android 4.4 or higher, and at least 70 MB of free space on your device. However, some devices may not be compatible with the mod apk or may experience some issues such as crashes or bugs. If that happens, you can try to uninstall and reinstall the mod apk, or contact the developer for support.

                    -
                  • Can I play Weapon Throwing RPG 2 mod apk online with other players?
                  • -

                    No, Weapon Throwing RPG 2 mod apk is not an online game, so you cannot play it online with other players. The game is a single-player game that does not require an internet connection to play. You can play it offline anytime and anywhere you want.

                    -
                  • Can I update Weapon Throwing RPG 2 mod apk to the latest version?
                  • -

Yes, you can update Weapon Throwing RPG 2 mod apk to the latest version, as long as the mod apk provider updates their file as well. However, you should always check the compatibility and functionality of the new version before updating it, as some updates may not work well with the mod features or may cause some problems. You should also back up your data before updating any mod apk, in case something goes wrong.

                    -
                  • Can I uninstall Weapon Throwing RPG 2 mod apk if I don't like it?
                  • -

Yes, you can uninstall Weapon Throwing RPG 2 mod apk if you don't like it or if you want to switch back to the original version of the game. You just have to delete the mod apk file from your device and download the original version from Google Play Store. However, you may lose your progress and data in the game if you uninstall the mod apk, so make sure you back up your data before uninstalling it.

                    -

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/sklearn-docs/t-SNE-perplexity/app.py b/spaces/sklearn-docs/t-SNE-perplexity/app.py deleted file mode 100644 index 645b29113421be80e711f7a250d3b64c7b8a7119..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/t-SNE-perplexity/app.py +++ /dev/null @@ -1,135 +0,0 @@ -from functools import partial - -import gradio as gr -import matplotlib.pyplot as plt -from matplotlib.ticker import NullFormatter -import numpy as np -from sklearn import datasets, manifold - - -SEED = 0 -N_COMPONENTS = 2 -np.random.seed(SEED) - - -def get_circles(n_samples): - X, color = datasets.make_circles( - n_samples=n_samples, - factor=0.5, - noise=0.05, - random_state=SEED - ) - return X, color - - -def get_s_curve(n_samples): - X, color = datasets.make_s_curve(n_samples=n_samples, random_state=SEED) - X[:, 1], X[:, 2] = X[:, 2], X[:, 1].copy() - return X, color - - -def get_uniform_grid(n_samples): - x = np.linspace(0, 1, int(np.sqrt(n_samples))) - xx, yy = np.meshgrid(x, x) - X = np.hstack( - [ - xx.ravel().reshape(-1, 1), - yy.ravel().reshape(-1, 1), - ] - ) - color = xx.ravel() - return X, color - - -DATA_MAPPING = { - 'Circles': get_circles, - 'S-curve': get_s_curve, - 'Uniform Grid': get_uniform_grid, -} - - - -def plot_data(dataset: str, perplexity: int, n_samples: int, tsne: bool): - if isinstance(perplexity, dict): - perplexity = perplexity['value'] - else: - perplexity = int(perplexity) - - X, color = DATA_MAPPING[dataset](n_samples) - if tsne: - tsne = manifold.TSNE( - n_components=N_COMPONENTS, - init="random", - random_state=0, - perplexity=perplexity, - n_iter=400, - ) - Y = tsne.fit_transform(X) - else: - Y = X - - fig, ax = plt.subplots(figsize=(7, 7)) - - ax.scatter(Y[:, 0], Y[:, 1], c=color) - ax.xaxis.set_major_formatter(NullFormatter()) - ax.yaxis.set_major_formatter(NullFormatter()) - ax.axis("tight") - - return fig - - -title = "t-SNE: The effect of various perplexity values on the shape" -description = """ -t-Stochastic Neighborhood Embedding ([t-SNE](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html)) is a powerful technique dimensionality reduction and visualization of high dimensional datasets. - -One of the key parameters in t-SNE is perplexity, which controls the number of nearest neighbors used to represent each data point in the low-dimensional space. - -In this illustration, we explore the impact of various perplexity values on t-SNE visualizations using three commonly used datasets: Concentric Circles, S-curve and Uniform Grid. - -By comparing the resulting visualizations, we demonstrate how changing the perplexity value affects the shape of the visualization. 
- -Created by [@Hnabil](https://huggingface.co/Hnabil) based on [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/manifold/plot_t_sne_perplexity.html) -""" - - - -with gr.Blocks(title=title) as demo: - gr.HTML(f"{title}") - gr.Markdown(description) - - input_data = gr.Radio( - list(DATA_MAPPING), - value="Circles", - label="dataset" - ) - n_samples = gr.Slider( - minimum=100, - maximum=1000, - value=150, - step=25, - label='Number of Samples' - ) - perplexity = gr.Slider( - minimum=2, - maximum=100, - value=5, - step=1, - label='Perplexity' - ) - with gr.Row(): - with gr.Column(): - plot = gr.Plot(label="Original data") - fn = partial(plot_data, tsne=False) - input_data.change(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - n_samples.change(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - demo.load(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - with gr.Column(): - plot = gr.Plot(label="t-SNE") - fn = partial(plot_data, tsne=True) - input_data.change(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - perplexity.change(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - n_samples.change(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - demo.load(fn=fn, inputs=[input_data, perplexity, n_samples], outputs=plot) - - -demo.launch() diff --git a/spaces/smajumdar/nemo_conformer_rnnt_large_streaming/README.md b/spaces/smajumdar/nemo_conformer_rnnt_large_streaming/README.md deleted file mode 100644 index aab42971f5066136579f376d92838c7d0c654d90..0000000000000000000000000000000000000000 --- a/spaces/smajumdar/nemo_conformer_rnnt_large_streaming/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Nemo_conformer_rnnt_large_streaming -emoji: 🐠 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt20.sh b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt20.sh deleted file mode 100644 index 31cd5c76b75081331ae03c5ea70ea7ddebaa06e1..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_wmt20.sh +++ /dev/null @@ -1,547 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -if [ -z $WORKDIR_ROOT ] ; -then - echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." 
- exit -fi - - - -set -x -e - -# TODO update the workdir and dest dir name -# put fasttext model -WORKDIR=$WORKDIR_ROOT -# put intermediate files -TMP_DIR=$WORKDIR_ROOT/tmp/tmp_wmt20_lowres_download -# output {train,valid,test} files to dest -DEST=$WORKDIR_ROOT/ML50/raw - -UTILS=$PWD/utils - -# per dataset locations -COMMONCRAWL_DIR=$TMP_DIR/commoncrawl -YANDEX_CORPUS=$WORKDIR_ROOT/wmt20/official/ru/yandex/1mcorpus.zip -# unzipped -CZENG_CORPUS=$WORKDIR_ROOT/wmt20/official/cs/czeng/czeng20-train -CCMT_DIR=$WORKDIR_ROOT/wmt20/official/zh/ccmt/parallel - -download_and_select() { - SUBFOLDER=$1 - URL=$2 - UNCOMPRESS_CMD=$3 - LANG=$4 - INPUT_FILEPATH=$5 - if [[ $# -gt 5 ]]; then - LANG_COL=$6 - EN_COL=$7 - fi - - mkdir -p $SUBFOLDER - cd $SUBFOLDER - wget -nc --content-disposition $URL - $UNCOMPRESS_CMD - - if [[ $# -gt 5 ]]; then - cut -f$LANG_COL $INPUT_FILEPATH > $INPUT_FILEPATH.$LANG - cut -f$EN_COL $INPUT_FILEPATH > $INPUT_FILEPATH.en - fi - cd .. - - ln -sf $SUBFOLDER/$INPUT_FILEPATH.$LANG $SUBFOLDER.$LANG - ln -sf $SUBFOLDER/$INPUT_FILEPATH.en $SUBFOLDER.en -} - -prepare_lid() { - pip install fasttext - - # TODO specify global workdir - MODEL=$WORKDIR/fasttext/lid.176.bin - LID_MULTI=$UTILS/fasttext_multi_filter.py - - if [ ! -f "$MODEL" ]; then - echo "downloading fasttext lid model..." - mkdir -p $WORKDIR/fasttext - wget -nc https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O $MODEL - fi -} - -prepare_moses() { - pushd $UTILS - echo 'Cloning Moses github repository (for tokenization scripts)...' - git clone https://github.com/moses-smt/mosesdecoder.git - popd -} - -lid_filter() { - # TODO specify global workdir - MODEL=$WORKDIR/fasttext/lid.176.bin - LID_MULTI=$UTILS/fasttext_multi_filter.py - - prepare_lid - - SRC=$1 - SRC_FILE=$2 - SRC_OUTPUT=$3 - TGT=$4 - TGT_FILE=$5 - TGT_OUTPUT=$6 - python $LID_MULTI --model $MODEL --inputs $SRC_FILE $TGT_FILE --langs $SRC $TGT --outputs $SRC_OUTPUT $TGT_OUTPUT -} - -prepare_ja_ted() { - mkdir -p ted - cd ted - - wget -nc https://wit3.fbk.eu/archive/2017-01-trnted//texts/en/ja/en-ja.tgz - tar -zxvf en-ja.tgz - cat en-ja/train.tags.en-ja.en | grep -v -P "^[ ]*\<" | sed 's/^[ \t]*//g' | sed 's/[ \t]*$//g' > en-ja/train.en-ja.en - cat en-ja/train.tags.en-ja.ja | grep -v -P "^[ ]*\<" | sed 's/^[ \t]*//g' | sed 's/[ \t]*$//g' > en-ja/train.en-ja.ja - - cd .. 
- ln -sf ted/en-ja/train.en-ja.ja ted.ja - ln -sf ted/en-ja/train.en-ja.en ted.en -} - -prepare_ja() { - OUTPUT_DIR=$TMP_DIR/ja - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select paracrawl "http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/release/2.0/bitext/en-ja.tar.gz" "tar -zxvf en-ja.tar.gz" ja en-ja/en-ja.bicleaner05.txt 4 3 & - download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-ja.tsv.gz" "gunzip -f news-commentary-v15.en-ja.tsv.gz" ja news-commentary-v15.en-ja.tsv 2 1 & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ja-en.tsv.gz" "gunzip -f wikititles-v2.ja-en.tsv.gz" ja wikititles-v2.ja-en.tsv 1 2 & - download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ja.langid.tsv.gz" "gunzip -f WikiMatrix.v1.en-ja.langid.tsv.gz" ja WikiMatrix.v1.en-ja.langid.tsv 3 2 & - download_and_select subtitle "https://nlp.stanford.edu/projects/jesc/data/split.tar.gz" "tar -zxvf split.tar.gz" ja split/train 2 1 & - download_and_select kftt "http://www.phontron.com/kftt/download/kftt-data-1.0.tar.gz" "tar -zxvf kftt-data-1.0.tar.gz" ja kftt-data-1.0/data/orig/kyoto-train & - - prepare_ja_ted & - - # ted data needs to - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.ja" | sort -V | xargs cat > all.ja - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter ja all.ja $DEST/train.ja_XX-en_XX.ja_XX en all.en $DEST/train.ja_XX-en_XX.en_XX -} - -prepare_ta() { - OUTPUT_DIR=$TMP_DIR/ta - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ta-en.tsv.gz" "gunzip -f wikititles-v2.ta-en.tsv.gz" ta wikititles-v2.ta-en.tsv 1 2 & - download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ta.langid.tsv.gz" "gunzip -f WikiMatrix.v1.en-ta.langid.tsv.gz" ta WikiMatrix.v1.en-ta.langid.tsv 3 2 & - download_and_select pmindia "http://data.statmt.org/pmindia/v1/parallel/pmindia.v1.ta-en.tsv" "" ta pmindia.v1.ta-en.tsv 2 1 & - download_and_select tanzil "https://object.pouta.csc.fi/OPUS-Tanzil/v1/moses/en-ta.txt.zip" "unzip en-ta.txt.zip" ta Tanzil.en-ta & - download_and_select pib "http://preon.iiit.ac.in/~jerin/resources/datasets/pib-v0.tar" "tar -xvf pib-v0.tar" ta pib/en-ta/train & - download_and_select mkb "http://preon.iiit.ac.in/~jerin/resources/datasets/mkb-v0.tar" "tar -xvf mkb-v0.tar" ta mkb/en-ta/mkb & - download_and_select ufal "http://ufal.mff.cuni.cz/~ramasamy/parallel/data/v2/en-ta-parallel-v2.tar.gz" "tar -zxvf en-ta-parallel-v2.tar.gz" ta en-ta-parallel-v2/corpus.bcn.train & - - wait - - # need special handling for nlpc - mkdir -p nlpc - cd nlpc - wget -nc https://raw.githubusercontent.com/nlpc-uom/English-Tamil-Parallel-Corpus/master/En-Ta%20Corpus/En-Ta%20English.txt - wget -nc https://github.com/nlpc-uom/English-Tamil-Parallel-Corpus/raw/master/En-Ta%20Corpus/En-Ta%20Tamil.txt - tail -n +4 "En-Ta English.txt" > en-ta.en - tail -n +4 "En-Ta Tamil.txt" > en-ta.ta - cd .. - ln -sf nlpc/en-ta.en nlpc.en - ln -sf nlpc/en-ta.ta nlpc.ta - - # remove previous results - rm -f all.?? 
- find ./ -maxdepth 1 -name "*.ta" | sort -V | xargs cat > all.ta - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter ta all.ta $DEST/train.ta_IN-en_XX.ta_IN en all.en $DEST/train.ta_IN-en_XX.en_XX -} - -prepare_iu() { - OUTPUT_DIR=$TMP_DIR/iu - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select nh "https://nrc-digital-repository.canada.ca/eng/view/dataset/?id=c7e34fa7-7629-43c2-bd6d-19b32bf64f60" "tar -zxvf Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0.1.tgz" iu Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0/NunavutHansard > /dev/null & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.iu-en.tsv.gz" "gunzip -f wikititles-v2.iu-en.tsv.gz" iu wikititles-v2.iu-en.tsv 1 2 & - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.iu" | sort -V | xargs cat | nh/Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0/scripts/normalize-iu-spelling.pl > all.iu - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - paste all.iu all.en | awk -F $'\t' '$1!=""&&$2!=""' > all.iuen - cut -f1 all.iuen > $DEST/train.iu_CA-en_XX.iu_CA - cut -f2 all.iuen > $DEST/train.iu_CA-en_XX.en_XX -} - -prepare_km() { - OUTPUT_DIR=$TMP_DIR/km - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select paracrawl "http://data.statmt.org/wmt20/translation-task/ps-km/wmt20-sent.en-km.xz" "unxz wmt20-sent.en-km.zx" km wmt20-sent.en-km 2 1 & - - # km-parallel has multiple sets, concat all of them together - mkdir -p opus - cd opus - wget -nc "http://data.statmt.org/wmt20/translation-task/ps-km/km-parallel.tgz" - tar -zxvf km-parallel.tgz - find ./km-parallel -maxdepth 1 -name "*.km" | sort -V | xargs cat > opus.km - find ./km-parallel -maxdepth 1 -name "*.en" | sort -V | xargs cat > opus.en - cd .. - ln -sf opus/opus.km . - ln -sf opus/opus.en . - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.km" | sort -V | xargs cat > all.km - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter km all.km $DEST/train.km_KH-en_XX.km_KH en all.en $DEST/train.km_KH-en_XX.en_XX -} - -prepare_ps() { - OUTPUT_DIR=$TMP_DIR/ps - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select paracrawl "http://data.statmt.org/wmt20/translation-task/ps-km/wmt20-sent.en-ps.xz" "unxz wmt20-sent.en-ps.xz" ps wmt20-sent.en-ps 2 1 & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ps-en.tsv.gz" "gunzip -f wikititles-v2.ps-en.tsv.gz" ps wikititles-v2.ps-en.tsv 1 2 & - # ps-parallel has multiple sets, concat all of them together - mkdir -p opus - cd opus - wget -nc "http://data.statmt.org/wmt20/translation-task/ps-km/ps-parallel.tgz" - tar -zxvf ps-parallel.tgz - find ./ps-parallel -maxdepth 1 -name "*.ps" | sort -V | xargs cat > opus.ps - find ./ps-parallel -maxdepth 1 -name "*.en" | sort -V | xargs cat > opus.en - cd .. - ln -sf opus/opus.ps opus.ps - ln -sf opus/opus.en opus.en - - wait - - # remove previous results - rm -f all.?? 
- find ./ -maxdepth 1 -name "*.ps" | sort -V | xargs cat > all.ps - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter ps all.ps $DEST/train.ps_AF-en_XX.ps_AF en all.en $DEST/train.ps_AF-en_XX.en_XX -} - -download_commoncrawl() { - mkdir -p $COMMONCRAWL_DIR - cd $COMMONCRAWL_DIR - - wget -nc "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz" - tar -zxvf training-parallel-commoncrawl.tgz -} -link_commoncrawl() { - LANG=$1 - ln -sf $COMMONCRAWL_DIR/commoncrawl.$LANG-en.en commoncrawl.en - ln -sf $COMMONCRAWL_DIR/commoncrawl.$LANG-en.$LANG commoncrawl.$LANG -} - -strip_xlf() { - INPUT_FILE=$1 - SRC=$2 - TGT=$3 - grep ']*>//g' | sed 's/<[^<>]*>$//g' > $INPUT_FILE.$SRC - grep ']*>//g' | sed 's/<[^<>]*>$//g' > $INPUT_FILE.$TGT -} - -download_and_process_tilde() { - URL=$1 - UNCOMPRESS_CMD=$2 - FILENAME=$3 - LANG=$4 - PROCESS_CMD=$5 - - mkdir -p tilde - cd tilde - wget -nc $URL - $UNCOMPRESS_CMD - echo "executing cmd" - echo $PROCESS_CMD - $PROCESS_CMD - cd .. - ln -sf tilde/$FILENAME.$LANG tilde.$LANG - ln -sf tilde/$FILENAME.en tilde.en -} - -prepare_cs() { - OUTPUT_DIR=$TMP_DIR/cs - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - #download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.cs-en.tsv.gz" "gunzip europarl-v10.cs-en.tsv.gz" cs europarl-v10.cs-en.tsv 1 2 & - #download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-cs.txt.gz" "gunzip en-cs.txt.gz" cs en-cs.txt 2 1 & - #link_commoncrawl cs - #download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.cs-en.tsv.gz" "gunzip news-commentary-v15.cs-en.tsv.gz" cs news-commentary-v15.cs-en.tsv 1 2 & - #download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.cs-en.tsv.gz" "gunzip wikititles-v2.cs-en.tsv.gz" cs wikititles-v2.cs-en.tsv 1 2 & - #download_and_process_tilde "http://data.statmt.org/wmt20/translation-task/rapid/RAPID_2019.cs-en.xlf.gz" "gunzip RAPID_2019.cs-en.xlf.gz" RAPID_2019.cs-en.xlf cs "strip_xlf RAPID_2019.cs-en.xlf cs en" & - #download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.cs-en.langid.tsv.gz" "gunzip WikiMatrix.v1.cs-en.langid.tsv.gz" cs WikiMatrix.v1.cs-en.langid.tsv 2 3 & - - #wait - - # remove previous results - #rm -f all.?? - #find ./ -maxdepth 1 -name "*.cs" | sort -V | xargs cat > all.cs - #find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - if [ -z $CZENG_CORPUS ] ; - then - echo "Please download CZENG_CORPUS manually and place them at $CZENG_CORPUS. Exitting..." 
- exit - fi - cat $CZENG_CORPUS | sed '/^$/d' | cut -f5 > all.cs - cat $CZENG_CORPUS | sed '/^$/d' | cut -f6 > all.en - - lid_filter cs all.cs $DEST/train.cs_CZ-en_XX.cs_CZ en all.en $DEST/train.cs_CZ-en_XX.en_XX -} - -prepare_de() { - OUTPUT_DIR=$TMP_DIR/de - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.de-en.tsv.gz" "gunzip europarl-v10.de-en.tsv.gz" de europarl-v10.de-en.tsv 1 2 & - download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-de.txt.gz" "gunzip en-de.txt.gz" de en-de.txt 2 1 & - link_commoncrawl de - download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.de-en.tsv.gz" "gunzip news-commentary-v15.de-en.tsv.gz" de news-commentary-v15.de-en.tsv 1 2 & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.de-en.tsv.gz" "gunzip wikititles-v2.de-en.tsv.gz" de wikititles-v2.de-en.tsv 1 2 & - download_and_process_tilde "http://data.statmt.org/wmt20/translation-task/rapid/RAPID_2019.de-en.xlf.gz" "gunzip RAPID_2019.de-en.xlf.gz" RAPID_2019.de-en.xlf de "strip_xlf RAPID_2019.de-en.xlf de en" & - download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.de-en.langid.tsv.gz" "gunzip WikiMatrix.v1.de-en.langid.tsv.gz" de WikiMatrix.v1.de-en.langid.tsv 2 3 & - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.de" | sort -V | xargs cat > all.de - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter de all.de $DEST/train.de_DE-en_XX.de_DE en all.en $DEST/train.de_DE-en_XX.en_XX -} - -prepare_tmx() { - TMX_FILE=$1 - git clone https://github.com/amake/TMX2Corpus $UTILS/tmx2corpus - pip install tinysegmenter - - python $UTILS/tmx2corpus/tmx2corpus.py $TMX_FILE -} - -prepare_pl() { - OUTPUT_DIR=$TMP_DIR/pl - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - # download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.pl-en.tsv.gz" "gunzip europarl-v10.pl-en.tsv.gz" pl europarl-v10.pl-en.tsv 1 2 & - # download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-pl.txt.gz" "gunzip en-pl.txt.gz" pl en-pl.txt 2 1 & - # download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.pl-en.tsv.gz" "gunzip wikititles-v2.pl-en.tsv.gz" pl wikititles-v2.pl-en.tsv 1 2 & - download_and_select tilde "https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2019.en-pl.tmx.zip" "gunzip rapid2019.en-pl.tmx.zip" bitext pl "prepare_tmx RAPID_2019.UNIQUE.en-pl.tmx" & - # download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-pl.langid.tsv.gz" "gunzip WikiMatrix.v1.en-pl.langid.tsv.gz" pl WikiMatrix.v1.en-pl.langid.tsv 3 2 & - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.pl" | sort -V | xargs cat > all.pl - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter pl all.pl $DEST/train.pl_PL-en_XX.pl_PL en all.en $DEST/train.pl_PL-en_XX.en_XX -} - -prepare_uncorpus() { - URLS=$1 - FILES=$2 - - mkdir -p uncorpus - cd uncorpus - - for URL in $URLS; do - wget -nc $URL - done - cat $FILES > uncorpus.tar.gz - tar -zxvf uncorpus.tar.gz - - cd ..
- ln -sf uncorpus/en-$LANG/UNv1.0.en-$LANG.$LANG uncorpus.$LANG - ln -sf uncorpus/en-$LANG/UNv1.0.en-$LANG.en uncorpus.en -} - -prepare_yandex() { - mkdir -p yandex - cd yandex - unzip $YANDEX_CORPUS ./ - cd .. - ln -s yandex/corpus.en_ru.1m.en yandex.en - ln -s yandex/corpus.en_ru.1m.ru yandex.ru -} - -prepare_ru() { - OUTPUT_DIR=$TMP_DIR/ru - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz" "tar -zxvf paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz" ru paracrawl-release1.en-ru.zipporah0-dedup-clean & - link_commoncrawl ru - download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-ru.tsv.gz" "gunzip news-commentary-v15.en-ru.tsv.gz" ru news-commentary-v15.en-ru.tsv 2 1 & - prepare_yandex & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ru-en.tsv.gz" "gunzip wikititles-v2.ru-en.tsv.gz" ru wikititles-v2.ru-en.tsv 1 2 & - prepare_uncorpus "https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02" "UNv1.0.en-ru.tar.gz.00 UNv1.0.en-ru.tar.gz.01 UNv1.0.en-ru.tar.gz.02" & - download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ru.langid.tsv.gz" "gunzip WikiMatrix.v1.en-ru.langid.tsv.gz" ru WikiMatrix.v1.en-ru.langid.tsv 3 2 & - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.ru" | sort -V | xargs cat > all.ru - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter ru all.ru $DEST/train.ru_RU-en_XX.ru_RU en all.en $DEST/train.ru_RU-en_XX.en_XX -} - -prepare_ccmt() { - mkdir -p ccmt - cd ccmt - # assume ccmt data is already unzipped under CCMT_DIR folder - cat $CCMT_DIR/datum2017/Book*_cn.txt | sed 's/ //g' > datum2017.detok.zh - cat $CCMT_DIR/datum2017/Book*_en.txt > datum2017.detok.en - cat $CCMT_DIR/casict2011/casict-A_ch.txt $CCMT_DIR/casict2011/casict-B_ch.txt $CCMT_DIR/casict2015/casict2015_ch.txt $CCMT_DIR/datum2015/datum_ch.txt $CCMT_DIR/neu2017/NEU_cn.txt datum2017.detok.zh > ccmt.zh - cat $CCMT_DIR/casict2011/casict-A_en.txt $CCMT_DIR/casict2011/casict-B_en.txt $CCMT_DIR/casict2015/casict2015_en.txt $CCMT_DIR/datum2015/datum_en.txt $CCMT_DIR/neu2017/NEU_en.txt datum2017.detok.en > ccmt.en - cd .. 
- ln -sf ccmt/ccmt.zh ccmt.zh - ln -sf ccmt/ccmt.en ccmt.en -} - -prepare_zh() { - OUTPUT_DIR=$TMP_DIR/zh - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - - download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-zh.tsv.gz" "gunzip news-commentary-v15.en-zh.tsv.gz" zh news-commentary-v15.en-zh.tsv 2 1 & - download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.zh-en.tsv.gz" "gunzip wikititles-v2.zh-en.tsv.gz" zh wikititles-v2.zh-en.tsv 1 2 & - prepare_uncorpus "https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01" "UNv1.0.en-zh.tar.gz.00 UNv1.0.en-zh.tar.gz.01" & - prepare_ccmt & - download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-zh.langid.tsv.gz" "gunzip WikiMatrix.v1.en-zh.langid.tsv.gz" zh WikiMatrix.v1.en-zh.langid.tsv 3 2 & - - wait - - # remove previous results - rm -f all.?? - find ./ -maxdepth 1 -name "*.zh" | sort -V | xargs cat > all.zh - find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en - lid_filter zh all.zh $DEST/train.zh_CN-en_XX.zh_CN en all.en $DEST/train.zh_CN-en_XX.en_XX -} - -prepare_tests() { - OUTPUT_DIR=$TMP_DIR - mkdir -p $OUTPUT_DIR - cd $OUTPUT_DIR - wget -nc http://data.statmt.org/wmt20/translation-task/dev.tgz - tar -zxvf dev.tgz - cd dev - - cat newsdev2020-jaen-src.ja.sgm | $UTILS/strip_sgm.sh > newsdev2020-jaen.ja - cat newsdev2020-jaen-ref.en.sgm | $UTILS/strip_sgm.sh > newsdev2020-jaen.en - split newsdev2020-jaen.ja -a 0 -n r/1/2 > $DEST/valid.ja_XX-en_XX.ja_XX - split newsdev2020-jaen.en -a 0 -n r/1/2 > $DEST/valid.ja_XX-en_XX.en_XX - split newsdev2020-jaen.ja -a 0 -n r/2/2 > $DEST/test.ja_XX-en_XX.ja_XX - split newsdev2020-jaen.en -a 0 -n r/2/2 > $DEST/test.ja_XX-en_XX.en_XX - - cat newsdev2020-iuen-src.iu.sgm | strip_sgm.sh > newsdev2020-iuen.iu - cat newsdev2020-iuen-ref.en.sgm | strip_sgm.sh > newsdev2020-iuen.en - split newsdev2020-iuen.iu -a 0 -n r/1/2 > $DEST/valid.iu_CA-en_XX.iu_CA - split newsdev2020-iuen.en -a 0 -n r/1/2 > $DEST/valid.iu_CA-en_XX.en_XX - split newsdev2020-iuen.iu -a 0 -n r/2/2 > $DEST/test.iu_CA-en_XX.iu_CA - split newsdev2020-iuen.en -a 0 -n r/2/2 > $DEST/test.iu_CA-en_XX.en_XX - - cat newsdev2020-taen-src.ta.sgm | strip_sgm.sh > newsdev2020-taen.ta - cat newsdev2020-taen-ref.en.sgm | strip_sgm.sh > newsdev2020-taen.en - split newsdev2020-taen.ta -a 0 -n r/1/2 > $DEST/valid.ta_IN-en_XX.ta_IN - split newsdev2020-taen.en -a 0 -n r/1/2 > $DEST/valid.ta_IN-en_XX.en_XX - split newsdev2020-taen.ta -a 0 -n r/2/2 > $DEST/test.ta_IN-en_XX.ta_IN - split newsdev2020-taen.en -a 0 -n r/2/2 > $DEST/test.ta_IN-en_XX.en_XX - - cp wikipedia.dev.km-en.km $DEST/valid.km_KH-en_XX.km_KH - cp wikipedia.dev.km-en.en $DEST/valid.km_KH-en_XX.en_XX - cp wikipedia.devtest.km-en.km $DEST/test.km_KH-en_XX.km_KH - cp wikipedia.devtest.km-en.en $DEST/test.km_KH-en_XX.en_XX - - cp wikipedia.dev.ps-en.ps $DEST/valid.ps_AF-en_XX.ps_AF - cp wikipedia.dev.ps-en.en $DEST/valid.ps_AF-en_XX.en_XX - cp wikipedia.devtest.ps-en.ps $DEST/test.ps_AF-en_XX.ps_AF - cp wikipedia.devtest.ps-en.en $DEST/test.ps_AF-en_XX.en_XX - - cat newsdev2020-plen-src.pl.sgm | strip_sgm.sh > newsdev2020-plen.pl - cat newsdev2020-plen-ref.en.sgm | strip_sgm.sh > newsdev2020-plen.en - split newsdev2020-plen.pl -a 0 -n r/1/2 > $DEST/valid.pl_PL-en_XX.pl_PL - split newsdev2020-plen.en -a 0 -n r/1/2 > $DEST/valid.pl_PL-en_XX.en_XX - split 
newsdev2020-plen.pl -a 0 -n r/2/2 > $DEST/test.pl_PL-en_XX.pl_PL - split newsdev2020-plen.en -a 0 -n r/2/2 > $DEST/test.pl_PL-en_XX.en_XX - - cat newstest2018-encs-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-cs_CZ.en_XX - cat newstest2018-encs-ref.cs.sgm | strip_sgm.sh > $DEST/valid.en_XX-cs_CZ.cs_CZ - cat newstest2019-encs-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-cs_CZ.en_XX - cat newstest2019-encs-ref.cs.sgm | strip_sgm.sh > $DEST/test.en_XX-cs_CZ.cs_CZ - - cat newstest2018-deen-src.de.sgm | strip_sgm.sh > $DEST/valid.de_DE-en_XX.de_DE - cat newstest2018-deen-ref.en.sgm | strip_sgm.sh > $DEST/valid.de_DE-en_XX.en_XX - cat newstest2018-ende-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-de_DE.en_XX - cat newstest2018-ende-ref.de.sgm | strip_sgm.sh > $DEST/valid.en_XX-de_DE.de_DE - cat newstest2019-deen-src.de.sgm | strip_sgm.sh > $DEST/test.de_DE-en_XX.de_DE - cat newstest2019-deen-ref.en.sgm | strip_sgm.sh > $DEST/test.de_DE-en_XX.en_XX - cat newstest2019-ende-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-de_DE.en_XX - cat newstest2019-ende-ref.de.sgm | strip_sgm.sh > $DEST/test.en_XX-de_DE.de_DE - - cat newstest2018-ruen-src.ru.sgm | strip_sgm.sh > $DEST/valid.ru_RU-en_XX.ru_RU - cat newstest2018-ruen-ref.en.sgm | strip_sgm.sh > $DEST/valid.ru_RU-en_XX.en_XX - cat newstest2018-enru-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-ru_RU.en_XX - cat newstest2018-enru-ref.ru.sgm | strip_sgm.sh > $DEST/valid.en_XX-ru_RU.ru_RU - cat newstest2019-ruen-src.ru.sgm | strip_sgm.sh > $DEST/test.ru_RU-en_XX.ru_RU - cat newstest2019-ruen-ref.en.sgm | strip_sgm.sh > $DEST/test.ru_RU-en_XX.en_XX - cat newstest2019-enru-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-ru_RU.en_XX - cat newstest2019-enru-ref.ru.sgm | strip_sgm.sh > $DEST/test.en_XX-ru_RU.ru_RU - - cat newstest2018-zhen-src.zh.sgm | strip_sgm.sh > $DEST/valid.zh_CN-en_XX.zh_CN - cat newstest2018-zhen-ref.en.sgm | strip_sgm.sh > $DEST/valid.zh_CN-en_XX.en_XX - cat newstest2018-enzh-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-zh_CN.en_XX - cat newstest2018-enzh-ref.zh.sgm | strip_sgm.sh > $DEST/valid.en_XX-zh_CN.zh_CN - cat newstest2019-zhen-src.zh.sgm | strip_sgm.sh > $DEST/test.zh_CN-en_XX.zh_CN - cat newstest2019-zhen-ref.en.sgm | strip_sgm.sh > $DEST/test.zh_CN-en_XX.en_XX - cat newstest2019-enzh-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-zh_CN.en_XX - cat newstest2019-enzh-ref.zh.sgm | strip_sgm.sh > $DEST/test.en_XX-zh_CN.zh_CN -} - -mkdir -p $DEST - -prepare_lid -prepare_moses -download_commoncrawl - -prepare_ja & -prepare_ta & -prepare_km & -prepare_ps & -prepare_iu & -prepare_cs & -prepare_de & -prepare_pl & -prepare_ru & -prepare_zh & - -# prepare valid/test set -prepare_tests & - -# wait - -# TODO remove intermediate files -# rm -rf $TMP_DIR diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_sp.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_sp.py deleted file mode 100644 index 702c4980389624f788abc0b42cdf54757a52512f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_sp.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -""" -Signal processing-based evaluation using waveforms -""" - -import csv -import numpy as np -import os.path as op - -import torch -import tqdm -from tabulate import tabulate -import torchaudio - -from examples.speech_synthesis.utils import batch_mel_spectral_distortion -from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion - - -def load_eval_spec(path): - with open(path) as f: - reader = csv.DictReader(f, delimiter='\t') - samples = list(reader) - return samples - - -def eval_distortion(samples, distortion_fn, device="cuda"): - nmiss = 0 - results = [] - for sample in tqdm.tqdm(samples): - if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]): - nmiss += 1 - results.append(None) - continue - # assume single channel - yref, sr = torchaudio.load(sample["ref"]) - ysyn, _sr = torchaudio.load(sample["syn"]) - yref, ysyn = yref[0].to(device), ysyn[0].to(device) - assert sr == _sr, f"{sr} != {_sr}" - - distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0] - _, _, _, _, _, pathmap = extra - nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn - ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn - results.append( - (distortion.item(), # path distortion - pathmap.size(0), # yref num frames - pathmap.size(1), # ysyn num frames - pathmap.sum().item(), # path length - nins.item(), # insertion - ndel.item(), # deletion - ) - ) - return results - - -def eval_mel_cepstral_distortion(samples, device="cuda"): - return eval_distortion(samples, batch_mel_cepstral_distortion, device) - - -def eval_mel_spectral_distortion(samples, device="cuda"): - return eval_distortion(samples, batch_mel_spectral_distortion, device) - - -def print_results(results, show_bin): - results = np.array(list(filter(lambda x: x is not None, results))) - - np.set_printoptions(precision=3) - - def _print_result(results): - dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0) - res = { - "nutt": len(results), - "dist": dist, - "dur_ref": int(dur_ref), - "dur_syn": int(dur_syn), - "dur_ali": int(dur_ali), - "dist_per_ref_frm": dist/dur_ref, - "dist_per_syn_frm": dist/dur_syn, - "dist_per_ali_frm": dist/dur_ali, - "ins": nins/dur_ref, - "del": ndel/dur_ref, - } - print(tabulate( - [res.values()], - res.keys(), - floatfmt=".4f" - )) - - print(">>>> ALL") - _print_result(results) - - if show_bin: - edges = [0, 200, 400, 600, 800, 1000, 2000, 4000] - for i in range(1, len(edges)): - mask = np.logical_and(results[:, 1] >= edges[i-1], - results[:, 1] < edges[i]) - if not mask.any(): - continue - bin_results = results[mask] - print(f">>>> ({edges[i-1]}, {edges[i]})") - _print_result(bin_results) - - -def main(eval_spec, mcd, msd, show_bin): - samples = load_eval_spec(eval_spec) - device = "cpu" - if mcd: - print("===== Evaluate Mean Cepstral Distortion =====") - results = eval_mel_cepstral_distortion(samples, device) - print_results(results, show_bin) - if msd: - print("===== Evaluate Mean Spectral Distortion =====") - results = eval_mel_spectral_distortion(samples, device) - print_results(results, show_bin) - - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser() - parser.add_argument("eval_spec") - parser.add_argument("--mcd", action="store_true") - parser.add_argument("--msd", action="store_true") - parser.add_argument("--show-bin", action="store_true") - args = parser.parse_args() - - main(args.eval_spec, args.mcd, args.msd, args.show_bin) diff --git 
a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/sentence_prediction.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/sentence_prediction.py deleted file mode 100644 index d5f9302c10b3410e7650433d54f70aad4fd1cfc4..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/sentence_prediction.py +++ /dev/null @@ -1,286 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os - -import contextlib -from dataclasses import dataclass, field -from typing import Optional -from omegaconf import MISSING, II, open_dict, OmegaConf - -import numpy as np -from fairseq.data import ( - ConcatSentencesDataset, - Dictionary, - IdDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - OffsetTokensDataset, - PrependTokenDataset, - RawLabelDataset, - RightPadDataset, - RollDataset, - SortDataset, - StripTokenDataset, - data_utils, -) -from fairseq.data.shorten_dataset import maybe_shorten_dataset -from fairseq.tasks import FairseqDataclass, FairseqTask, register_task -from fairseq.dataclass import ChoiceEnum - - -logger = logging.getLogger(__name__) -SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) - - -@dataclass -class SentencePredictionConfig(FairseqDataclass): - data: str = field(default=MISSING, metadata={"help": "path to data directory"}) - num_classes: int = field( - default=-1, - metadata={"help": "number of classes or regression targets"}, - ) - init_token: Optional[int] = field( - default=None, - metadata={"help": "add token at the beginning of each batch item"}, - ) - separator_token: Optional[int] = field( - default=None, - metadata={"help": "add separator token between inputs"}, - ) - no_shuffle: bool = field( - default=False, - ) - shorten_method: SHORTEN_METHOD_CHOICES = field( - default="none", - metadata={ - "help": "if not none, shorten sequences that exceed tokens_per_sample" - }, - ) - shorten_data_split_list: str = field( - default="", - metadata={ - "help": "comma-separated list of dataset splits to apply shortening to, " - 'e.g., "train,valid" (default: all dataset splits)' - }, - ) - add_prev_output_tokens: bool = field( - default=False, - metadata={ - "help": "add prev_output_tokens to sample, used for encoder-decoder arch" - }, - ) - max_positions: int = field( - default=512, - metadata={"help": "max tokens per example"}, - ) - - regression_target: bool = II("criterion.regression_target") - classification_head_name: str = II("criterion.classification_head_name") - seed: int = II("common.seed") - - -@register_task("sentence_prediction", dataclass=SentencePredictionConfig) -class SentencePredictionTask(FairseqTask): - """ - Sentence (or sentence pair) prediction (classification or regression) task. 
- - Args: - dictionary (Dictionary): the dictionary for the input of the task - """ - - def __init__(self, cfg, data_dictionary, label_dictionary): - super().__init__(cfg) - self.dictionary = data_dictionary - self._label_dictionary = label_dictionary - - @classmethod - def load_dictionary(cls, filename): - """Load the dictionary from the filename - - Args: - filename (str): the filename - """ - dictionary = Dictionary.load(filename) - dictionary.add_symbol("") - return dictionary - - @classmethod - def setup_task(cls, cfg, **kwargs): - assert cfg.num_classes > 0, "Must set task.num_classes" - - # load data dictionary - data_dict = cls.load_dictionary( - os.path.join(cfg.data, "input0", "dict.txt"), - ) - logger.info("[input] dictionary: {} types".format(len(data_dict))) - - # load label dictionary - if not cfg.regression_target: - label_dict = cls.load_dictionary( - os.path.join(cfg.data, "label", "dict.txt"), - ) - logger.info("[label] dictionary: {} types".format(len(label_dict))) - else: - label_dict = data_dict - return cls(cfg, data_dict, label_dict) - - def load_dataset(self, split, combine=False, **kwargs): - """Load a given dataset split (e.g., train, valid, test).""" - - def get_path(key, split): - return os.path.join(self.cfg.data, key, split) - - def make_dataset(key, dictionary): - split_path = get_path(key, split) - - try: - dataset = data_utils.load_indexed_dataset( - split_path, - dictionary, - combine=combine, - ) - except Exception as e: - if "StorageException: [404] Path not found" in str(e): - logger.warning(f"dataset {e} not found") - dataset = None - else: - raise e - return dataset - - input0 = make_dataset("input0", self.source_dictionary) - assert input0 is not None, "could not find dataset: {}".format( - get_path("input0", split) - ) - input1 = make_dataset("input1", self.source_dictionary) - - if self.cfg.init_token is not None: - input0 = PrependTokenDataset(input0, self.cfg.init_token) - - if input1 is None: - src_tokens = input0 - else: - if self.cfg.separator_token is not None: - input1 = PrependTokenDataset(input1, self.cfg.separator_token) - - src_tokens = ConcatSentencesDataset(input0, input1) - - with data_utils.numpy_seed(self.cfg.seed): - shuffle = np.random.permutation(len(src_tokens)) - - src_tokens = maybe_shorten_dataset( - src_tokens, - split, - self.cfg.shorten_data_split_list, - self.cfg.shorten_method, - self.max_positions(), - self.cfg.seed, - ) - - dataset = { - "id": IdDataset(), - "net_input": { - "src_tokens": RightPadDataset( - src_tokens, - pad_idx=self.source_dictionary.pad(), - ), - "src_lengths": NumelDataset(src_tokens, reduce=False), - }, - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(src_tokens, reduce=True), - } - - if self.cfg.add_prev_output_tokens: - prev_tokens_dataset = RightPadDataset( - RollDataset(src_tokens, 1), - pad_idx=self.dictionary.pad(), - ) - dataset["net_input"].update( - prev_output_tokens=prev_tokens_dataset, - ) - - if not self.cfg.regression_target: - label_dataset = make_dataset("label", self.label_dictionary) - if label_dataset is not None: - dataset.update( - target=OffsetTokensDataset( - StripTokenDataset( - label_dataset, - id_to_strip=self.label_dictionary.eos(), - ), - offset=-self.label_dictionary.nspecial, - ) - ) - else: - label_path = "{0}.label".format(get_path("label", split)) - if os.path.exists(label_path): - - def parse_regression_target(i, line): - values = line.split() - assert ( - len(values) == self.cfg.num_classes - ), f'expected num_classes={self.cfg.num_classes} 
regression target values on line {i}, found: "{line}"' - return [float(x) for x in values] - - with open(label_path) as h: - dataset.update( - target=RawLabelDataset( - [ - parse_regression_target(i, line.strip()) - for i, line in enumerate(h.readlines()) - ] - ) - ) - - nested_dataset = NestedDictionaryDataset( - dataset, - sizes=[src_tokens.sizes], - ) - - if self.cfg.no_shuffle: - dataset = nested_dataset - else: - dataset = SortDataset( - nested_dataset, - # shuffle - sort_order=[shuffle], - ) - - logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset))) - - self.datasets[split] = dataset - return self.datasets[split] - - def build_model(self, cfg): - from fairseq import models - - with open_dict(cfg) if OmegaConf.is_config(cfg) else contextlib.ExitStack(): - cfg.max_positions = self.cfg.max_positions - - model = models.build_model(cfg, self) - - model.register_classification_head( - self.cfg.classification_head_name, - num_classes=self.cfg.num_classes, - ) - - return model - - def max_positions(self): - return self.cfg.max_positions - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary - - @property - def label_dictionary(self): - return self._label_dictionary diff --git a/spaces/stomexserde/gpt4-ui/Examples/Burning Sands (720p) 1 1 Gb.md b/spaces/stomexserde/gpt4-ui/Examples/Burning Sands (720p) 1 1 Gb.md deleted file mode 100644 index 0630e418c92824b94414d64cd90475927b5366a2..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Burning Sands (720p) 1 1 Gb.md +++ /dev/null @@ -1,14 +0,0 @@ -
                  -

                  Burning Sands: A Gripping Drama About Underground Hazing

                  -

Burning Sands is a 2017 Netflix original movie that explores the dark and brutal world of fraternity hazing. The movie follows Zurich (Trevor Jackson), a freshman at a historically black college who is pledging Lambda Lambda Phi, a prestigious and powerful fraternity. Zurich and his fellow pledges endure a week of hellish initiation rituals, known as Hell Week, that test their physical and mental limits. Zurich is torn between honoring his code of silence and standing up against the violence and abuse inflicted by his senior brothers.

                  -

                  The movie is directed by Gerard McMurray, who also co-wrote the screenplay with Christine Berg. McMurray is an alumnus of Howard University and a member of Omega Psi Phi fraternity, and he drew inspiration from his own experiences of hazing. The movie also features Alfre Woodard, Steve Harris, Tosin Cole, Trevante Rhodes, and Moonlight director Barry Jenkins in supporting roles.

                  -

                  burning sands (720p) 1 1 gb


                  Download Filehttps://urlgoal.com/2uI7JS



                  -

                  Burning Sands received positive reviews from critics and audiences alike, who praised its realistic and unflinching portrayal of hazing culture, its strong performances, and its social commentary on race, masculinity, and loyalty. The movie was nominated for the Grand Jury Prize at the 2017 Sundance Film Festival, where it premiered before being acquired by Netflix.

                  -

                  If you are looking for a compelling and thought-provoking drama that will keep you on the edge of your seat, you can watch Burning Sands on Netflix. The movie has a runtime of 1 hour and 45 minutes, and it is available in 720p resolution with a file size of 1.1 GB[^1^] [^2^] [^4^].

                  -

                  - -

                  Burning Sands also raises awareness about the prevalence and consequences of hazing in fraternities and sororities across the United States. According to a 2008 national study by StopHazing, 73% of students involved in social fraternities or sororities experienced behaviors meeting the definition of hazing in order to join or maintain membership in their group[^1^]. Hazing can have serious physical and psychological effects on the victims, such as injuries, trauma, depression, anxiety, and even death. In 2017 alone, four students died as a result of fraternity hazing incidents at Penn State University, Louisiana State University, Texas State University, and Florida State University[^3^].

                  -

                  Despite the risks and harms of hazing, some fraternity members and alumni still defend it as a necessary and beneficial practice that fosters group solidarity and loyalty. However, a recent study by researchers at Kent State University and the University of Texas at Austin challenges this assumption. The study, published in the journal Evolution and Human Behavior, tracked six sets of fraternity inductees as they underwent a months-long hazing process. The researchers found little evidence that hazing severity was associated with increased group solidarity among the inductees[^2^]. Instead, they suggest that hazing may serve other functions, such as signaling commitment, deterring free-riders, or satisfying sadistic impulses.

                  -

                  Burning Sands is a movie that not only entertains but also educates and provokes. It exposes the dark side of fraternity hazing and invites viewers to question its rationale and morality. It also showcases the talents of a diverse cast and crew who bring authenticity and nuance to their roles. If you are looking for a movie that will make you think and feel, you should watch Burning Sands on Netflix.

                  e93f5a0c3f
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Corel DRAW Graphics Suite X8 Keygen ((HOT)).md b/spaces/stomexserde/gpt4-ui/Examples/Corel DRAW Graphics Suite X8 Keygen ((HOT)).md deleted file mode 100644 index f1b49b9c87231b6d14ecfcf6054237791a947e07..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Corel DRAW Graphics Suite X8 Keygen ((HOT)).md +++ /dev/null @@ -1,47 +0,0 @@ - -

                  Corel DRAW Graphics Suite X8: A Powerful and Versatile Graphic Design Software

                  -

                  If you are looking for a graphic design software that can handle any project, whether it's vector illustration, photo editing, page layout, web design, or more, you should check out Corel DRAW Graphics Suite X8. This software is loaded with professional tools and features that will help you create stunning designs with ease and efficiency.

                  -

                  Corel DRAW Graphics Suite X8 keygen


                  Download Zip ✺✺✺ https://urlgoal.com/2uIa4o



                  -

                  Corel DRAW Graphics Suite X8 includes four main applications: Corel DRAW X8, Corel PHOTO-PAINT X8, Corel Font Manager, and Corel PowerTRACE X8. Each application has its own strengths and capabilities that complement each other and work seamlessly together.

                  -

                  Corel DRAW X8

                  -

                  Corel DRAW X8 is the core application of the suite, where you can create vector illustrations and page layouts. You can use it to design logos, flyers, brochures, posters, banners, infographics, and more. You can also import and edit bitmaps, PDFs, and other file formats.

                  -

                  Some of the features of Corel DRAW X8 are:

                  -

                  -
                    -
                  • Enhanced support for Windows 10, multi-monitor viewing, and 4K displays
                  • -
                  • New tools for creating complex shapes and objects, such as the LiveSketch tool, the Pointillizer tool, and the Interactive Alignment tool
                  • -
                  • New effects for adding depth and perspective to your designs, such as the Block Shadow tool and the Perspective effect
                  • -
                  • New options for customizing your workspace, such as the Touch workspace mode and the ability to import workspaces from previous versions
                  • -
                  • New learning resources and tutorials to help you get started and improve your skills
                  • -
                  -

                  Corel PHOTO-PAINT X8

                  -

                  Corel PHOTO-PAINT X8 is the photo-editing application of the suite, where you can enhance and retouch your photos. You can use it to crop, resize, rotate, adjust colors, remove blemishes, apply filters, add text, and more. You can also work with layers, masks, channels, and brushes to create complex compositions.

                  -

                  Some of the features of Corel PHOTO-PAINT X8 are:

                  -
                    -
                  • New tools for improving your photos, such as the Healing Clone tool, the Straighten Image dialog box, and the Gaussian Blur lens
                  • -
                  • New options for working with RAW files, such as the ability to preview adjustments in real time and apply batch processing
                  • -
                  • New support for stylus devices and pressure-sensitive tablets
                  • -
                  • New compatibility with Photoshop plugins
                  • -
                  • New learning resources and tutorials to help you get started and improve your skills
                  • -
                  -

                  Corel Font Manager

                  -

                  Corel Font Manager is the font management application of the suite, where you can organize and manage your fonts. You can use it to browse, preview, install, uninstall, activate, deactivate, and search fonts. You can also create font collections and filter fonts by type or project.

                  -

                  Some of the features of Corel Font Manager are:

                  -
                    -
                  • New support for OpenType variable fonts
                  • -
                  • New options for finding similar fonts or fonts based on an image
                  • -
                  • New integration with online font services such as Google Fonts and SkyFonts
                  • -
                  • New performance improvements and stability enhancements
                  • -
                  -

                  Corel PowerTRACE X8

                  -

                  Corel PowerTRACE X8 is the bitmap-to-vector tracing utility of the suite, where you can convert raster images into editable vector graphics. You can use it to trace logos, icons, sketches, or scanned images. You can also adjust the tracing settings and preview the results before exporting.

                  -

                  Some of the features of Corel PowerTRACE X8 are:

                  -
                    -
                  • New options for fine-tuning your tracing results, such as the ability to edit individual nodes or segments
                  • -
                  • New support for transparency in traced images
                  • -
                  • New integration with CorelDRAW X8 for a seamless workflow
                  • -
                  - -

                  In addition to these four applications,

                  Corel DRAW Graphics Suite X8 also includes 10,000 clipart and digital images; 2,000 high-resolution

                  81aa517590
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/sd_vae_approx.py b/spaces/supertori/files/stable-diffusion-webui/modules/sd_vae_approx.py deleted file mode 100644 index ea4c4a3a72941c31a654a29ce90cf8d9c82ce674..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/sd_vae_approx.py +++ /dev/null @@ -1,58 +0,0 @@ -import os - -import torch -from torch import nn -from modules import devices, paths - -sd_vae_approx_model = None - - -class VAEApprox(nn.Module): - def __init__(self): - super(VAEApprox, self).__init__() - self.conv1 = nn.Conv2d(4, 8, (7, 7)) - self.conv2 = nn.Conv2d(8, 16, (5, 5)) - self.conv3 = nn.Conv2d(16, 32, (3, 3)) - self.conv4 = nn.Conv2d(32, 64, (3, 3)) - self.conv5 = nn.Conv2d(64, 32, (3, 3)) - self.conv6 = nn.Conv2d(32, 16, (3, 3)) - self.conv7 = nn.Conv2d(16, 8, (3, 3)) - self.conv8 = nn.Conv2d(8, 3, (3, 3)) - - def forward(self, x): - extra = 11 - x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) - x = nn.functional.pad(x, (extra, extra, extra, extra)) - - for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: - x = layer(x) - x = nn.functional.leaky_relu(x, 0.1) - - return x - - -def model(): - global sd_vae_approx_model - - if sd_vae_approx_model is None: - sd_vae_approx_model = VAEApprox() - sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt"), map_location='cpu' if devices.device.type != 'cuda' else None)) - sd_vae_approx_model.eval() - sd_vae_approx_model.to(devices.device, devices.dtype) - - return sd_vae_approx_model - - -def cheap_approximation(sample): - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2 - - coefs = torch.tensor([ - [0.298, 0.207, 0.208], - [0.187, 0.286, 0.173], - [-0.158, 0.189, 0.264], - [-0.184, -0.271, -0.473], - ]).to(sample.device) - - x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs) - - return x_sample diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Corel PaintShop Pro 2020 Ultimate 22.0.0.132 With Crack ((FREE)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Corel PaintShop Pro 2020 Ultimate 22.0.0.132 With Crack ((FREE)).md deleted file mode 100644 index 1d98cc9d47fc1d9c15dfa781c54c772cc3790b5d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Corel PaintShop Pro 2020 Ultimate 22.0.0.132 With Crack ((FREE)).md +++ /dev/null @@ -1,18 +0,0 @@ -

                  Corel PaintShop Pro 2020 Ultimate 22.0.0.132 With Crack


                  DOWNLOAD ····· https://cinurl.com/2uEYwd



                  -
                  -15 adaptive CPU, so you can edit more and do it faster! - -Capture outstanding photographs with your new VIXIA HF R18 in SUPER HD quality, straight from the camera. With your choice of a wide array of image modes, expressive special effects and intuitive controls, the VIXIA HF R18 is a serious video enthusiast's tool. - -The Sony HDC-500 is an excellent starter camcorder for individuals and businesses alike, making it a top-selling camcorder brand. It's a versatile and versatile camcorder that does a lot of things really well. If you're looking to do more than capture video and photos, the HDC-500 is not what you're looking for. However, if you're looking to get started or just have a great camcorder that does a lot of things well, this camcorder is a great choice for you. - -Now you can shoot 3D video in addition to 2D, creating a stereoscopic 3D scene that will impress anyone who sees it! When you capture 3D video, you can capture every single pixel in your 3D image so that the results come alive and impress your viewers. - -Sony's VX-AV300 is a slim, lightweight camcorder that's loaded with features. The AV300 is one of the smallest cameras on the market and it's made to be portable. It's small enough to use in a variety of situations and it records video in both standard and high-def resolutions. This camera makes a great travel camcorder because it's compact, lightweight and easy to use. - -Sony’s VX-AV300 is a compact, lightweight camcorder that records video in both standard and high-def resolutions. With the AV300's small size, this camcorder is easy to use, making it the perfect travel camcorder. This camcorder's high definition resolution makes every video seem like a Blu-ray disc. You can record video in either widescreen or full screen. The VX-AV300 also has a 3.5-inch LCD touchscreen monitor. The VX-AV300 has a built-in stereo microphone with voice control and noise reduction. It also has several different modes and settings to help you capture the best video possible. The VX-AV300 camcorder is lightweight, portable and it's fun to use. - -The small, lightweight Sony VX-AV300 delivers high definition video recording at both 4fefd39f24
                  -
                  -
                  -

                  diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hyperkin Game Genie Save Editor For Ps3 Crack 55 ((HOT)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hyperkin Game Genie Save Editor For Ps3 Crack 55 ((HOT)).md deleted file mode 100644 index a3c3f80b19259fce9a87b7e4e9063c319bf8519d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Hyperkin Game Genie Save Editor For Ps3 Crack 55 ((HOT)).md +++ /dev/null @@ -1,6 +0,0 @@ -

                  hyperkin game genie save editor for ps3 crack 55


                  Download File >>>>> https://cinurl.com/2uEYor



                  -
                  -All cheats have been applied and you are ready to rock and roll! Simply start the game as normal and load the save you just transferred over. All of the cheats will ... 4d29de3e1b
                  -
                  -
                  -

                  diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Lehninger Principios De Bioquimica 6ta Edicion Pdf 47golkesl BEST.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Lehninger Principios De Bioquimica 6ta Edicion Pdf 47golkesl BEST.md deleted file mode 100644 index 3adcdc268d97cbd9e250c51ce0113bacda9c774a..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Lehninger Principios De Bioquimica 6ta Edicion Pdf 47golkesl BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Lehninger Principios De Bioquimica 6ta Edicion Pdf 47golkesl


                  DOWNLOAD >>>>> https://cinurl.com/2uEX7q



                  - -PRINCIPIOS DE BIOQUIMICA LEHNINGER, 6/ED. (Spanish Edition) ... have a Kindle? Get your Kindle here, or download a FREE Kindle Reading App. ... ¡El Lehninger de siempre en su última edición y con sus mil y pico páginas! Es un libro ... 4d29de3e1b
                  -
                  -
                  -

                  diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/utils/version_utils.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/utils/version_utils.py deleted file mode 100644 index 963c45a2e8a86a88413ab6c18c22481fb9831985..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/utils/version_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import subprocess -import warnings - -from packaging.version import parse - - -def digit_version(version_str: str, length: int = 4): - """Convert a version string into a tuple of integers. - - This method is usually used for comparing two versions. For pre-release - versions: alpha < beta < rc. - - Args: - version_str (str): The version string. - length (int): The maximum number of version levels. Default: 4. - - Returns: - tuple[int]: The version info in digits (integers). - """ - assert 'parrots' not in version_str - version = parse(version_str) - assert version.release, f'failed to parse version {version_str}' - release = list(version.release) - release = release[:length] - if len(release) < length: - release = release + [0] * (length - len(release)) - if version.is_prerelease: - mapping = {'a': -3, 'b': -2, 'rc': -1} - val = -4 - # version.pre can be None - if version.pre: - if version.pre[0] not in mapping: - warnings.warn(f'unknown prerelease version {version.pre[0]}, ' - 'version checking may go wrong') - else: - val = mapping[version.pre[0]] - release.extend([val, version.pre[-1]]) - else: - release.extend([val, 0]) - - elif version.is_postrelease: - release.extend([1, version.post]) - else: - release.extend([0, 0]) - return tuple(release) - - -def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen( - cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - -def get_git_hash(fallback='unknown', digits=None): - """Get the git hash of the current repo. - - Args: - fallback (str, optional): The fallback string when git hash is - unavailable. Defaults to 'unknown'. - digits (int, optional): kept digits of the hash. Defaults to None, - meaning all digits are kept. - - Returns: - str: Git commit hash. - """ - - if digits is not None and not isinstance(digits, int): - raise TypeError('digits must be None or an integer') - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - if digits is not None: - sha = sha[:digits] - except OSError: - sha = fallback - - return sha diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/convert_vit_from_tencentpretrain_to_huggingface.py b/spaces/szukevin/VISOR-GPT/train/scripts/convert_vit_from_tencentpretrain_to_huggingface.py deleted file mode 100644 index 9522bbd15a8fa81f9c619468be6d8051367c9fa1..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/convert_vit_from_tencentpretrain_to_huggingface.py +++ /dev/null @@ -1,69 +0,0 @@ -import argparse -import collections -import torch - - -def convert_vit_transformer_encoder_from_tencentpretrain_to_huggingface(input_model, output_model, layers_num): - for i in range(layers_num): - output_model["encoder.layer." 
+ str(i) + ".attention.self.query.weight"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.0.weight"] - output_model["encoder.layer." + str(i) + ".attention.self.query.bias"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.0.bias"] - output_model["encoder.layer." + str(i) + ".attention.self.key.weight"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.1.weight"] - output_model["encoder.layer." + str(i) + ".attention.self.key.bias"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.1.bias"] - output_model["encoder.layer." + str(i) + ".attention.self.value.weight"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.2.weight"] - output_model["encoder.layer." + str(i) + ".attention.self.value.bias"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.2.bias"] - output_model["encoder.layer." + str(i) + ".attention.output.dense.weight"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.final_linear.weight"] - output_model["encoder.layer." + str(i) + ".attention.output.dense.bias"] = \ - input_model["encoder.transformer." + str(i) + ".self_attn.final_linear.bias"] - output_model["encoder.layer." + str(i) + ".layernorm_before.weight"] = \ - input_model["encoder.transformer." + str(i) + ".layer_norm_1.gamma"] - output_model["encoder.layer." + str(i) + ".layernorm_before.bias"] = \ - input_model["encoder.transformer." + str(i) + ".layer_norm_1.beta"] - output_model["encoder.layer." + str(i) + ".intermediate.dense.weight"] = \ - input_model["encoder.transformer." + str(i) + ".feed_forward.linear_1.weight"] - output_model["encoder.layer." + str(i) + ".intermediate.dense.bias"] = \ - input_model["encoder.transformer." + str(i) + ".feed_forward.linear_1.bias"] - output_model["encoder.layer." + str(i) + ".output.dense.weight"] = \ - input_model["encoder.transformer." + str(i) + ".feed_forward.linear_2.weight"] - output_model["encoder.layer." + str(i) + ".output.dense.bias"] = \ - input_model["encoder.transformer." + str(i) + ".feed_forward.linear_2.bias"] - output_model["encoder.layer." + str(i) + ".layernorm_after.weight"] = \ - input_model["encoder.transformer." + str(i) + ".layer_norm_2.gamma"] - output_model["encoder.layer." + str(i) + ".layernorm_after.bias"] = \ - input_model["encoder.transformer." 
+ str(i) + ".layer_norm_2.beta"] - - -def main(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--input_model_path", type=str, default="models/input_model.bin", - help=".") - parser.add_argument("--output_model_path", type=str, default="models/output_model.bin", - help=".") - parser.add_argument("--layers_num", type=int, default=12, help=".") - - args = parser.parse_args() - - input_model = torch.load(args.input_model_path) - - output_model = collections.OrderedDict() - - output_model["embeddings.cls_token"] = input_model["embedding.patch.cls_emb"] - output_model["embeddings.patch_embeddings.projection.weight"] = input_model["embedding.patch.projection.weight"] - output_model["embeddings.patch_embeddings.projection.bias"] = input_model["embedding.patch.projection.bias"] - output_model["embeddings.position_embeddings"] = input_model["embedding.pos.embedding.weight"].unsqueeze(0) - - convert_vit_transformer_encoder_from_tencentpretrain_to_huggingface(input_model, output_model, args.layers_num) - - output_model["layernorm.weight"] = input_model["encoder.layer_norm.gamma"] - output_model["layernorm.bias"] = input_model["encoder.layer_norm.beta"] - torch.save(output_model, args.output_model_path) - - -if __name__ == "__main__": - main() diff --git a/spaces/tabeina/bingo1/src/components/chat-list.tsx b/spaces/tabeina/bingo1/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
                  - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
                  - ) -} diff --git a/spaces/taesiri/DeticChatGPT/detic/data/datasets/lvis_v1.py b/spaces/taesiri/DeticChatGPT/detic/data/datasets/lvis_v1.py deleted file mode 100644 index 4b9b279f17663def1c4913321efbb7490d591e90..0000000000000000000000000000000000000000 --- a/spaces/taesiri/DeticChatGPT/detic/data/datasets/lvis_v1.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import os - -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets.lvis import get_lvis_instances_meta - -logger = logging.getLogger(__name__) - -__all__ = ["custom_load_lvis_json", "custom_register_lvis_instances"] - - -def custom_register_lvis_instances(name, metadata, json_file, image_root): - """ - """ - DatasetCatalog.register(name, lambda: custom_load_lvis_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="lvis", **metadata - ) - - -def custom_load_lvis_json(json_file, image_root, dataset_name=None): - ''' - Modifications: - use `file_name` - convert neg_category_ids - add pos_category_ids - ''' - from lvis import LVIS - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - catid2contid = {x['id']: i for i, x in enumerate( - sorted(lvis_api.dataset['categories'], key=lambda x: x['id']))} - if len(lvis_api.dataset['categories']) == 1203: - for x in lvis_api.dataset['categories']: - assert catid2contid[x['id']] == x['id'] - 1 - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - if img_dict["file_name"].startswith("COCO"): - file_name = file_name[-16:] - record["file_name"] = os.path.join(image_root, file_name) - elif 'coco_url' in img_dict: - # e.g., http://images.cocodataset.org/train2017/000000391895.jpg - file_name = img_dict["coco_url"][30:] - record["file_name"] = os.path.join(image_root, file_name) - elif 'tar_index' in img_dict: - record['tar_index'] = img_dict['tar_index'] - - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - record["not_exhaustive_category_ids"] = img_dict.get( - "not_exhaustive_category_ids", []) - record["neg_category_ids"] = img_dict.get("neg_category_ids", []) - # NOTE: modified by Xingyi: convert to 0-based - record["neg_category_ids"] = [ - catid2contid[x] for x in record["neg_category_ids"]] - if 'pos_category_ids' in img_dict: - record['pos_category_ids'] = [ - catid2contid[x] for x in img_dict.get("pos_category_ids", [])] - if 'captions' in img_dict: - record['captions'] = img_dict['captions'] - if 'caption_features' in img_dict: - record['caption_features'] = img_dict['caption_features'] - image_id = record["image_id"] = 
img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = catid2contid[anno['category_id']] - if 'segmentation' in anno: - segm = anno["segmentation"] - valid_segm = [poly for poly in segm \ - if len(poly) % 2 == 0 and len(poly) >= 6] - # assert len(segm) == len( - # valid_segm - # ), "Annotation contains an invalid polygon with < 3 points" - if not len(segm) == len(valid_segm): - print('Annotation contains an invalid polygon with < 3 points') - assert len(segm) > 0 - obj["segmentation"] = segm - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - return dataset_dicts - -_CUSTOM_SPLITS_LVIS = { - "lvis_v1_train+coco": ("coco/", "lvis/lvis_v1_train+coco_mask.json"), - "lvis_v1_train_norare": ("coco/", "lvis/lvis_v1_train_norare.json"), -} - - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items(): - custom_register_lvis_instances( - key, - get_lvis_instances_meta(key), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) - - -def get_lvis_22k_meta(): - from .lvis_22k_categories import CATEGORIES - cat_ids = [k["id"] for k in CATEGORIES] - assert min(cat_ids) == 1 and max(cat_ids) == len( - cat_ids - ), "Category ids are not in [1, #categories], as expected" - # Ensure that the category list is sorted by id - lvis_categories = sorted(CATEGORIES, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in lvis_categories] - meta = {"thing_classes": thing_classes} - return meta - -_CUSTOM_SPLITS_LVIS_22K = { - "lvis_v1_train_22k": ("coco/", "lvis/lvis_v1_train_lvis-22k.json"), -} - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS_22K.items(): - custom_register_lvis_instances( - key, - get_lvis_22k_meta(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/tarteel-ai/whisper-base-demo-quran/README.md b/spaces/tarteel-ai/whisper-base-demo-quran/README.md deleted file mode 100644 index 4c13104844b90e393c3e717d68f3bfac9a1be564..0000000000000000000000000000000000000000 --- a/spaces/tarteel-ai/whisper-base-demo-quran/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Whisper Base Demo -emoji: 🤫 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false -tags: -- whisper-event -duplicated_from: whisper-event/whisper-demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Download Software Kartu Pelajar Full BEST Versionl.md b/spaces/terfces0erbo/CollegeProjectV2/Download Software Kartu Pelajar Full BEST Versionl.md deleted file mode 100644 index 2244c7a29c39da1ea30746bcc055deb278293b2f..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Download Software Kartu Pelajar Full BEST Versionl.md +++ /dev/null @@ -1,10 +0,0 @@ -

                  Download Software Kartu Pelajar Full Versionl


                  Download Ziphttps://bytlly.com/2uGkZG



                  -
                  -– Read and Download Kabardino-Balkaria 2 pdf Download Chantre.pdf Through internet - Download Bastat for Windows By Makemoneywithoutworking - Download A Light At The End Of The Tunnel pdf. [PDF] (PDF) kursu formu pdf.kursudiri.com Download DBMS Using. The pdf database. Enabling software. Most Popular DAKAR UNDER PRESSURE Pdf Download kursu formu. Pdf Download kursudiri.com Read and Download Home Made Easy Dont Waste Your. FREE Download PDF. [PDF] (PDF) kursudiri.com Download VMWare ESXi for Free Download Pdf Torrent. Digg is an American social news and entertainment website. Digg has a voting system that allows users to submit new stories for others to vote for. Similar to Reddit and Hacker News, Digg can be used to share and discuss the news and events. [TOP] Download Software Kartu Pelajar Full Version. PDF Converter Download - Get Free Pdf Converter.. How To Download Nutri Blogger Book pdf Download The Egg Book How To Download Journal Of Nuclear Medicine Article Free PDF how to download a presentation pdf how to download presentations pdf Download free movies with subtitles in srt format.. Download Offline Jungle 2 Full Pc – gamingtricksy. [TOP] Download Software Kartu Pelajar Full Version.. READ FREE PDF in. JASON KAPELOS or the guy who. Throwing away good software and hardware is one of the. or a form of soft cancer. [TOP] Download Software Kartu Pelajar Full Version. Read and Download A Light At The End Of The Tunnel pdf through internet. to read the book. A light at the end of the tunnel download free. [TOP] Download Software Kartu Pelajar Full Version. Read and Download A Light At The End Of The Tunnel pdf. - -Top 10 New Year Resolution - YouTube - -Sort by: Date Date Image Subject Name: Search by. [TOP] Download Software Kartu Pelajar Full Version. A Light At The End Of The Tunnel pdf Download Software Kartu Pelajar Full Version. Read and Download ERC-1155 Standard - A Game - Modding Tool PDF fidget8: a clicker "app" that can be played on Windows, iOS and MacOS. [TOP] Download Software Kartu Pelajar Full Version. Read and Download ERC-1155 Standard - A 4fefd39f24
                  -
                  -
                  -

                  diff --git a/spaces/theodotus/buffered-asr-uk/app.py b/spaces/theodotus/buffered-asr-uk/app.py deleted file mode 100644 index c9430dfcef2758104fe8251d209efea4c176a059..0000000000000000000000000000000000000000 --- a/spaces/theodotus/buffered-asr-uk/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import gradio as gr -import numpy as np -import librosa -import torch - -from math import ceil -import nemo.collections.asr as nemo_asr - - -asr_model = nemo_asr.models.EncDecCTCModelBPE. \ - from_pretrained("theodotus/stt_uk_squeezeformer_ctc_ml",map_location="cpu") - -asr_model.preprocessor.featurizer.dither = 0.0 -asr_model.preprocessor.featurizer.pad_to = 0 -asr_model.eval() -asr_model.encoder.freeze() -asr_model.decoder.freeze() - - -buffer_len = 8.0 -chunk_len = 4.8 -total_buffer = round(buffer_len * asr_model.cfg.sample_rate) -overhead_len = round((buffer_len - chunk_len) * asr_model.cfg.sample_rate) -model_stride = 4 - - -model_stride_in_secs = asr_model.cfg.preprocessor.window_stride * model_stride -tokens_per_chunk = ceil(chunk_len / model_stride_in_secs) -mid_delay = ceil((chunk_len + (buffer_len - chunk_len) / 2) / model_stride_in_secs) - - -def resample(audio): - audio_16k, sr = librosa.load(audio, sr = asr_model.cfg["sample_rate"], - mono=True, res_type='soxr_hq') - return audio_16k - - -def model(audio_16k): - logits, logits_len, greedy_predictions = asr_model.forward( - input_signal=torch.tensor([audio_16k]), - input_signal_length=torch.tensor([len(audio_16k)]) - ) - return logits - - -def decode_predictions(logits_list): - logits_len = logits_list[0].shape[1] - # cut overhead - cutted_logits = [] - for idx in range(len(logits_list)): - start_cut = 0 if (idx==0) else logits_len - 1 - mid_delay - end_cut = -1 if (idx==len(logits_list)-1) else logits_len - 1 - mid_delay + tokens_per_chunk - logits = logits_list[idx][:, start_cut:end_cut] - cutted_logits.append(logits) - - # join - logits = torch.cat(cutted_logits, axis=1) - logits_len = torch.tensor([logits.shape[1]]) - current_hypotheses, all_hyp = asr_model.decoding.ctc_decoder_predictions_tensor( - logits, decoder_lengths=logits_len, return_hypotheses=False, - ) - - return current_hypotheses[0] - - -def transcribe(audio): - state = [np.array([], dtype=np.float32), []] - - audio_16k = resample(audio) - - # join to audio sequence - state[0] = np.concatenate([state[0], audio_16k]) - - while (len(state[0]) > overhead_len) or (len(state[1]) == 0): - buffer = state[0][:total_buffer] - state[0] = state[0][total_buffer - overhead_len:] - # run model - logits = model(buffer) - # add logits - state[1].append(logits) - - if len(state[1]) == 0: - text = "" - else: - text = decode_predictions(state[1]) - return text - - -gr.Interface( - fn=transcribe, - inputs=[ - gr.Audio(source="upload", type="filepath"), - ], - outputs=[ - "textbox", - ], -).launch() \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/Pirates-Of-The-Caribbean-4-2011-720p-Bdrip-Tamil-Telugu-Hind-Eng.md b/spaces/tialenAdioni/chat-gpt-api/Pirates-Of-The-Caribbean-4-2011-720p-Bdrip-Tamil-Telugu-Hind-Eng.md deleted file mode 100644 index d87b25e946edab18171c321fb8182c0b516b258b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/Pirates-Of-The-Caribbean-4-2011-720p-Bdrip-Tamil-Telugu-Hind-Eng.md +++ /dev/null @@ -1,68 +0,0 @@ -## Pirates Of The Caribbean 4 2011 720p Bdrip Tamil Telugu Hind Eng - - - - - - - - - -**Download File ››››› 
[https://conttooperting.blogspot.com/?l=2tzQuD](https://conttooperting.blogspot.com/?l=2tzQuD)** - - - - - - - - - - - - ```html - -# Pirates of the Caribbean 4: A Multilingual Adventure on the High Seas - - - -Pirates of the Caribbean 4, also known as On Stranger Tides, is the fourth installment of the popular franchise starring Johnny Depp as the eccentric pirate Captain Jack Sparrow. The movie was released in 2011 and was directed by Rob Marshall. It is based on the novel of the same name by Tim Powers. - - - -The movie follows Jack Sparrow as he crosses paths with his former lover Angelica (Penélope Cruz), who forces him to join her and her father, the notorious pirate Blackbeard (Ian McShane), on a quest to find the Fountain of Youth. Along the way, they have to deal with the British navy, led by Jack's old nemesis Barbossa (Geoffrey Rush), and a rival Spanish expedition, as well as mermaids, zombies, and other dangers. - - - -Pirates of the Caribbean 4 was a global success, grossing over $1 billion at the box office. It was also one of the most widely distributed movies of all time, being dubbed or subtitled in over 40 languages, including Tamil, Telugu, Hindi, and English. The movie was praised for its stunning visuals, action sequences, and humor, but criticized for its convoluted plot, lack of originality, and excessive length. - - - -For fans of the franchise who want to enjoy the movie in their preferred language, there are several options available online. One of them is to download a high-quality BDRip version of the movie, which is a compressed file that retains most of the original quality. A BDRip version of Pirates of the Caribbean 4 with Tamil, Telugu, Hindi, and English audio tracks can be found on various torrent sites or streaming platforms. However, users should be aware of the legal and ethical issues involved in downloading or watching pirated content. - - - -Another option is to watch the movie on an official platform that offers multilingual options, such as Netflix or Amazon Prime Video. These platforms may have different availability and pricing depending on the region and subscription plan. Users can check their local listings or use a VPN service to access other regions' content. However, users should also be aware of the terms and conditions of these platforms and respect their policies. - - - -Pirates of the Caribbean 4 is a movie that can appeal to a wide range of audiences across different languages and cultures. It is a fun and adventurous ride that showcases the charm and charisma of Johnny Depp as Jack Sparrow. Whether you watch it in Tamil, Telugu, Hindi, English, or any other language, you are sure to have a swashbuckling time. - - ``` ```html - -Pirates of the Caribbean 4 also features some new and returning characters from the previous movies. One of them is Philip Swift (Sam Claflin), a young missionary who falls in love with a mermaid named Syrena (Àstrid Bergès-Frisbey). Their romance adds a touch of innocence and tragedy to the story, as they face many obstacles and sacrifices. Another character is Scrum (Stephen Graham), a loyal member of Jack's crew who provides comic relief and musical talent. He plays a concertina, a type of accordion, and sings sea shanties with Jack. - - - -The movie also introduces some elements from the original novel by Tim Powers, such as the voodoo magic of Blackbeard and his daughter, the prophecy of the Chalices of Cartagena, and the Fountain of Youth itself. 
The Fountain of Youth is depicted as a hidden pool surrounded by stone pillars that have carvings of different animals. To activate the fountain, two silver chalices filled with water from the pool have to be drunk by two people, one of whom has to sacrifice a mermaid's tear. The person who drinks from the chalice with the tear will gain years of life from the other person. - - - -Pirates of the Caribbean 4 also has some references and connections to the previous movies. For example, Jack Sparrow mentions his father, Captain Teague (Keith Richards), who appeared in the third movie. He also uses his compass, which he got from Tia Dalma (Naomie Harris), who was revealed to be the sea goddess Calypso in the third movie. Barbossa reveals that he lost his leg and his ship, the Black Pearl, to Blackbeard, who used his magical sword to control it. The Black Pearl is shown to be shrunk and kept in a glass bottle by Blackbeard, along with other ships he captured. - - ``` 145887f19f - - - - - diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Acer Aahd3vc Motherboard Manual Troubleshoot Common Issues and Errors with Your Motherboard.md b/spaces/tialenAdioni/chat-gpt-api/logs/Acer Aahd3vc Motherboard Manual Troubleshoot Common Issues and Errors with Your Motherboard.md deleted file mode 100644 index 7b96122c134bca8b267114501e34bb7db56a31f7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Acer Aahd3vc Motherboard Manual Troubleshoot Common Issues and Errors with Your Motherboard.md +++ /dev/null @@ -1,110 +0,0 @@ -
                  -

                  Acer Aahd3vc Motherboard Manual

                  -

                  A motherboard is one of the most essential components of a computer system. It is the main circuit board that connects and communicates with all the other parts, such as the CPU, memory, storage, graphics card, and more. A motherboard also determines what kind of hardware you can use and how well your system will perform.

                  -

                  Acer Aahd3vc Motherboard Manual


                  Download ⚙⚙⚙ https://urlcod.com/2uK1F0



                  -

                  If you are looking for a reliable and affordable motherboard for your AMD-based system, you might want to consider the Acer Aahd3vc motherboard. This motherboard is a micro-ATX board that supports AMD processors with the Socket FM2+ socket. It has four DDR3 memory slots, six SATA 3.0 ports, one PCI Express x16 slot, one PCI Express x1 slot, two PCI slots, and integrated graphics, audio, and LAN features.

                  -

                  However, before you buy or use this motherboard, you should read its manual carefully. The manual will provide you with all the information you need to install, configure, and troubleshoot your motherboard. It will also help you update your BIOS, which is crucial for improving your system stability and compatibility.

                  -

                  But how can you find the manual for the Acer Aahd3vc motherboard? Well, you have come to the right place. In this article, we will show you how to download and access the manual for this motherboard from Acer's official website. We will also give you an overview of its features, installation steps, and BIOS update process. So, let's get started!

                  -

                  Features of the Acer Aahd3vc Motherboard

                  -

                  The Acer Aahd3vc motherboard has many features that make it a great choice for your AMD-based system. Here are some of them:

                  -
                    -
                  • CPU socket and compatible processors: The motherboard has a Socket FM2+ socket that can support AMD processors from the A-series (Richland), Athlon X4 (Richland), or A-series (Trinity) families. These processors have integrated graphics cores that can work with the onboard GPU of the motherboard. The maximum TDP (Thermal Design Power) of these processors is 100W.
                  • -
                  • Chipset and form factor: The motherboard uses an AMD A75 chipset that provides six SATA 3.0 ports with RAID support, four USB 3.0 ports, 10 USB 2.0 ports, and integrated HD audio and Gigabit LAN controllers. The motherboard has a micro-ATX form factor that measures 9.6 x 9.6 inches (24.4 x 24.4 cm). It can fit into most ATX cases but has fewer expansion slots and connections than a full-sized ATX board.
                  • -
                  • Memory slots and types: The motherboard has four DDR3 memory slots that can support up to 32 GB of RAM in total. The memory type can be non-ECC (Error-Correcting Code) or ECC unbuffered. The memory channel can be single or dual. The memory speed can be DDR3 1333 MHz, DDR3 1600 MHz, or DDR3 1866 MHz.
                  • -
                  • SATA ports and RAID support: The motherboard has six SATA 3.0 ports that can support data transfer speeds of up to 6 Gb/s (gigabits per second). These ports can also support RAID (Redundant Array of Independent Disks) modes 0, 1, 10, or JBOD (Just a Bunch Of Disks). RAID modes can improve your data performance or reliability by combining multiple hard drives into one logical unit.
                  • -
                  • Graphics card interface and onboard GPU: The motherboard has one PCI Express x16 slot that can support a discrete graphics card with a PCIe v2.x interface. However, this slot will run at x8 speed if you use an AMD Dual Graphics configuration, which combines your discrete graphics card with your processor's integrated graphics core. The onboard GPU of the motherboard is an AMD Radeon HD 7000 series graphics core that can support DirectX 11, OpenGL 4.2, OpenCL 1.2, HDMI 1.4a, DisplayPort 1.2, DVI-D, VGA (D-sub), and dual-link DVI outputs.
                  • -
• USB ports and speeds: The motherboard has seven USB 2.0 ports (four on the back panel and three via internal headers) and five USB 3.0 ports (three on the back panel and two via internal headers). USB 2.0 ports support data transfer speeds of up to 480 Mb/s (megabits per second), while USB 3.0 ports support speeds of up to 5 Gb/s.
                  • -
                  • Audio and LAN features: The motherboard has an integrated Realtek ALC662 audio codec that can support high-definition audio with up to six channels (5.1 surround sound). It also has an integrated Realtek RTL8111F LAN controller that can support Gigabit Ethernet with speeds of up to 1000 Mb/s.
                  • -
                  -

                  How to Install the Acer Aahd3vc Motherboard

                  -

                  Installing the Acer Aahd3vc motherboard is not very difficult if you follow these steps:

                  -
                    -
1. Precautions and tools needed: Before you start installing your motherboard, make sure you have these tools ready: a Phillips screwdriver, an anti-static wrist strap or mat, a pair of tweezers or needle-nose pliers, some cable ties or clips, and some thermal paste if you are installing a new CPU cooler. Also make sure you work in a clean and well-lit area with enough space to lay out your components. Avoid touching any metal parts or pins on your motherboard or other components, as static electricity can damage them.
                  2. -
                  3. Steps to install the motherboard in a case: First, remove your case's side panel and any existing components that may interfere with your new motherboard installation (such as power supply cables or drive bays). Then locate your case's standoffs (metal posts that raise your motherboard above your case's surface) and screw them into your case's mounting holes according to your motherboard's layout diagram (usually found in your manual or on your board itself). Next align your motherboard's screw holes with your case's standoffs and gently lower your board into place without forcing it. Secure your board with screws but do not overtighten them as they may damage your board.
                  4. -
                  5. Steps to connect the power supply, CPU, memory, storage, graphics card, and other peripherals: After installing your board in your case, you need to connect all your components to it using various cables or connectors. Here are some general guidelines for each component:
                      -
                    • Power supply: Connect your power supply's main ATX power connector (24-pin) to your board's ATX power connector near its top edge. Connect your power supply's CPU power connector (4-pin) to your board's CPU power connector near its top left corner.
                    • -
                    • CPU: If you are using a new CPU or cooler, apply some thermal paste on top of your CPU's metal surface in a thin layer using a plastic card or spatula. Then align your CPU's notches or triangles with those on your board's socket and gently lower it into place without bending any pins. Then lock your CPU in place by closing the socket lever. Next, align your CPU cooler's mounting clips with your board's retention brackets and hook them securely. Then connect your CPU cooler's fan cable to your board's CPU fan header.
                    • -
                    • Memory: Open the clips on both ends of your memory slots and align your memory modules with the notches on the slots. Gently push down your memory modules until they snap into place and the clips close. Make sure you use the same color slots for dual-channel mode.
                    • -
                    • Storage: Mount your hard drives or SSDs in your case's drive bays and secure them with screws or tool-less mechanisms. Then connect one end of your SATA cables to your drives and the other end to your board's SATA ports. If you are using an optical drive, connect it to a SATA port as well. If you are using RAID, make sure you use the same color SATA ports for each RAID array.
                    • -
                    • Graphics card: Remove any metal brackets or covers from your case's expansion slots that correspond to your PCI Express x16 slot. Then align your graphics card with the slot and gently push it down until it locks into place. Secure it with a screw or a tool-less mechanism. Then connect any power cables from your power supply to your graphics card if needed.
                    • -
                    • Other peripherals: Connect any other devices you have, such as a keyboard, mouse, monitor, speakers, etc., to the appropriate ports on your board's back panel or front panel headers. Refer to your board's manual for the location and function of each port or header.
                    • -
                  -

                  How to Update the BIOS of the Acer Aahd3vc Motherboard

                  -

                  The BIOS (Basic Input/Output System) is a firmware that controls the basic functions of your motherboard, such as booting, hardware detection, power management, etc. Sometimes, you may need to update your BIOS to fix bugs, improve compatibility, or add new features.

                  -


                  -

                  To update the BIOS of the Acer Aahd3vc motherboard, you need to follow these steps:

                  -
                    -
1. What is BIOS and why it needs to be updated: As explained above, the BIOS controls your motherboard's basic functions; manufacturers release BIOS updates to fix bugs, improve compatibility with newer hardware, or add features, so check whether a newer version than yours is available before flashing.
                  2. -
                  3. How to find the latest BIOS version for the Acer Aahd3vc motherboard: To find the latest BIOS version for your motherboard, you need to visit Acer's official website and enter your device serial number, SNID (Serial Number Identification), or model number in the search box. You can find these numbers on a sticker on your motherboard or on a label on your case. Then click on "Drivers and Manuals" and select "BIOS/Firmware" from the drop-down menu. You will see a list of available BIOS versions for your motherboard with their release dates and descriptions.
                  4. -
                  5. How to download and prepare the BIOS update file: To download the BIOS update file, you need to click on the download icon next to the BIOS version you want and save it to a USB flash drive formatted in FAT32. Do not rename or unzip the file as it may cause errors during the update process. Make sure you have enough battery power or connect your system to a power outlet before proceeding.
                  6. -
                  7. How to flash the BIOS using a USB flash drive or a Windows utility: To flash the BIOS using a USB flash drive, you need to restart your system and press F2 repeatedly during boot-up to enter the BIOS setup menu. Then go to "Boot" and enable "Launch CSM" and "Boot Menu". Save and exit the BIOS setup menu and press F12 during boot-up to enter the boot menu. Select your USB flash drive as the boot device and press Enter. You will see a screen with instructions on how to flash the BIOS. Follow them carefully and do not turn off or interrupt your system during the update process. When the update is complete, restart your system and enter the BIOS setup menu again. Go to "Exit" and select "Load Setup Defaults". Save and exit the BIOS setup menu and enjoy your updated BIOS.

                    -

                    To flash the BIOS using a Windows utility, you need to run the downloaded file as an administrator in Windows. You will see a screen with instructions on how to flash the BIOS. Follow them carefully and do not turn off or interrupt your system during the update process. When the update is complete, restart your system and enjoy your updated BIOS.

                  8. -
                  -

                  Conclusion

                  -

                  In this article, we have shown you how to download and access the manual for the Acer Aahd3vc motherboard. We have also given you an overview of its features, installation steps, and BIOS update process. We hope you have found this article helpful and informative.

                  -

                  The Acer Aahd3vc motherboard manual is a valuable resource that can help you get the most out of your motherboard and your system. It can help you troubleshoot any issues, optimize your performance, and enhance your compatibility. It can also help you update your BIOS, which is essential for keeping your system up to date and secure.

                  -

                  If you have any questions or comments about the Acer Aahd3vc motherboard manual or this article, please feel free to contact us. We would love to hear from you and assist you in any way we can. Thank you for reading and happy computing!

                  -

                  FAQs

                  -
                    -
                  • Q: Where can I find the serial number or SNID of my Acer Aahd3vc motherboard?
                  • -
                  • A: You can find these numbers on a sticker on your motherboard or on a label on your case. You can also use a serial number detection utility from Acer's website to identify your device.
                  • -
                  • Q: How can I check the current BIOS version of my Acer Aahd3vc motherboard?
                  • -
• A: You can check the current BIOS version by entering the BIOS setup menu during boot-up (press F2 repeatedly) and looking at the top left corner of the screen. You can also use a Windows utility from Acer's website to check your BIOS version, or read it directly from Windows with the short script shown after these FAQs.
                  • -
                  • Q: How can I backup my BIOS settings before updating the BIOS of my Acer Aahd3vc motherboard?
                  • -
                  • A: You can backup your BIOS settings by entering the BIOS setup menu during boot-up (press F2 repeatedly) and going to "Exit". Then select "Save Custom Defaults" and choose a name for your backup file. You can restore your BIOS settings by entering the BIOS setup menu again and selecting "Load Custom Defaults".
                  • -
                  • Q: What are the benefits of updating the BIOS of my Acer Aahd3vc motherboard?
                  • -
                  • A: Updating the BIOS of your motherboard can fix bugs, improve compatibility, add new features, enhance security, and increase performance. However, updating the BIOS also involves some risks, such as bricking your motherboard or losing your data. Therefore, you should only update your BIOS if necessary and follow the instructions carefully.
                  • -
                  • Q: What are some common problems or issues with the Acer Aahd3vc motherboard?
                  • -
                  • A: Some common problems or issues with this motherboard are: no display output, no boot device detected, no sound output, no network connection, overheating, freezing, crashing, etc. These problems can be caused by various factors, such as faulty hardware, incompatible software, corrupted drivers, outdated BIOS, etc. To troubleshoot these problems, you should refer to your manual or contact Acer's support team.
                  • -
                  -
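
If you prefer to read the BIOS version from Windows without rebooting, the short Python script below is a minimal sketch of one way to do it. It assumes a Windows system where the built-in wmic tool is still available (wmic is deprecated on recent Windows 11 builds; PowerShell's Get-CimInstance Win32_BIOS reports the same field).

```python
import subprocess

def get_bios_version() -> str:
    """Return the SMBIOS BIOS version string reported by Windows."""
    # wmic prints a header line ("SMBIOSBIOSVersion") followed by the value itself.
    output = subprocess.check_output(
        ["wmic", "bios", "get", "smbiosbiosversion"],
        text=True,
    )
    lines = [line.strip() for line in output.splitlines() if line.strip()]
    return lines[1] if len(lines) > 1 else "unknown"

if __name__ == "__main__":
    print("Installed BIOS version:", get_bios_version())
```

Compare the printed value with the latest version listed on Acer's download page to decide whether an update is worthwhile.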

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Acid Bases And Salts Class 10 Icse Pdf Download Prepare for Your Exams with Confidence.md b/spaces/tialenAdioni/chat-gpt-api/logs/Acid Bases And Salts Class 10 Icse Pdf Download Prepare for Your Exams with Confidence.md deleted file mode 100644 index cac62a35218a916ae8029a535938dac8de1bc21a..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Acid Bases And Salts Class 10 Icse Pdf Download Prepare for Your Exams with Confidence.md +++ /dev/null @@ -1,192 +0,0 @@ - -

                  Acid Bases And Salts Class 10 Icse Pdf Download

                  -

                  If you are a student of ICSE Class 10 and looking for a reliable source to learn about acids, bases and salts, then you have come to the right place. In this article, we will provide you with a comprehensive overview of these important topics in chemistry, along with some useful resources that you can download for free. We will also answer some frequently asked questions that you might have about acids, bases and salts. So, let's get started!

                  -

                  Introduction

                  -

                  Acids, bases and salts are three types of compounds that have different chemical properties and reactions. They are also essential for many natural and industrial processes, such as digestion, metabolism, fertilization, cleaning, etc. Therefore, it is important to understand their nature and behaviour in order to appreciate their role in our lives.

                  -

                  Acid Bases And Salts Class 10 Icse Pdf Download


                  DOWNLOADhttps://urlcod.com/2uK4HR



                  -

                  What are acids, bases and salts?

                  -

An acid is a compound that produces hydronium ions (H3O+) when dissolved in water. A base is a compound that produces hydroxide ions (OH-) when dissolved in water. A salt is a compound that is formed by the neutralization of an acid and a base.
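
For example, hydrochloric acid ionizes in water as HCl + H2O → H3O+ + Cl-, sodium hydroxide dissociates as NaOH → Na+ + OH-, and neutralizing the two gives the salt sodium chloride: HCl + NaOH → NaCl + H2O.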

                  -

                  How are they classified?

                  -

                  Acids, bases and salts can be classified based on various criteria, such as their sources, strength, basicity, concentration and molecular composition. Here are some examples of how they can be classified:

                  -

                  -
                    -
                  • Organic acids are derived from plants or animals, while inorganic acids are derived from minerals.
                  • -
                  • Strong acids completely ionize in water, while weak acids partially ionize in water.
                  • -
                  • Monobasic acids produce one hydronium ion per molecule, while tribasic acids produce three hydronium ions per molecule.
                  • -
                  • Concentrated acids contain little or no water, while dilute acids contain more water than acid.
                  • -
                  • Hydracids contain hydrogen and a non-metallic element, while oxyacids contain hydrogen, oxygen and a non-metallic element.
                  • -
                  -
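
As a worked example of these criteria, sulphuric acid (H2SO4) is an inorganic acid obtained from minerals; it is strong, dibasic, an oxyacid, and usually supplied in concentrated form. Acetic acid (CH3COOH), by contrast, is an organic acid of plant origin that is weak, monobasic, an oxyacid, and normally used dilute.

-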

                  How are they prepared?

                  -

                  Acids, bases and salts can be prepared by various methods, such as synthesis, action of water on non-metallic or acidic oxides, action of metals on acids or bases, action of carbonates or bicarbonates on acids or bases, etc. Here are some examples of how they can be prepared:

                  -
                    -
                  • H2 + Cl2 → 2HCl (synthesis)
                  • -
                  • SO3 + H2O → H2SO4 (action of water on acidic oxide)
                  • -
                  • Zn + 2HCl → ZnCl2 + H2 (action of metal on acid)
                  • -
                  • CuO + 2HNO3 → Cu(NO3)2 + H2O (action of base on acid)
                  • -
• CuCO3 + 2HCl → CuCl2 + CO2 + H2O (action of carbonate on acid)
                  • -
• Cu(HCO3)2 + 2HCl → CuCl2 + 2CO2 + 2H2O (action of bicarbonate on acid)
                  • -
                  -

                  Acids

                  -

                  Properties of acids

                  -

                  The following are some common properties of acids:

                  -
                    -
                  • They have a sour taste.
                  • -
                  • They turn blue litmus paper red.
                  • -
                  • They react with metals to produce hydrogen gas.
                  • -
                  • They react with carbonates and bicarbonates to produce carbon dioxide gas.
                  • -
                  • They react with bases to produce salts and water.
                  • -
                  • Their aqueous solutions conduct electricity.
                  • -
                  • Their pH values are less than 7.
                  • -
                  -
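
A note on the last property: pH is a scale that expresses how acidic or basic a solution is, defined as pH = -log10[H3O+]. For example, a 0.01 mol/L solution of hydrochloric acid contains roughly 0.01 mol/L of hydronium ions, so its pH is about 2, whereas pure water, with about 10^-7 mol/L of hydronium ions, has a pH of 7.

-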

                  Uses of acids

                  -

                  The following are some common uses of acids:

                  -
                    -
                  • Vinegar (acetic acid) is used as a preservative and a flavouring agent.
                  • -
                  • Citric acid is used as an antioxidant and a cleaning agent.
                  • -
                  • Sulphuric acid is used in the manufacture of fertilizers, explosives and batteries.
                  • -
                  • Nitric acid is used in the manufacture of explosives, dyes and plastics.
                  • -
                  • Hydrochloric acid is used in the production of steel, chlorine and PVC.
                  • -
                  • Lactic acid is used in the fermentation of milk products.
                  • -
                  • Amino acids are the building blocks of proteins.
                  • -
                  -

                  Examples of acids

                  -

The following are some examples of acids:

| Name | Molecular formula | Type/Source/Strength/Basicity/Concentration/Molecular composition |
| --- | --- | --- |
| Vinegar (acetic acid) | CH3COOH | Organic/Plant/Weak/Monobasic/Dilute/Oxyacid |
| Lemon juice (citric acid) | C6H8O7 | Organic/Plant/Weak/Tribasic/Dilute/Oxyacid |
| Battery acid (sulphuric acid) | H2SO4 | Inorganic/Mineral/Strong/Dibasic/Concentrated/Oxyacid |
| Nitric acid (aqua fortis) | HNO3 | Inorganic/Mineral/Strong/Monobasic/Dilute/Oxyacid |
| Gastric juice (hydrochloric acid) | HCl | Inorganic/Mineral/Strong/Monobasic/Dilute/Hydracid |
| Sour milk (lactic acid) | C3H6O3 | Organic/Animal/Weak/Monobasic/Dilute/Oxyacid |
| Glycine (amino acid) | C2H5NO2 | Organic/Animal/Weak/Monobasic/Dilute/Oxyacid |

-

                  Bases

                  -

                  Properties of bases

                  -

                  The following are some common properties of bases:

                  -
                    -
                  • They have a bitter taste.
                  • -
                  • They turn red litmus paper blue.
                  • -
                  • They react with fats and oils to produce soap.
                  • -
                  • They react with acids to produce salts and water.
                  • -
                  • Their aqueous solutions conduct electricity.
                  • -
                  • Their pH values are more than 7.
                  • -
                  -
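
The reaction with fats and oils mentioned above is called saponification and is the basis of soap-making: when an oil or fat is heated with sodium hydroxide solution, it breaks down into soap and glycerol, which can be summarised as fat or oil + NaOH → soap (the sodium salt of a fatty acid) + glycerol.

-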

                  Uses of bases

                  -

                  The following are some common uses of bases:

                  -
                    -
                  • Sodium hydroxide is used in the manufacture of soap, paper and aluminium.
                  • -
                  • Potassium hydroxide is used in the production of fertilizers, batteries and biodiesel.
                  • -
                  • Magnesium hydroxide is used as an antacid and a laxative.
                  • -
• Ammonia is used as a cleaning agent and a refrigerant.
                  • -
                  • Sodium bicarbonate is used as a baking soda and a fire extinguisher.
                  • -
                  • Sodium carbonate is used as a washing soda and a water softener.
                  • -
• Lime (calcium hydroxide) is used in whitewashing, in neutralizing acidic soil, and in the preparation of bleaching powder.

                    Examples of bases

                    -

The following are some examples of bases:

| Name | Molecular formula | Type/Source/Strength/Basicity/Concentration/Molecular composition |
| --- | --- | --- |
| Lye (sodium hydroxide) | NaOH | Inorganic/Mineral/Strong/Monobasic/Concentrated/Hydroxide |
| Potash (potassium hydroxide) | KOH | Inorganic/Mineral/Strong/Monobasic/Concentrated/Hydroxide |
| Milk of magnesia (magnesium hydroxide) | Mg(OH)2 | Inorganic/Mineral/Weak/Dibasic/Dilute/Hydroxide |
| Ammonia (ammonium hydroxide) | NH3 (NH4OH) | Organic/Animal/Weak/Monobasic/Dilute/Hydroxide |
| Baking soda (sodium bicarbonate) | NaHCO3 | Inorganic/Mineral/Weak/Monobasic/Dilute/Bicarbonate |
| Washing soda (sodium carbonate) | Na2CO3 | Inorganic/Mineral/Weak/Dibasic/Dilute/Carbonate |
| Lime water (calcium hydroxide) | Ca(OH)2 | Inorganic/Mineral/Strong/Dibasic/Dilute/Hydroxide |

-

                    Salts

                    -

                    Properties of salts

                    -

                    The following are some common properties of salts:

                    -
                      -
                    • They have a crystalline structure.
                    • -
                    • They have a high melting and boiling point.
                    • -
                    • They are usually soluble in water, but insoluble in organic solvents.
                    • -
                    • They conduct electricity when dissolved in water or molten.
                    • -
                    • They are neutral, but can react with acids or bases to form different salts.
                    • -
                    • Their pH values depend on the acid and base that formed them.
                    • -
                    -
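
To illustrate the last point, the pH of a salt solution reflects the acid and base it came from: sodium chloride, formed from a strong acid and a strong base, gives a neutral solution (pH about 7); sodium carbonate, formed from a weak acid and a strong base, gives a basic solution because carbonate ions react with water (CO3^2- + H2O ⇌ HCO3^- + OH^-); and ammonium chloride, formed from a strong acid and a weak base, gives an acidic solution (NH4^+ + H2O ⇌ NH3 + H3O^+).

-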

                    Uses of salts

                    -

                    The following are some common uses of salts:

                    -
                      -
                    • Sodium chloride is used as a food additive, a food preservative, a de-icing agent, and a raw material for chlorine and sodium hydroxide production.
                    • -
                    • Potassium chloride is used as a fertilizer, a salt substitute, and a raw material for potassium hydroxide production.
                    • -
                    • Magnesium sulphate is used as a laxative, an anticonvulsant, and a bath salt.
                    • -
                    • Sodium hydrogen carbonate is used as a baking powder, an antacid, and a fire extinguisher.
                    • -
                    • Sodium carbonate is used as a washing soda, a water softener, and a raw material for glass and soap production.
                    • -
                    • Calcium carbonate is used as a limestone, a marble, a chalk, an antacid, and a raw material for cement and lime production.
                    • -
                    -

                    Examples of salts

                    -

The following are some examples of salts:

| Name | Molecular formula | Type/Source/Strength/Basicity/Concentration/Molecular composition |
| --- | --- | --- |
| Table salt (sodium chloride) | NaCl | Inorganic/Mineral/Neutral/Monobasic/Solid/Halide |
| Potash (potassium chloride) | KCl | Inorganic/Mineral/Neutral/Monobasic/Solid/Halide |
| Epsom salt (magnesium sulphate) | MgSO4 | Inorganic/Mineral/Neutral/Monobasic/Solid/Sulphate |
| Baking soda (sodium hydrogen carbonate) | NaHCO3 | Inorganic/Mineral/Basic/Monobasic/Solid/Bicarbonate |
| Washing soda (sodium carbonate) | Na2CO3 | Inorganic/Mineral/Basic/Dibasic/Solid/Carbonate |
| Limestone (calcium carbonate) | CaCO3 | Inorganic/Mineral/Neutral/Dibasic/Solid/Carbonate |

-

                    Conclusion

                    -

                    In this article, we have learned about the definition, classification, preparation, properties, uses and examples of acids, bases and salts. We have also seen how these compounds are important for various natural and industrial processes. We hope that this article has helped you to understand these topics better and prepare for your ICSE Class 10 Chemistry exam.

                    -

                    FAQs

                    -

                    Here are some frequently asked questions about acids, bases and salts:

                    -
                      -
                    1. What is the difference between a normal salt and an acid salt?
                    2. -

A normal salt is a salt that is formed by the complete neutralization of an acid and a base. An acid salt is a salt that is formed by the partial neutralization of an acid and a base, so it still contains replaceable hydrogen. For example, sodium chloride (NaCl) is a normal salt, while sodium hydrogen sulphate (NaHSO4) is an acid salt: 2NaOH + H2SO4 → Na2SO4 + 2H2O (complete neutralization), whereas NaOH + H2SO4 → NaHSO4 + H2O (partial neutralization).

                      -
                    3. What is the difference between a monobasic acid and a dibasic acid?
                    4. -

A monobasic acid is an acid that can produce one hydrogen ion (H+) per molecule when dissolved in water. A dibasic acid is an acid that can produce two hydrogen ions (H+) per molecule when dissolved in water. For example, hydrochloric acid (HCl) is a monobasic acid, while sulphuric acid (H2SO4) is a dibasic acid.

                      -
                    5. What is the difference between a soluble salt and an insoluble salt?
                    6. -

                      A soluble salt is a salt that can dissolve in water to form a clear solution. An insoluble salt is a salt that cannot dissolve in water and remains as a solid precipitate. For example, sodium chloride (NaCl) is a soluble salt, while lead (II) iodide (PbI2) is an insoluble salt.

                      -
                    7. What is the difference between an alkali and ammonia?
                    8. -

An alkali is a basic hydroxide that produces hydroxide ions (OH-) when dissolved in water. Ammonia is a compound of nitrogen and hydrogen that produces ammonium ions (NH4+) and hydroxide ions (OH-) when dissolved in water. For example, sodium hydroxide (NaOH) is an alkali, while ammonia (NH3) is not.

                      -
                    9. What is the difference between lime water and milk of lime?
                    10. -

                      Lime water is a clear and colorless solution of calcium hydroxide (Ca(OH)2) in water. Milk of lime is a milky and turbid suspension of excess calcium hydroxide (Ca(OH)2) in water. For example, lime water can be used to test for carbon dioxide gas, while milk of lime can be used to make plaster of Paris.

                      -
                    -

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen The Ultimate Photo Editing Solution for Windows and Mac.md b/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen The Ultimate Photo Editing Solution for Windows and Mac.md deleted file mode 100644 index 480bddd1afe6ca3c794550174dba9aec243d9998..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen The Ultimate Photo Editing Solution for Windows and Mac.md +++ /dev/null @@ -1,154 +0,0 @@ - -

                    Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen

                    -

                    Introduction

                    -

                    If you are a photographer or a photo enthusiast, you might have heard of Adobe Photoshop Lightroom, one of the most popular and powerful photo editing software in the market. But you might also know that it is not cheap to get a license or a subscription for this software, especially if you are on a tight budget or just want to try it out for a short period of time.

                    -

                    Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen


                    Downloadhttps://urlcod.com/2uK7dY



                    -

                    That's why some people resort to using cracks, patches and keygens, which are tools that can bypass the activation process and unlock the full version of Adobe Photoshop Lightroom without paying anything. But how do these tools work, and what are the benefits and risks of using them? In this article, we will explain everything you need to know about Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen, including how to download and install it, what are its advantages and disadvantages, and some frequently asked questions.

                    -

                    What is Adobe Photoshop Lightroom?

                    -

                    Adobe Photoshop Lightroom is a software that allows you to organize, edit and share your photos in an easy and efficient way. It is designed for both professional and amateur photographers who want to enhance their photos with advanced tools and presets. You can import your photos from your camera, computer or cloud storage, and then sort them into collections, albums or folders. You can also apply various adjustments to your photos, such as exposure, contrast, color, sharpness, noise reduction, cropping, rotating, etc. You can also use filters, effects, brushes, gradients and other tools to create stunning effects on your photos.

                    -

                    One of the best features of Adobe Photoshop Lightroom is that it works with RAW files, which are uncompressed and unprocessed images that contain more information and details than JPEG files. This means that you can edit your photos without losing any quality or data. You can also export your photos in different formats and sizes, depending on your needs and preferences. You can also share your photos directly from Adobe Photoshop Lightroom to social media platforms, such as Facebook, Instagram or Flickr.

                    -

                    What is a crack, patch and keygen?

                    -

                    A crack, patch and keygen are different types of tools that can modify or generate codes or files that can activate or unlock a software without paying for a license or a subscription. They are usually created by hackers or programmers who want to bypass the security measures of the software developers.

                    -
                      -
                    • A crack is a modified version of the original executable file of the software that can run without requiring any activation code or serial number.
                    • -
                    • A patch is a small program that can alter some parts of the original executable file or other files of the software that can disable or remove the activation process.
                    • -
                    • A keygen is a program that can generate valid activation codes or serial numbers for the software that can be entered during the installation or registration process.
                    • -
                    -

                    Why do people use them?

                    -

                    People use cracks, patches and keygens for various reasons, but mainly because they want to use a software without paying for it. Some of the common reasons are:

                    -
                      -
                    • They want to test the software before buying it.
                    • -
                    • They cannot afford to buy the software.
                    • -
                    • They do not want to pay for updates or renewals.
                    • -
                    • They do not have access to the official website or payment methods.
                    • -
                    • They do not agree with the pricing or policies of the software developers.
                    • -
                    -

                    How to download and install Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen

                    -

                    If you want to use Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen, you need to follow these steps:

                    -

                    Step 1: Download the file from a reliable source

                    -

                    The first thing you need to do is to find a trustworthy website that offers Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen for download. You can use search engines like Google or Bing to look for such websites, but be careful not to click on any suspicious links or ads that might lead you to malware or virus infections.

                    -

                    One of the websites that we recommend is RARBG, which is a popular torrent site that provides high-quality files for various software, movies, games and music. You can download Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen from this link: https://rargb.to/torrent/adobe-photoshop-lightroom-6-10-1-final-crack-patch-crackingpatching-2231959.html.

                    -

                    To download from RARBG, you need to have a torrent client installed on your computer, such as uTorrent or BitTorrent. You also need to have a VPN (virtual private network) service enabled on your browser or device, such as NordVPN or ExpressVPN. This will help you avoid any legal issues or ISP (internet service provider) restrictions when downloading torrents.

                    -


                    -

                    Step 2: Extract the file using WinRAR or 7-Zip

                    -

                    After downloading the file from RARBG, you need to extract it using WinRAR or 7-Zip, which are programs that can decompress compressed files into folders or subfolders. You can download WinRAR from this link: https://www.win-rar.com/download.html. You can download 7-Zip from this link: https://www.7-zip.org/download.html.

                    -

                    To extract the file using WinRAR or 7-Zip, you need to right-click on the file and select "Extract Here" or "Extract To" depending on where you want to save the extracted folder.

                    -

                    Step 3: Run the setup file and follow the instructions

                    -

                    After extracting the file using WinRAR or 7-Zip, you need to run the setup file and follow the instructions on how to install Adobe Photoshop Lightroom on your computer.

                    -
- Double-click on the "setup.exe" file. Click "Yes" if prompted by User Account Control (UAC). Choose your language.
                    -

                    Accept the license agreement and click "Next".

                    -

                    Choose your destination folder and click "Next".

                    -

                    Choose your install options and click "Next".

                    -

                    Click "Install" and wait for the installation to complete.

                    -

                    Click "Finish" and close the setup window.

                    -

                    Step 4: Copy and paste the crack, patch or keygen files into the installation folder

                    -

                    After installing Adobe Photoshop Lightroom, you need to copy and paste the crack, patch or keygen files into the installation folder. These files are usually located in a separate folder inside the downloaded file. For example, if you downloaded Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen from RARBG, you will find these files in a folder named "Crack".

                    -

                    To copy and paste the files, you need to do the following:

                    -
                      -
                    • Open the folder where you extracted the downloaded file.
                    • -
                    • Open the folder that contains the crack, patch or keygen files.
                    • -
                    • Select all the files and right-click on them. Choose "Copy" from the menu.
                    • -
                    • Open another window and go to the folder where you installed Adobe Photoshop Lightroom. The default location is C:\Program Files\Adobe\Adobe Lightroom.
                    • -
                    • Right-click on an empty space and choose "Paste" from the menu.
                    • -
                    • If prompted to replace or overwrite any existing files, click "Yes" or "OK".
                    • -
                    -

                    Step 5: Enjoy your full version of Adobe Photoshop Lightroom

                    -

                    You have successfully installed Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen on your computer. You can now launch the software and enjoy its full features and updates without any limitations or restrictions.

                    -

                    Benefits of using Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen

                    -

                    Using Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen has some advantages that might appeal to some users. Here are some of them:

                    -

                    Access to all features and updates of Adobe Photoshop Lightroom

                    -

                    By using a crack, patch or keygen, you can unlock all the features and functions of Adobe Photoshop Lightroom that are otherwise only available to licensed or subscribed users. You can also access all the updates and improvements that Adobe releases for its software without having to pay anything extra.

                    -

                    Save money and time on purchasing a license or subscription

                    -

                    Another benefit of using a crack, patch or keygen is that you can save money and time on purchasing a license or subscription for Adobe Photoshop Lightroom. A license for Adobe Photoshop Lightroom costs $149 USD as a one-time payment, while a subscription for Adobe Photoshop Lightroom CC costs $9.99 USD per month as part of the Creative Cloud Photography plan. By using a crack, patch or keygen, you can avoid these costs and use the software for free.

                    -

                    Enhance your photo editing skills and creativity with a professional software

                    -

A third benefit of using a crack, patch or keygen is that you can enhance your photo editing skills and creativity with a professional software that offers advanced tools and presets for improving your photos. You can learn how to use Adobe Photoshop Lightroom from various tutorials and resources online, and apply your knowledge and skills to your own projects.

                    A fourth risk of using pirated software is that it lacks technical support and customer service from Adobe. Pirated software does not come with a valid license or subscription that entitles you to receive updates, improvements, bug fixes, security patches, etc. from Adobe. Pirated software also does not come with a warranty or guarantee that covers any issues or defects that may arise from using the software.

                    -

                    By using pirated software, you are missing out on the benefits and advantages of having a legitimate and authorized version of Adobe Photoshop Lightroom. You are also on your own if you encounter any problems or difficulties with the software. You cannot contact Adobe for help or assistance, nor can you expect any compensation or resolution from them.

                    -

                    Conclusion

                    -

                    Adobe Photoshop Lightroom is a great software for photo editing and management, but it is not worth using a pirated version of it. Using Adobe Photoshop Lightroom 6.10.1 Final Crack - Patch - Crackin Keygen may seem like a cheap and easy way to access the full features and updates of the software, but it also comes with many risks and drawbacks that can outweigh the benefits.

                    -

                    Using pirated software is illegal and unethical, and can result in legal and moral consequences. It can also expose your computer and data to malware and virus infection from untrusted sources, cause compatibility and stability problems with your system or other software, and lack technical support and customer service from Adobe.

                    -

                    Therefore, we recommend that you avoid using pirated software and opt for a legitimate and authorized version of Adobe Photoshop Lightroom instead. You can purchase a license or a subscription for Adobe Photoshop Lightroom from the official website or an authorized reseller. You can also try the free trial version of Adobe Photoshop Lightroom for 30 days before deciding to buy it.

                    -

                    By using a legitimate and authorized version of Adobe Photoshop Lightroom, you can enjoy all the features and updates of the software without any limitations or restrictions. You can also support the software industry and its innovation and growth. You can also protect your computer and data from malware and virus infection, ensure compatibility and stability with your system or other software, and receive technical support and customer service from Adobe.

                    -

                    FAQs

                    -
                      -
                    1. What is the difference between Adobe Photoshop Lightroom and Adobe Photoshop?
                    2. -
                    3. How can I tell if my Adobe Photoshop Lightroom is pirated or not?
                    4. -
                    5. What are some alternatives to using pirated software?
                    6. -
                    7. How can I report pirated software to Adobe?
                    8. -
                    9. How can I learn more about Adobe Photoshop Lightroom?
                    10. -
                    -

                    1. What is the difference between Adobe Photoshop Lightroom and Adobe Photoshop?

                    -

                    Adobe Photoshop Lightroom and Adobe Photoshop are both photo editing software from Adobe, but they have different purposes and features. Adobe Photoshop Lightroom is mainly focused on organizing, editing and sharing photos in a streamlined and efficient way. It is ideal for photographers who want to enhance their photos with advanced tools and presets. Adobe Photoshop is more versatile and powerful than Adobe Photoshop Lightroom, and it can edit not only photos but also graphics, illustrations, animations, etc. It is ideal for designers who want to create complex and creative projects with multiple layers, filters, effects, etc.

                    -

                    2. How can I tell if my Adobe Photoshop Lightroom is pirated or not?

                    -

                    There are some signs that can indicate if your Adobe Photoshop Lightroom is pirated or not. Some of them are:

                    -
                      -
                    • Your Adobe Photoshop Lightroom does not require any activation code or serial number to run.
                    • -
                    • Your Adobe Photoshop Lightroom does not receive any updates or improvements from Adobe.
                    • -
                    • Your Adobe Photoshop Lightroom has errors, crashes, freezes, glitches, bugs, etc. that affect its performance.
                    • -
                    • Your Adobe Photoshop Lightroom has files or folders that have suspicious names or extensions, such as "crack", "patch", "keygen", ".exe", ".dll", etc.
                    • -
                    • Your computer has malware or virus infection that may have come from your Adobe Photoshop Lightroom.
                    • -
                    -

                    3. What are some alternatives to using pirated software?

                    -

                    If you cannot afford to buy a license or a subscription for Adobe Photoshop Lightroom, there are some alternatives to using pirated software that are legal and ethical. Some of them are:

                    -
                      -
                    • Use the free trial version of Adobe Photoshop Lightroom for 30 days before deciding to buy it.
                    • -
                    • Use a free or open source photo editing software that has similar features to Adobe Photoshop Lightroom, such as GIMP, Darktable, RawTherapee, etc.
                    • -
• Use an online photo editing service that allows you to upload your photos and edit them with various tools and presets, such as Pixlr, Fotor or Canva. These alternatives help you avoid the malware and virus infection, compatibility and stability problems, and legal and ethical issues that come with using pirated software.

                      -

                      5. How can I learn more about Adobe Photoshop Lightroom?

                      -

                      If you want to learn more about Adobe Photoshop Lightroom and how to use it effectively and creatively, you can visit the official website or the help center of Adobe Photoshop Lightroom. You can also find various tutorials and resources online that can teach you the basics and advanced features of Adobe Photoshop Lightroom. Some of them are:

                      - -

                      0a6ba089eb
                      -
                      -
                      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Cloud Computing Design Patterns Thomas Erl Pdf 13.md b/spaces/tialenAdioni/chat-gpt-api/logs/Cloud Computing Design Patterns Thomas Erl Pdf 13.md deleted file mode 100644 index 85e0d576d2db9063db712a2f98d92c508b9f75cd..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Cloud Computing Design Patterns Thomas Erl Pdf 13.md +++ /dev/null @@ -1,44 +0,0 @@ -
                      -

                      Cloud Computing Design Patterns: A Review of Thomas Erl's Book

                      - -

                      Cloud computing is a paradigm that enables ubiquitous, on-demand, scalable access to shared pools of configurable IT resources. Cloud computing poses many challenges and requirements for architects, developers, administrators, and managers who need to design and implement cloud-based solutions. How can they ensure scalability, elasticity, reliability, resiliency, recovery, security, and more?

                      - -

                      One way to address these challenges and requirements is to use design patterns. Design patterns are proven solutions to common problems that arise in a given context. They provide guidance and best practices for applying specific mechanisms and technologies to achieve desired outcomes.

                      -




                      - -

                      Cloud Computing Design Patterns by Thomas Erl, Robert Cope, and Amin Naserpour is a comprehensive catalog of design patterns for modern cloud-based architecture and solution design. The book covers more than 100 patterns that illustrate solutions to common cloud challenges and requirements. The patterns are supported by rich, visual documentation, including 300+ diagrams.

                      - -

                      The book is organized into 10 chapters that cover different aspects of cloud computing, such as sharing, scaling, elasticity, reliability, resiliency, recovery, data management, storage, virtualization, monitoring, provisioning, administration, security, network security, identity & access management, trust assurance, and common compound patterns. Each chapter introduces a set of related patterns with their profiles, problems, solutions, applications, and mechanisms. The book also provides appendices with a glossary of cloud computing mechanisms and an alphabetical reference of design patterns.

                      -

                      - -

                      The book is not only a valuable reference for cloud practitioners who want to learn from the authors' extensive experience and expertise in cloud computing, but also a source of inspiration and insight for those who want to explore new possibilities and opportunities in cloud-based solution design. The book is vendor-agnostic and technology-neutral, meaning that it can be applied to any cloud platform or service model.

                      - -

                      If you are interested in learning more about cloud computing design patterns and how they can help you create effective and efficient cloud solutions, you can download a PDF version of the book from the following link: [^1^]. You can also visit the book's website at www.cloudpatterns.org for more information and resources.

                      - -

                      One of the benefits of cloud computing design patterns is that they help developers create applications that can handle failures gracefully. Failures are inevitable in distributed systems, and cloud applications need to be resilient to network outages, service disruptions, data loss, and other errors. Some of the patterns that can help with this are:

                      - -
                        -
                      • Circuit Breaker: This pattern prevents a network or service failure from cascading to other services by quickly failing requests to a faulty service and redirecting them to a fallback service or a cached response.
                      • -
                      • Retry: This pattern enables an application to retry a failed operation a certain number of times with a delay between each attempt, increasing the chance of success.
                      • -
                      • Bulkhead: This pattern isolates critical resources or services from less critical ones, so that a failure in one component does not affect the availability or performance of another.
                      • -
                      - -
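To make the idea concrete, here is a minimal, generic sketch of the Retry pattern in Python. It is only an illustration (the `call_flaky_service` name is hypothetical, not something taken from the book or any particular SDK); production code would normally retry only on known transient errors and combine retries with a circuit breaker so a failing dependency is not hammered indefinitely.

```python
import random
import time

def retry(operation, max_attempts=3, base_delay=1.0):
    """Run `operation` until it succeeds or the attempt budget is exhausted.

    Waits between attempts using exponential backoff plus a little jitter,
    so many clients do not all retry at exactly the same moment.
    """
    for attempt in range(1, max_attempts + 1):
        try:
            return operation()
        except Exception:                      # real code: catch only transient errors
            if attempt == max_attempts:
                raise                          # budget spent, surface the failure
            delay = base_delay * 2 ** (attempt - 1) + random.uniform(0, 0.5)
            time.sleep(delay)

# Hypothetical usage:
# result = retry(lambda: call_flaky_service(), max_attempts=5)
```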

                      Another benefit of cloud computing design patterns is that they help developers optimize the performance and efficiency of their applications. Cloud applications need to be responsive and scalable to meet the varying demands of users and customers. Some of the patterns that can help with this are:

                      - -
                        -
                      • Cache-Aside: This pattern improves the performance and availability of data access by loading data on demand into a cache from a data store and updating the cache whenever the data changes.
                      • -
                      • Elastic Resource Capacity: This pattern enables an application to dynamically adjust its resource consumption based on the current workload, reducing costs and improving scalability.
                      • -
                      • Load Balancing: This pattern distributes incoming requests across multiple instances of a service or a resource, improving throughput and availability.
                      • -
                      - -
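As a rough sketch of the Cache-Aside pattern from the list above, the snippet below uses plain dictionaries to stand in for a cache service and a data store. Real deployments would typically put something like Redis or Memcached in front of an actual database and would also handle entry expiry and concurrent writers; this only shows the read-through-on-miss and invalidate-on-write flow.

```python
# Toy in-memory stand-ins; a real system would use Redis/Memcached and a database.
cache = {}
database = {"p1": {"name": "widget", "price": 9.99}}

def get_product(product_id):
    """Cache-Aside read: check the cache first, load from the store on a miss."""
    entry = cache.get(product_id)
    if entry is not None:
        return entry                       # cache hit
    record = database.get(product_id)      # cache miss: read the system of record
    if record is not None:
        cache[product_id] = record         # populate the cache for later reads
    return record

def update_product(product_id, data):
    """Cache-Aside write: update the store, then invalidate the cached copy."""
    database[product_id] = data
    cache.pop(product_id, None)            # drop the stale entry rather than patching it
```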

                      A third benefit of cloud computing design patterns is that they help developers enhance the security and privacy of their applications. Cloud applications need to protect sensitive data and resources from unauthorized access and malicious attacks. Some of the patterns that can help with this are:

                      - -
                        -
                      • Cloud Storage Data Encryption: This pattern encrypts data before storing it in a cloud storage service, preventing unauthorized access or leakage of data at rest.
                      • -
                      • Cloud Authentication Gateway: This pattern provides a single point of authentication and authorization for accessing cloud services and resources, simplifying identity management and enforcing security policies.
                      • -
                      • Cloud Data Breach Protection: This pattern detects and mitigates data breaches by monitoring data access patterns, applying encryption and masking techniques, and alerting relevant stakeholders.
                      • -
                      - -
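To give a flavour of the Cloud Storage Data Encryption idea, here is a minimal client-side sketch. It assumes the third-party `cryptography` package and a hypothetical `upload_to_bucket` call, and it only illustrates encrypting before upload and decrypting after download, not the full pattern (key management, rotation and access policies) as documented in the book.

```python
from cryptography.fernet import Fernet   # pip install cryptography

def encrypt_for_upload(plaintext: bytes, key: bytes) -> bytes:
    """Encrypt data on the client before handing it to a cloud storage service."""
    return Fernet(key).encrypt(plaintext)

def decrypt_after_download(ciphertext: bytes, key: bytes) -> bytes:
    """Decrypt data retrieved from cloud storage back on the client."""
    return Fernet(key).decrypt(ciphertext)

# The key never leaves the client; only ciphertext is stored remotely.
key = Fernet.generate_key()
blob = encrypt_for_upload(b"customer record", key)
# upload_to_bucket("records/123", blob)   # hypothetical storage call
assert decrypt_after_download(blob, key) == b"customer record"
```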

                      These are just some examples of cloud computing design patterns that can help developers create better cloud-based solutions. There are many more patterns available for different scenarios and contexts, and developers can also combine or customize them to suit their specific needs. By using cloud computing design patterns, developers can leverage the best practices and experiences of others who have faced similar challenges and requirements in the cloud.

                      e93f5a0c3f
                      -
                      -
                      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/((FULL)) Download Badlapur Movies In Hindi Hd.md b/spaces/tioseFevbu/cartoon-converter/scripts/((FULL)) Download Badlapur Movies In Hindi Hd.md deleted file mode 100644 index fb95fcf5a10b31ae883d05580193db6e96d3bd96..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/((FULL)) Download Badlapur Movies In Hindi Hd.md +++ /dev/null @@ -1,21 +0,0 @@ -
                      -

                      How to Download Badlapur Movies in Hindi HD for Free

                      -

                      If you are a fan of Bollywood movies, you might have heard of Badlapur, a 2015 crime thriller starring Varun Dhawan, Nawazuddin Siddiqui, Huma Qureshi and Yami Gautam. The movie was praised for its gripping story, dark tone and stellar performances by the cast. But did you know that you can download Badlapur movies in Hindi HD for free?

                      -




                      -

                      Yes, you read that right. You don't have to pay any subscription fees or buy any DVDs to watch Badlapur movies in Hindi HD. All you need is a reliable internet connection and a device that can stream or download videos. In this article, we will show you how to download Badlapur movies in Hindi HD for free using some of the best websites and apps available online.

                      -

                      Best Websites to Download Badlapur Movies in Hindi HD

                      -

                      There are many websites that offer free downloads of Bollywood movies, but not all of them are safe and legal. Some of them may contain viruses, malware or spyware that can harm your device or steal your personal information. Others may have broken links, low-quality videos or annoying ads that ruin your viewing experience. To avoid these problems, we recommend using the following websites to download Badlapur movies in Hindi HD:

                      -
                        -
                      • Filmyzilla: Filmyzilla is one of the most popular websites for downloading Bollywood movies in HD quality. It has a huge collection of movies from various genres and languages, including Badlapur. You can download Badlapur movies in Hindi HD from Filmyzilla in different formats and sizes, depending on your preference and device compatibility. Filmyzilla also provides fast download speeds and user-friendly interface.
                      • -
                      • Filmywap: Filmywap is another website that offers free downloads of Bollywood movies in HD quality. It has a similar layout and features as Filmyzilla, but with a different domain name. You can find Badlapur movies in Hindi HD on Filmywap under the category of Crime Movies. You can also choose from various resolutions and file types to download Badlapur movies in Hindi HD from Filmywap.
                      • -
                      • Worldfree4u: Worldfree4u is a website that provides free downloads of Bollywood movies as well as Hollywood movies dubbed in Hindi. It has a simple and easy-to-use design that allows you to search and download movies quickly and conveniently. You can download Badlapur movies in Hindi HD from Worldfree4u by clicking on the movie poster and following the instructions on the next page.
                      • -
                      -

                      Best Apps to Download Badlapur Movies in Hindi HD

                      -

                      If you prefer using apps to download movies on your mobile device, you can try the following apps to download Badlapur movies in Hindi HD:

                      -

                      -
                        -
                      • VideoBuddy: VideoBuddy is an app that allows you to download videos from various platforms, including YouTube, Facebook, Instagram and more. You can also use VideoBuddy to download Bollywood movies in HD quality, including Badlapur. You can search for Badlapur movies in Hindi HD on VideoBuddy by typing the keyword in the search bar or browsing through the categories. You can then select the video quality and format and start downloading.
                      • -
                      • Torrentvilla Lite: Torrentvilla Lite is an app that lets you download torrents of movies, TV shows, music and more. You can use Torrentvilla Lite to download Badlapur movies in Hindi HD by searching for the movie name or scanning the QR code on the website. You can then choose the torrent file with the best quality and speed and start downloading.
                      • -
                      • MX Player: MX Player is an app that not only plays videos but also downloads them from various sources. You can use MX Player to download Badlapur movies in Hindi HD by tapping on the Online tab and entering the movie name in the search box. You can then select the video source

                        81aa517590
                        -
                        -
                        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Active File Recovery 18.0.6 Crack FREE.md b/spaces/tioseFevbu/cartoon-converter/scripts/Active File Recovery 18.0.6 Crack FREE.md deleted file mode 100644 index e35ba8c32ce6bce63ae6577b69d60f0a03d342ce..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Active File Recovery 18.0.6 Crack FREE.md +++ /dev/null @@ -1,167 +0,0 @@ - -

                        Active File Recovery 18.0.6 Crack: A Powerful Data Recovery Tool

                        -

                        Have you ever accidentally deleted an important file or lost data due to a virus attack, a format error, or a system crash? If so, you know how frustrating and stressful it can be to lose your precious data.

                        -




                        -

                        Fortunately, there is a solution that can help you recover your lost files in a fast and easy way: Active File Recovery 18.0.6 Crack.

                        -

                        Active File Recovery 18.0.6 Crack is a simple-to-use, reliable and effective data recovery software that can restore files and disks that have been deleted, formatted or lost due to various reasons.

                        -

                        It allows you to recover data from your computer regardless of the cause, whether it is due to accidental deletion into Recycle Bin, disk formatting, virus infection or permanent deletion (Shift + Delete).

                        -

                        Active File Recovery 18.0.6 Crack scans your computer's hard drives in minutes and displays all the files that can be recovered.

                        -

It easily recovers files that have been damaged or corrupted by power failures, hardware malfunctions, software errors or human errors.

                        -

                        -

                        It supports various file systems, such as FAT, exFAT, NTFS, ReFS, HFS+, Ext2/3/4, BtrFS, UFS, XFS, JFS and more.

                        -

                        It also supports various storage devices, such as HDD, SSD, USB, IDE, SATA, eSATA, SAS, SCSI hard drives, USB Flash, SD, MMC, CompactFlash cards and more.

                        -

                        It even supports RAID arrays and can reconstruct damaged or deleted RAID-0/1/5/6/10/50/60 configurations.

                        -

                        With Active File Recovery 18.0.6 Crack, you can recover your data in a matter of minutes and save it to a new location of your choice.

                        -

                        In this article, we will show you how to download and install Active File Recovery 18.0.6 Crack from a reliable source, how to use it to scan and recover your files from different file systems and storage devices, and what are the pros and cons of this software.

                        -

                        We will also provide you with some alternatives to Active File Recovery 18.0.6 Crack in case you want to try other data recovery tools.

                        -

                        So, let's get started!

                        -

                        How to Download and Install Active File Recovery 18.0.6 Crack

                        -

                        The first step to use Active File Recovery 18.0.6 Crack is to download and install it on your computer.

                        -

                        However, you need to be careful when downloading this software from the internet, as there are many fake or malicious websites that may infect your computer with viruses or malware.

                        -

                        To avoid this risk, we recommend you to download Active File Recovery 18.0.6 Crack from a trusted and verified source, such as [Crack4Windows].

                        -

                        This website provides you with a safe and secure download link for Active File Recovery 18.0.6 Crack, as well as a detailed installation guide and screenshots.

                        -

                        To download and install Active File Recovery 18.0.6 Crack from Crack4Windows, follow these steps:

                        -
                          -
                        1. Go to [Crack4Windows] website and search for Active File Recovery 18.0.6 Crack in the search box.
                        2. -
                        3. Click on the result that matches your search query and read the description and the user reviews of the software.
                        4. -
                        5. Click on the green "Download" button and save the file to your computer.
                        6. -
                        7. Extract the zip file using a tool like WinRAR or 7-Zip and open the folder that contains the setup file and the crack file.
                        8. -
                        9. Run the setup file as an administrator and follow the instructions on the screen to install Active File Recovery 18.0.6 on your computer.
                        10. -
                        11. Do not launch the program after the installation is complete.
                        12. -
                        13. Copy the crack file from the folder and paste it into the installation directory of Active File Recovery 18.0.6 (usually C:\Program Files\Active File Recovery).
                        14. -
                        15. Replace the original file if prompted.
                        16. -
                        17. Launch Active File Recovery 18.0.6 from your desktop or start menu and enjoy its full features without any limitations.
                        18. -
                        -

                        How to Use Active File Recovery 18.0.6 Crack

                        -

                        Now that you have downloaded and installed Active File Recovery 18.0.6 Crack on your computer, you are ready to use it to recover your lost files.

                        -

                        The user interface of Active File Recovery 18.0.6 Crack is simple and intuitive, making it easy for anyone to use it without any technical skills or knowledge.

                        -

                        The main window of the program consists of four tabs: Recover Files/Disk Images; Open Disk; Create Disk Image; RAID Reconstruction.

                        -

                        The first tab is where you can scan for deleted or lost files on your computer or disk images.

                        -

                        The second tab is where you can open disks or partitions that are not recognized by Windows or are inaccessible due to damage or corruption.

                        -

                        The third tab is where you can create disk images of your disks or partitions for backup or recovery purposes.

                        -

                        The fourth tab is where you can reconstruct RAID arrays that have been damaged or deleted.

                        -

                        How to Scan for Deleted or Lost Files

                        -

                        To scan for deleted or lost files on your computer or disk images using Active File Recovery 18.0.6 Crack, follow these steps:

                        -
                          -
                        1. Select the "Recover Files/Disk Images" tab from the main window of the program.
                        2. -
                        5. Choose the disk or partition that you want to scan from the list of available devices. You can also select a disk image file if you have one.
                        6. -
                        7. Click on the "Scan" button to start the scanning process. You can choose between two scanning modes: QuickScan and SuperScan.
                        8. -
                        9. QuickScan is a fast and simple scan that detects files that have been deleted recently or emptied from the Recycle Bin. It takes only a few minutes to complete.
                        10. -
                        11. SuperScan is a thorough and advanced scan that detects files that have been deleted long ago or lost due to formatting, corruption or damage. It takes longer to complete, but it can find more files and recover them with their original names and paths.
                        12. -
                        13. Wait for the scan to finish and review the results. You can use the filter and search options to narrow down the list of files that can be recovered.
                        14. -
                        15. Select the files that you want to recover and click on the "Recover" button. You can also preview the files before recovering them by double-clicking on them.
                        16. -
                        17. Choose a destination folder where you want to save the recovered files. Make sure that you do not save them to the same disk or partition where they were deleted or lost, as this may overwrite them and make them unrecoverable.
                        18. -
                        19. Click on the "OK" button to start the recovery process. Wait for it to complete and check your recovered files in the destination folder.
                        20. -
                        -

                        How to Recover Files from Different File Systems

                        -

                        Active File Recovery 18.0.6 Crack supports various file systems, such as FAT, exFAT, NTFS, ReFS, HFS+, Ext2/3/4, BtrFS, UFS, XFS, JFS and more.

                        -

                        This means that you can recover files from different types of disks and partitions that use different file systems.

                        -

                        To recover files from different file systems using Active File Recovery 18.0.6 Crack, follow these steps:

                        -
                          -
                        1. Select the "Recover Files/Disk Images" tab from the main window of the program.
                        2. -
                        3. Choose the disk or partition that you want to recover files from. Make sure that it has a valid file system that is recognized by Active File Recovery 18.0.6 Crack.
                        4. -
                        5. If the disk or partition has been formatted or corrupted, you may need to use the "Open Disk" tab to access it and fix its file system before scanning it for deleted or lost files.
                        6. -
                        7. Click on the "Scan" button to start the scanning process. You can choose between QuickScan and SuperScan modes depending on your needs.
                        8. -
                        9. Wait for the scan to finish and review the results. You can use the filter and search options to narrow down the list of files that can be recovered.
                        10. -
                        11. Select the files that you want to recover and click on the "Recover" button. You can also preview the files before recovering them by double-clicking on them.
                        12. -
                        13. Choose a destination folder where you want to save the recovered files. Make sure that you do not save them to the same disk or partition where they were deleted or lost, as this may overwrite them and make them unrecoverable.
                        14. -
                        15. Click on the "OK" button to start the recovery process. Wait for it to complete and check your recovered files in the destination folder.
                        16. -
                        -

                        How to Recover Files from Different Storage Devices

                        -

                        Active File Recovery 18.0.6 Crack supports various storage devices, such as HDD, SSD, USB, IDE, SATA, eSATA, SAS, SCSI hard drives, USB Flash, SD, MMC, CompactFlash cards and more.

                        -

                        This means that you can recover files from different types of devices that store your data.

                        -

                        To recover files from different storage devices using Active File Recovery 18.0.6 Crack, follow these steps:

                        -
                          -
                        1. Select the "Recover Files/Disk Images" tab from the main window of the program.
                        2. -
                        5. Connect the storage device that you want to recover files from to your computer. Make sure that it is detected and recognized by Windows and Active File Recovery 18.0.6 Crack.
                        6. -
                        7. Choose the storage device from the list of available devices. You can also select a disk image file if you have one.
                        8. -
                        9. Click on the "Scan" button to start the scanning process. You can choose between QuickScan and SuperScan modes depending on your needs.
                        10. -
                        11. Wait for the scan to finish and review the results. You can use the filter and search options to narrow down the list of files that can be recovered.
                        12. -
                        13. Select the files that you want to recover and click on the "Recover" button. You can also preview the files before recovering them by double-clicking on them.
                        14. -
                        15. Choose a destination folder where you want to save the recovered files. Make sure that you do not save them to the same storage device where they were deleted or lost, as this may overwrite them and make them unrecoverable.
                        16. -
                        17. Click on the "OK" button to start the recovery process. Wait for it to complete and check your recovered files in the destination folder.
                        18. -
                        -

                        How to Recover Files from Damaged or Formatted Partitions

                        -

                        Sometimes, you may lose your files due to damage or formatting of your partitions. This can happen due to various reasons, such as virus attacks, power failures, system crashes, human errors or malicious actions.

                        -

                        In such cases, you may not be able to access your partitions or see your files in Windows Explorer.

                        -

                        However, this does not mean that your files are gone forever. You can still recover them using Active File Recovery 18.0.6 Crack, as long as they have not been overwritten by new data.

                        -

                        To recover files from damaged or formatted partitions using Active File Recovery 18.0.6 Crack, follow these steps:

                        -
                          -
                        1. Select the "Open Disk" tab from the main window of the program.
                        2. -
                        3. Choose the disk that contains the damaged or formatted partition from the list of available devices.
                        4. -
                        5. Click on the "Open" button to open the disk and view its partitions.
                        6. -
                        7. Select the partition that you want to recover files from. If the partition is not recognized by Active File Recovery 18.0.6 Crack, you may need to use the "Create Virtual Partition" option to manually define its parameters, such as file system type, start and end sectors, etc.
                        8. -
                        9. Click on the "Scan" button to start the scanning process. You can choose between two scanning methods: SuperScan and LastChance.
                        10. -
                        11. SuperScan is similar to SuperScan mode in "Recover Files/Disk Images" tab, but it works on partitions instead of disks. It performs a thorough and advanced scan of the partition and detects files based on their file signatures.
                        12. -
                        13. LastChance is a special scanning method that recovers files by their extensions when no other methods work. It is useful for recovering files from severely damaged or formatted partitions.
                        14. -
                        15. Wait for the scan to finish and review the results. You can use the filter and search options to narrow down the list of files that can be recovered.
                        16. -
                        17. Select the files that you want to recover and click on the "Recover" button. You can also preview the files before recovering them by double-clicking on them.
                        18. -
                        19. Choose a destination folder where you want to save the recovered files. Make sure that you do not save them to the same disk or partition where they were deleted or lost, as this may overwrite them and make them unrecoverable.
                        20. -
                        21. Click on the "OK" button to start the recovery process. Wait for it to complete and check your recovered files in the destination folder.
                        22. -
                        -

                        How to Recover Files from RAID Arrays

                        -

                        If you have a RAID array that stores your data, you may face a situation where your RAID array fails or gets damaged due to various reasons, such as hardware failure, power outage, controller malfunction, etc.

                        -

                        This can result in losing access to your data or seeing corrupted or incomplete files.

                        -

                        However, you can still recover your data using Active File Recovery 18.0.6 Crack, as long as you have at least one working disk from your RAID array.

                        -

                        To recover files from RAID arrays using Active File Recovery 18.0.6 Crack, follow these steps:


                        -
                          -
                        1. Select the "RAID Reconstruction" tab from the main window of the program.
                        2. -
                        3. Choose the type of RAID array that you have from the list of supported RAID types, such as RAID-0/1/5/6/10/50/60.
                        4. -
                        5. Click on the "Add Disk" button to add the disks that belong to your RAID array. You can add physical disks, logical disks or disk images.
                        6. -
                        7. Adjust the parameters of your RAID array, such as stripe size, parity order, rotation, offset, etc. You can use the "Auto Detect" option to automatically detect the parameters based on the data on the disks.
                        8. -
                        9. Click on the "Reconstruct" button to start the reconstruction process. Active File Recovery 18.0.6 Crack will analyze the data on the disks and create a virtual RAID array that can be accessed and scanned for deleted or lost files.
                        10. -
                        11. Select the virtual RAID array from the list of available devices and click on the "Scan" button to start the scanning process. You can choose between QuickScan and SuperScan modes depending on your needs.
                        12. -
                        13. Wait for the scan to finish and review the results. You can use the filter and search options to narrow down the list of files that can be recovered.
                        14. -
                        15. Select the files that you want to recover and click on the "Recover" button. You can also preview the files before recovering them by double-clicking on them.
                        16. -
                        17. Choose a destination folder where you want to save the recovered files. Make sure that you do not save them to the same disk or partition where they were deleted or lost, as this may overwrite them and make them unrecoverable.
                        18. -
                        19. Click on the "OK" button to start the recovery process. Wait for it to complete and check your recovered files in the destination folder.
                        20. -
                        -
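For readers curious why such reconstruction is possible at all, the arithmetic behind RAID-5 is simple: the parity block of each stripe is the XOR of that stripe's data blocks, so any single missing block can be recomputed from the surviving blocks plus parity. The toy Python sketch below illustrates only this idea; it is not how the program itself is implemented, and real arrays also deal with stripe size, block rotation and dual-parity schemes such as RAID-6.

```python
def xor_blocks(*blocks):
    """XOR equal-length blocks; RAID-5 parity is the XOR of a stripe's data blocks."""
    out = bytearray(len(blocks[0]))
    for block in blocks:
        for i, byte in enumerate(block):
            out[i] ^= byte
    return bytes(out)

# Three data blocks in one stripe and their parity block:
d0, d1, d2 = b"AAAA", b"BBBB", b"CCCC"
parity = xor_blocks(d0, d1, d2)

# If the disk holding d1 fails, its block can be rebuilt from the survivors:
rebuilt = xor_blocks(d0, d2, parity)
assert rebuilt == d1
```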

                        Pros and Cons of Active File Recovery 18.0.6 Crack

                        -

                        Active File Recovery 18.0.6 Crack is a powerful data recovery tool that can help you recover your lost files in various situations.

                        -

                        However, like any other software, it has its pros and cons that you should be aware of before using it.

                        -

                        Here is a table that summarizes the advantages and disadvantages of Active File Recovery 18.0.6 Crack:

| Pros | Cons |
| --- | --- |
| Easy to use and intuitive user interface | May not work with some antivirus or firewall programs |
| Supports various file systems and storage devices | May not recover files that have been overwritten by new data |
| Supports RAID arrays and disk images | May not recover files with their original names or paths in some cases |
| Offers two scanning modes: QuickScan and SuperScan | SuperScan mode may take a long time to complete |
| Offers two scanning methods: Active Scan and LastChance | LastChance method may not recover all file types |
| Allows previewing files before recovering them | May require a crack file to activate its full features |
| Allows filtering and searching files by name, size, date, etc. | May not be updated regularly |
| Allows saving recovered files to a new location of your choice | May not be compatible with some Windows versions |

                        Alternatives to Active File Recovery 18.0.6 Crack

                        -

                        If Active File Recovery 18.0.6 Crack does not work for you or you want to try other data recovery software, you have plenty of options to choose from.

                        -

                        There are many other data recovery tools that offer similar or different features and capabilities as Active File Recovery 18.0.6 Crack.

                        -

                        Some of them are free, some of them are paid, some of them are easy to use, some of them are more advanced.

                        -

                        The best data recovery software for you depends on your needs, preferences and budget.

                        -

                        Here is a list of some of the most popular and reliable data recovery software that you can try as alternatives to Active File Recovery 18.0.6 Crack:

                        -
                          -
                          • EaseUS Data Recovery Wizard: A comprehensive and user-friendly data recovery software that can recover deleted, formatted or lost data from PC, laptop, hard drive, SSD, USB, memory card, digital camera, etc. It supports various file systems, such as FAT32, NTFS, exFAT, EXT2/EXT3/EXT4/HFS+/APFS/XFS/JFS/BtrFS/UFS/ZFS/F2FS/ReFS/QCOW2/VHDX/VMDK/VHD/VDI/ISO/NRG/CUE/BIN and more. It offers three scanning modes: Quick Scan, Deep Scan and Partition Scan. It allows previewing files before recovering them. It allows filtering and searching files by name, size, date, etc. It allows saving recovered files to a new location of your choice. It has a free version that can recover up to 2 GB of data and a paid version that can recover unlimited data.
                          • -
                          • Recuva: A simple and effective data recovery software that can recover deleted or lost files from your computer, hard drive, USB drive, memory card, digital camera, etc. It supports various file systems, such as FAT32, NTFS and exFAT. It offers two scanning modes: Quick Scan and Deep Scan. It allows previewing files before recovering them. It allows filtering and searching files by name, size, date, etc. It allows saving recovered files to a new location of your choice. It has a free version that can recover unlimited data and a paid version that offers advanced features such as virtual hard drive support, automatic updates and premium support.
                          • -
                          • Stellar Data Recovery: A professional and powerful data recovery software that can recover deleted or lost data from Windows PC, Mac, Linux, Android and iOS devices. It supports various file systems, such as FAT32, NTFS, exFAT, HFS+, APFS, Ext2/3/4 and more. It offers four scanning modes: Quick Scan, Deep Scan, Deleted File Recovery and Lost Partition Recovery. It allows previewing files before recovering them. It allows filtering and searching files by name, size, date, etc. It allows saving recovered files to a new location of your choice. It has a free version that can recover up to 1 GB of data and a paid version that can recover unlimited data.
                          • -
• Wondershare Recoverit: A reliable and easy-to-use data recovery software that can recover deleted or lost data from PC, laptop, hard drive, SSD, USB drive, memory card, digital camera, etc. It supports various file systems, such as FAT32, NTFS and exFAT. It offers three scanning modes: All-Around Recovery, Deleted Files Recovery and Lost Partition Recovery. It allows previewing files before recovering them. It allows filtering and searching files by name, size, date, etc. It allows saving recovered files to a new location of your choice. It has a free version that can recover up to 100 MB of data and a paid version that can recover unlimited data.
                          • -
                          -

                          Conclusion

                          -

                          In conclusion, Active File Recovery 18.0.6 Crack is a powerful data recovery tool that can help you recover your deleted or lost files in various situations.

                          -

                          It has a simple and intuitive user interface that makes it easy for anyone to use it without any technical skills or knowledge.

                          -

                          It supports various file systems and storage devices, including RAID arrays and disk images.

                          -

                          It offers two scanning modes: QuickScan and SuperScan, and two scanning methods: Active Scan and LastChance, to detect and recover files based on their file signatures or extensions.

                          -

                          It allows previewing files before recovering them, filtering and searching files by name, size, date, etc., and saving recovered files to a new location of your choice.

                          -

                          However, it also has some drawbacks, such as compatibility issues with some antivirus or firewall programs, potential overwriting of files by new data, possible loss of original names or paths of files in some cases, long scanning time for SuperScan mode, need for a crack file to activate its full features, lack of regular updates and compatibility with some Windows versions.

                          -

                          If you are looking for a reliable and effective data recovery software that can help you recover your lost files in a fast and easy way, you can try Active File Recovery 18.0.6 Crack from a trusted and verified source, such as Crack4Windows.

                          -

                          However, if you want to try other data recovery software that offer similar or different features and capabilities as Active File Recovery 18.0.6 Crack, you can also check out some of the alternatives that we have listed above, such as EaseUS Data Recovery Wizard, Recuva, Stellar Data Recovery and Wondershare Recoverit.

                          -

                          We hope that this article has helped you learn more about Active File Recovery 18.0.6 Crack and how to use it to recover your lost files.

                          -

                          If you have any questions or comments about this article or Active File Recovery 18.0.6 Crack, feel free to leave them below.

                          -

                          Thank you for reading and happy data recovery!

                          -

                          FAQs

                          -

                          Here are some of the frequently asked questions and answers about Active File Recovery 18.0.6 Crack:

                          -
                            -
                          1. What is the difference between QuickScan and SuperScan modes?
                          2. -

                            QuickScan mode is a fast and simple scan that detects files that have been deleted recently or emptied from the Recycle Bin. It takes only a few minutes to complete.

                            -

                            SuperScan mode is a thorough and advanced scan that detects files that have been deleted long ago or lost due to formatting, corruption or damage. It takes longer to complete, but it can find more files and recover them with their original names and paths.

                            -
                          3. What is the difference between Active Scan and LastChance methods?
                          4. -

The Active Scan method detects files based on their file signatures, i.e. the characteristic bytes that mark the start of each format. It can recognize over 150 file types, such as documents, photos, videos, music, archives, etc. (a toy sketch of this idea appears after these FAQs).

                            -

                            LastChance method is a scanning method that recovers files by their extensions when no other methods work. It is useful for recovering files from severely damaged or formatted partitions.

                            -
                          5. How can I preview files before recovering them?
                          6. -

                            You can preview files before recovering them by double-clicking on them in the list of files that can be recovered. A preview window will open and show you the content of the file.

                            -
7. How can I filter and search files by name, size, date, etc.?
8. -

                            -

                            You can filter and search files by name, size, date, etc. by using the options on the toolbar of the program. You can click on the "Filter" button to open a window where you can specify the criteria for filtering files, such as file name, extension, size range, date range, attributes, etc. You can also click on the "Search" button to open a window where you can enter a keyword or a phrase to search for files that contain it.

                            -
                          9. How can I save recovered files to a new location of my choice?
                          10. -

                            You can save recovered files to a new location of your choice by selecting the files that you want to recover and clicking on the "Recover" button. A window will open where you can browse and select a destination folder where you want to save the recovered files. You can also create a new folder or rename an existing folder in this window. After choosing a destination folder, click on the "OK" button to start the recovery process.

                            -
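To make the idea of signature-based detection more concrete, here is a small, hypothetical Python sketch of how magic-byte scanning works in principle. It only illustrates the concept from the Active Scan answer above, it is not how Active File Recovery itself is implemented, and the file names and three signatures shown are just example assumptions.

```python
# Toy illustration of signature-based scanning: look for well-known "magic bytes"
# in a raw image of a drive or partition. Real recovery tools know far more
# formats and also handle fragmentation, file lengths, directory metadata, etc.
SIGNATURES = {
    b"\xFF\xD8\xFF": "jpg",        # JPEG start-of-image marker
    b"\x89PNG\r\n\x1a\n": "png",   # PNG header
    b"%PDF-": "pdf",               # PDF header
}

def find_signatures(image_path):
    data = open(image_path, "rb").read()   # fine for a toy example; real tools stream
    hits = []
    for magic, ext in SIGNATURES.items():
        pos = data.find(magic)
        while pos != -1:
            hits.append((pos, ext))
            pos = data.find(magic, pos + 1)
    return sorted(hits)

# Example usage with an assumed raw partition image:
for offset, ext in find_signatures("partition.img"):
    print(f"possible .{ext} file starting at byte offset {offset}")
```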

                          -
                          -
                          \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Crysis 3 Multiplayer _TOP_ Crack Blackbox.md b/spaces/tioseFevbu/cartoon-converter/scripts/Crysis 3 Multiplayer _TOP_ Crack Blackbox.md deleted file mode 100644 index df67afd9e9cffea40c534a44ad7acb60b4d91882..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Crysis 3 Multiplayer _TOP_ Crack Blackbox.md +++ /dev/null @@ -1,28 +0,0 @@ -
                          -

                          How to Play Crysis 3 Multiplayer with Black Box Repack

                          -

                          Crysis 3 is a sci-fi first-person shooter game that features stunning graphics and immersive gameplay. The game also has a multiplayer mode that allows players to compete in various game modes and maps. However, some players may not be able to access the multiplayer mode due to DRM restrictions or other issues.

                          -

                          Fortunately, there is a way to play Crysis 3 multiplayer with Black Box Repack, a group of game enthusiasts who create compressed and cracked versions of popular games. Black Box Repack has released a crack for Crysis 3 that enables multiplayer functionality without requiring a valid CD key or online activation. Here are the steps to follow:

                          -

                          crysis 3 multiplayer crack blackbox


                          Download Ziphttps://urlcod.com/2uHyEe



                          -
                            -
                          1. Download and install Crysis 3 from Black Box Repack's website[^3^]. Make sure you choose the language you want in the installer.
                          2. -
                          3. Download and extract the crack from MegaGames[^1^]. Copy the contents of the /Crack folder to your game installation directory, replacing the original files.
                          4. -
                          5. Use a firewall to block all game executables from going online. This will prevent the game from checking for updates or verifying your CD key.
                          6. -
                          7. Launch the game and enjoy multiplayer mode. You can join existing servers or create your own using the in-game options.
                          8. -
                          -

                          Note that this method may not work for all players, and it may violate the terms of service of Crysis 3 and EA. Use it at your own risk and discretion. If you like the game, please support the developers by buying the original version.

                          - -

                          Crysis 3 multiplayer mode offers eight different game modes and twelve maps to choose from. The game modes are:

                          -
                            -
                          • Team Deathmatch: Two teams of up to eight players each fight to get the most kills.
                          • -
                          • Crash Site: Two teams of up to eight players each fight to capture and hold alien drop pods that land randomly on the map.
                          • -
                          • Spear: Two teams of up to eight players each fight to hack and defend data terminals scattered around the map.
                          • -
                          • Hunter: Two teams of up to eight players each take turns being the hunters or the hunted. The hunters are cloaked and armed with bows, while the hunted are visible and armed with standard weapons.
                          • -
                          • Extraction: Two teams of up to four players each fight to extract alien cells from the map and bring them back to their base.
                          • -
                          • Assault: Two teams of up to four players each take turns attacking or defending a single objective on the map.
                          • -
                          • Capture the Relay: Two teams of up to four players each fight to capture and hold a relay device that spawns randomly on the map.
                          • -
                          • Cell vs. Rebel: Two teams of up to four players each fight as either the Cell corporation or the rebel forces in a series of missions that vary depending on the map.
                          • -
                          -

                          The maps are based on various locations from the single-player campaign, such as New York City, a hydroelectric dam, a jungle swamp, and a desert canyon. The maps also feature dynamic weather effects and environmental hazards that can affect the gameplay. For example, some maps have sandstorms that reduce visibility and damage vehicles, while others have floods that submerge parts of the map and create new routes.

                          -

                          Crysis 3 multiplayer mode also features a progression system that allows players to rank up and unlock new weapons, attachments, modules, suits, and dog tags. Players can customize their loadouts and nanosuits to suit their playstyle and preferences. The nanosuit has four modes: armor, cloak, power, and hack. Each mode has its own advantages and disadvantages, and can be switched on the fly during combat. The nanosuit also has a visor that can scan enemies, objectives, weapons, and vehicles.

                          -
                          -
                          \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Direct Ishq Malayalam Movie Torrent Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Direct Ishq Malayalam Movie Torrent Download.md deleted file mode 100644 index 638ff85e8317f3cce468a864fd2da68c019a26ce..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Direct Ishq Malayalam Movie Torrent Download.md +++ /dev/null @@ -1,25 +0,0 @@ -
                          -

                          How to Download Direct Ishq Malayalam Movie Torrent Safely and Easily

                          -

                          Direct Ishq is a 2023 Malayalam romantic comedy movie starring Prithviraj Sukumaran, Nithya Menen, and Bobby Simha. The movie is directed by Rajesh Nair and produced by Anil Ambalakkara. The movie revolves around the love triangle between a radio jockey, a singer, and a gangster.

                          -

                          Direct Ishq Malayalam Movie Torrent Download


                          Download Zip »»» https://urlcod.com/2uHxox



                          -

                          If you are looking for a way to download Direct Ishq Malayalam movie torrent, you might be disappointed to find out that most of the torrent sites are either blocked or have low-quality or fake torrents. However, there are still some reliable and safe ways to download Direct Ishq Malayalam movie torrent in 2023. Here are some of them:

                          -
                            -
                          • Torrentz2: Torrentz2 is a popular torrent search engine that indexes millions of torrents from various sources. You can use Torrentz2 to find Direct Ishq Malayalam movie torrent by typing the keyword in the search box. You will get a list of results with different torrent sites and magnet links. You can choose the one with the most seeders and leechers for faster and safer downloading.
                          • -
                          • A2movies.IN: A2movies.IN is a dedicated site for Malayalam movies torrents. You can find Direct Ishq Malayalam movie torrent on this site by clicking on the 2023 category and scrolling down the page. You will see the movie poster, name, size, and quality of the torrent. You can click on the download button to get the torrent file or the magnet link.
                          • -
                          • Zooqle: Zooqle is another torrent search engine that has a large database of movies, TV shows, games, music, and more. You can use Zooqle to find Direct Ishq Malayalam movie torrent by entering the keyword in the search box. You will get a list of results with different sources and formats. You can filter the results by quality, size, seeds, peers, and date.
                          • -
                          -

                          Before downloading Direct Ishq Malayalam movie torrent from any of these sites, make sure you use a VPN to hide your IP address and encrypt your traffic. This way, you can avoid any legal issues or cyber threats that might arise from torrenting. A VPN will also help you bypass any geo-restrictions or censorship that might prevent you from accessing some of these sites.

                          -

                          Some of the best VPNs for downloading Direct Ishq Malayalam movie torrent are NordVPN, Surfshark VPN, and PrivateVPN. These VPNs offer fast speeds, unlimited bandwidth, strong encryption, and multiple servers around the world. They also have a strict no-logs policy and a kill switch feature that will protect your privacy and security online.

                          -

                          With these tips, you can download Direct Ishq Malayalam movie torrent safely and easily in 2023. Enjoy watching this hilarious and romantic movie with your friends and family!

                          - -

A torrent is not the only way to watch Direct Ishq online. You can also stream it on some of the popular OTT platforms that offer Malayalam movies. Some of these platforms are:

                          -

                          -
                            -
                          • Amazon Prime Video: Amazon Prime Video is one of the most popular streaming services in India and abroad. It has a huge collection of movies and TV shows in various languages, including Malayalam. You can watch Direct Ishq Malayalam movie on Amazon Prime Video with a subscription fee of Rs. 129 per month or Rs. 999 per year. You can also download the movie for offline viewing on your device.
                          • -
                          • Hotstar: Hotstar is another popular streaming service that offers a variety of content, including sports, news, entertainment, and more. It has a section for Malayalam movies where you can find Direct Ishq Malayalam movie. You can watch it with a subscription fee of Rs. 299 per month or Rs. 1499 per year. You can also download the movie for offline viewing on your device.
                          • -
                          • ZEE5: ZEE5 is a streaming service that offers content from Zee network and other partners. It has a section for Malayalam movies where you can find Direct Ishq Malayalam movie. You can watch it with a subscription fee of Rs. 99 per month or Rs. 999 per year. You can also download the movie for offline viewing on your device.
                          • -
                          -

                          These OTT platforms offer high-quality video and audio, as well as subtitles and dubbing options for different languages. They also have a user-friendly interface and multiple device compatibility. However, they might not be available in some regions or countries due to geo-restrictions or licensing issues. In that case, you can use a VPN to access them from anywhere in the world.

                          -

                          Direct Ishq Malayalam movie is a fun and entertaining movie that will make you laugh and fall in love. Whether you choose to download Direct Ishq Malayalam movie torrent or stream it online, you will not regret watching this movie. It has a great cast, a catchy soundtrack, and a hilarious plot. Don't miss this movie if you are a fan of Malayalam cinema!

                          -
                          -
                          \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Fatal String Manager Failed To Initialize Properly.md b/spaces/tioseFevbu/cartoon-converter/scripts/Fatal String Manager Failed To Initialize Properly.md deleted file mode 100644 index 186b2fbbda0ad3088d16c813cc9ece02ef18d4e2..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Fatal String Manager Failed To Initialize Properly.md +++ /dev/null @@ -1,47 +0,0 @@ - -

                          How to Fix Fatal String Manager Failed To Initialize Properly Error in Red Alert 2

                          -

                          Red Alert 2 is a classic real-time strategy game that was released in 2000 for Windows. However, many players have reported encountering an error message that says "Fatal String Manager Failed To Initialize Properly" when trying to launch the game on newer versions of Windows. This error prevents the game from running and can be very frustrating for fans of the game.

                          -

                          Fatal String Manager Failed To Initialize Properly


                          Download Zip ->->->-> https://urlcod.com/2uHxKw



                          -

                          Fortunately, there are some possible solutions that can help you fix this error and enjoy Red Alert 2 on your Windows PC. In this article, we will show you four methods that have worked for other users who faced the same problem. Follow the steps below and see which one works for you.

                          -

                          Method 1: Run Red Alert 2 in Compatibility Mode

                          -

One of the most common causes of this error is that Red Alert 2 is not compatible with newer versions of Windows. To fix this, you can try running the game in compatibility mode, which makes it behave as if it were running on an older version of Windows (a scripted alternative is sketched after the steps below).

                          -

                          To run Red Alert 2 in compatibility mode, follow these steps:

                          -
                            -
                          1. Go to the game folder where you installed Red Alert 2.
                          2. -
                          3. Find the file named RA2.exe and right-click on it.
                          4. -
                          5. Select Properties from the context menu.
                          6. -
                          7. Go to the Compatibility tab in the pop-up window.
                          8. -
                          9. Tick the checkbox for Run this program in compatibility mode for and select Windows XP Service Pack 3 from the drop-down menu[^1^] [^2^].
                          10. -
                          11. Also, tick the checkbox for Run this program as an administrator[^1^] [^2^].
                          12. -
                          13. Click on Apply and OK to save the changes.
                          14. -
                          15. Do the same for all other .exe files in the game folder[^1^].
                          16. -
                          17. Launch the game and see if the error is gone.
                          18. -
                          -
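For users who would rather script these settings than click through the Properties dialog, the compatibility checkboxes above are believed to map to a single value under the per-user AppCompatFlags\Layers registry key. The following Python sketch is an illustration only; the install path is an assumption, and you should back up your registry before trying it.

```python
# Sketch: apply "Windows XP (Service Pack 3)" compatibility plus "Run as
# administrator" to RA2.exe by writing the same value the Properties dialog uses.
# Assumptions: Windows, Python 3, and that the game is installed at GAME_DIR.
import winreg

GAME_DIR = r"C:\Games\RedAlert2"   # assumption: change to your actual install folder
LAYERS = r"Software\Microsoft\Windows NT\CurrentVersion\AppCompatFlags\Layers"

with winreg.CreateKey(winreg.HKEY_CURRENT_USER, LAYERS) as key:
    # Value name = full path to the executable; value data = the layers to apply.
    winreg.SetValueEx(key, GAME_DIR + r"\RA2.exe", 0, winreg.REG_SZ,
                      "~ WINXPSP3 RUNASADMIN")
```

Repeating the SetValueEx call for the other .exe files in the game folder mirrors step 8 of the list above.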

                          Method 2: Tweak the Game Settings in Origin

                          -

                          If you downloaded Red Alert 2 from EA Origin, you can also try adjusting some settings in Origin to fix the error. Here's how:

                          -

                          -
                            -
                          1. Launch Origin on your PC and go to the My Games tab.
                          2. -
                          3. Select All games from the drop-down menu and right-click on Red Alert 2.
                          4. -
                          5. Select Properties from the context menu.
                          6. -
                          7. Type -Win in the text box that says Command-Line Arguments[^3^].
                          8. -
                          9. Select the checkbox for Disable Origin in Game for this game[^3^].
                          10. -
                          11. Click on Save to confirm the changes.
                          12. -
                          13. Restart the game and see if the error is gone.
                          14. -
                          -

                          Method 3: Adjust the Game's Resolution

                          -

                          Another possible reason why you get this error is that your screen resolution is too high for Red Alert 2, which was designed for lower resolution monitors. To fix this, you can try lowering your screen resolution or changing some settings in a configuration file. Here's how:

                          -
                            -
                          1. Go to your desktop and right-click on an empty space.
                          2. -
                          3. Select Display settings from the context menu.
                          4. -
                          5. In the Settings window, click on Advanced display settings.
                          6. -
                          7. In the next window, click on Display adapter properties for Display 1.
                          8. -
                          9. In the pop-up window, go to the Monitor tab and select a lower refresh rate from the drop-down menu under Screen refresh rate. For example, try 60 Hertz or lower[^4^].
                          10. -
                          11. Click on Apply and OK to save the changes.
                          12. -
                          13. If this doesn't work, go back to the game folder where you installed Red Alert 2.
                          14. -
                          15. Find a file named RA2.ini and open it with Notepad or any other text editor.
                          16. -
                          17. In the file, find a line that says ScreenWidth=800 and change it to ScreenWidth=1024 or lower[^4^].
                          18. -
                          19. Also, find a line that says ScreenHeight=600 and change it to ScreenHeight=768 or lower[^4^].
                          20. -
21. Save and close the file; a short script that makes the same two edits is sketched below.
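The sketch below is a rough Python example of the same change. It assumes RA2.ini really contains the plain ScreenWidth=800 and ScreenHeight=600 lines described above, and the install path is an assumption.

```python
# Sketch: lower the resolution entries in RA2.ini with a plain text replacement.
# Assumption: the file sits in the game folder below and still has the default values.
from pathlib import Path

ini_path = Path(r"C:\Games\RedAlert2\RA2.ini")   # assumption: adjust to your install
text = ini_path.read_text(errors="ignore")
text = text.replace("ScreenWidth=800", "ScreenWidth=1024")
text = text.replace("ScreenHeight=600", "ScreenHeight=768")
ini_path.write_text(text)
```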

                            -
                            -
                            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/God Se Apteek Pdf __LINK__.md b/spaces/tioseFevbu/cartoon-converter/scripts/God Se Apteek Pdf __LINK__.md deleted file mode 100644 index b90e811706fccc571bc9842b047648d8b6bb1cbf..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/God Se Apteek Pdf __LINK__.md +++ /dev/null @@ -1,16 +0,0 @@ - -Here is a possible title and article with HTML formatting for the keyword "God Se Apteek Pdf": - -

                            God Se Apteek: A Book Series on Natural Healing by Herman Uys

                            -

                            God Se Apteek is a series of books by Herman Uys, a South African author and natural health practitioner. The title means "God's Pharmacy" in Afrikaans, and the books explore how God has created everything we need in nature to heal ourselves from various diseases and ailments.

                            -

                            The first book, God Se Apteek: Jy is wat jy eet, en jy is wat jy nie eet nie, was published in 2004 and focuses on the importance of nutrition and diet for health. Uys explains how most people are killing themselves with a fork and digging their own graves with their teeth, by eating foods that are harmful to their bodies. He also provides a list of foods that are beneficial for different blood types and health conditions.

                            -

                            God Se Apteek Pdf


                            Download Zip 🔗 https://urlcod.com/2uHwQl



                            -

                            The second book, God Se Apteek - Deel 2: Eet jouself gesond, was published in 2008 and is a follow-up to the first one. In this book, Uys discusses a variety of diseases and ailments, with a natural treatment for each one. He also lists various herbs, oils and other products that have healing properties and recommended uses. He emphasizes that God has not allowed any sickness or ailment on this earth without giving us an effective remedy or medicine in the form of plants, vegetables or fruits.

                            -

                            The books are written in Afrikaans, but they have been translated into English and other languages. They are popular among people who are interested in natural health and alternative medicine. They are also based on Uys' own experience and research, as well as his Christian faith.

                            -

                            If you want to learn more about God Se Apteek and how to use nature's gifts for your health, you can download the PDF versions of the books from various online sources[^2^] [^3^]. You can also buy the paperback editions from Amazon[^1^] [^4^] or other bookstores.

- -

                            One of the main themes of God Se Apteek is that God has designed our bodies to heal themselves, if we give them the right fuel and care. Uys believes that many modern diseases are caused by poor nutrition, stress, toxins and lack of exercise. He argues that by following a natural and balanced diet, we can prevent and reverse many chronic and degenerative conditions, such as diabetes, arthritis, cancer and heart disease.

                            -

                            Another theme of God Se Apteek is that God has given us a rich and diverse variety of plants and animals that have medicinal and nutritional value. Uys shares his knowledge and wisdom on how to use these natural resources for our benefit. He covers topics such as herbs, spices, fruits, vegetables, grains, nuts, seeds, oils, honey, vinegar, salt, water, milk, eggs, meat, fish and more. He explains how each of these foods can help us with specific health issues and how to prepare them in simple and delicious ways.

                            -

                            A third theme of God Se Apteek is that God has a plan and a purpose for our lives and that He wants us to live in harmony with Him, ourselves and nature. Uys encourages his readers to trust in God's love and power and to seek His guidance and will for their health and well-being. He also urges them to respect and protect the environment and the animals that God has created. He believes that by living according to God's principles and laws, we can enjoy a long and happy life.

                            -
                            -
                            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Icc Profil Euroscale Coated V2 PATCHED.md b/spaces/tioseFevbu/cartoon-converter/scripts/Icc Profil Euroscale Coated V2 PATCHED.md deleted file mode 100644 index 2e3e11bd5c7e939add26c051635c29bfbcc0f605..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Icc Profil Euroscale Coated V2 PATCHED.md +++ /dev/null @@ -1,26 +0,0 @@ -
                            -

                            What is Icc Profil Euroscale Coated V2 and how to use it?

                            -

                            Icc Profil Euroscale Coated V2 is an ICC profile that defines the color space for offset printing on coated paper in Europe. ICC profiles are standardized files that describe how colors are reproduced on different devices, such as monitors, scanners, printers, and cameras. ICC profiles help to ensure color consistency and accuracy across different media and workflows.

                            -

                            Icc Profil Euroscale Coated V2 is based on the ISO 12647-2:2013 standard, which specifies the requirements for printing conditions and quality control for half-tone color separations, proof and production prints. The profile was developed by the European Color Initiative (ECI), a group of experts from the graphic arts industry who aim to improve color communication and quality in digital media.

                            -

                            Icc Profil Euroscale Coated V2


                            Downloadhttps://urlcod.com/2uHwsM



                            -

To use Icc Profil Euroscale Coated V2, you need compatible software that supports color management, such as Adobe Photoshop, Illustrator, InDesign, or Acrobat. You also need a calibrated monitor and a proofing device that can simulate the printing conditions of the profile. You can download the profile from the ECI website[^1^], along with other useful resources such as documentation, test files, and tools.

                            -

When you open or create a document in your software, you can assign or convert it to Icc Profil Euroscale Coated V2. This ensures that the colors in your document match the colors that will be printed on coated paper. You can also preview what your document will look like on paper by using the View -> Proof Colors option in your software, or print a proof on a printer that is calibrated to Icc Profil Euroscale Coated V2.
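If you work outside Adobe tools, the same kind of conversion can be sketched with Python's Pillow library and its ImageCms module. This is only an illustration: the input file and profile filename below are assumptions (point the latter at the .icc file you downloaded from ECI), and the rendering intent shown is just one common choice.

```python
# Sketch: convert an RGB image into the Euroscale Coated v2 CMYK space with Pillow.
# Assumption: "EuroscaleCoated.icc" is the profile file you downloaded locally.
from PIL import Image, ImageCms

src = Image.open("artwork.png").convert("RGB")            # assumed input file
srgb = ImageCms.createProfile("sRGB")                     # built-in source profile
euroscale = ImageCms.getOpenProfile("EuroscaleCoated.icc")

cmyk = ImageCms.profileToProfile(
    src, srgb, euroscale,
    renderingIntent=ImageCms.INTENT_RELATIVE_COLORIMETRIC,
    outputMode="CMYK",
)
cmyk.save("artwork_euroscale.tif")   # TIFF keeps the CMYK data for print workflows
```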

                            -

                            When you are ready to send your document to a printer or a service provider, you can export it as a PDF file with Icc Profil Euroscale Coated V2 embedded. This will preserve the color information of your document and allow the printer to reproduce it accurately. You can also include other settings such as resolution, compression, fonts, and marks in your PDF file.
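Embedding the profile when saving from a script follows the same idea as the PDF export described above. As a hedged example, Pillow's TIFF and JPEG writers accept an icc_profile argument; the filenames are again assumptions.

```python
# Sketch: embed the downloaded profile in the saved file so the print provider's
# software knows which color space the pixel data is in.
from PIL import Image

profile_bytes = open("EuroscaleCoated.icc", "rb").read()   # assumed local filename
img = Image.open("artwork_euroscale.tif")
img.save("artwork_for_print.tif", icc_profile=profile_bytes)
```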

                            -

                            Icc Profil Euroscale Coated V2 is a widely used and accepted standard for offset printing on coated paper in Europe. By using this profile, you can achieve high-quality and consistent color results for your print projects.

                            - -

                            What are the benefits of using ICC profiles? ICC profiles have many benefits for color management and quality control. Some of the benefits are:

                            -
                              -
                            • ICC profiles can help you achieve consistent and accurate colors across different devices and media. For example, you can use ICC profiles to ensure that the colors on your monitor match the colors on your printer or on a web browser.
                            • -
                            • ICC profiles can help you optimize your workflow and save time and money. For example, you can use ICC profiles to reduce the need for trial and error printing or color adjustments. You can also use ICC profiles to avoid color mismatches or errors that may result in reprints or customer complaints.
                            • -
                            • ICC profiles can help you enhance your creativity and expression. For example, you can use ICC profiles to explore different color spaces and rendering intents that suit your artistic vision. You can also use ICC profiles to simulate different printing conditions and effects.
                            • -
                            -

How do I edit ICC profiles in Photoshop? Photoshop is a powerful program that lets you assign ICC profiles to your images and convert images between profiles. You can do this by following these steps:

                            -
                              -
                            1. Open your image in Photoshop and go to Edit -> Assign Profile. This will show you the current ICC profile of your image. You can choose to keep it or change it to another profile.
                            2. -
                            3. If you want to change the profile, select a different profile from the Profile drop-down menu. You can choose from the built-in profiles or browse for a custom profile on your computer. Click OK to apply the new profile.
                            4. -
                            5. If you want to edit the profile, click on Edit -> Convert to Profile. This will open a dialog box where you can adjust various settings of the profile, such as the destination space, the rendering intent, the black point compensation, and the dithering. Click OK to convert your image to the new profile.
                            6. -
                            7. If you want to save the profile, go to Edit -> Save As and choose Photoshop PDF as the format. Make sure that Embed Color Profile is checked and click Save. This will save your image as a PDF file with the embedded profile.
                            8. -

                            -

                            -
                            -
                            \ No newline at end of file diff --git a/spaces/tj5miniop/distilgpt2/app.py b/spaces/tj5miniop/distilgpt2/app.py deleted file mode 100644 index cca0468441e06f4b9ea92cb2bc6b42fe70dbe22e..0000000000000000000000000000000000000000 --- a/spaces/tj5miniop/distilgpt2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/distilgpt2").launch() \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/operations/build/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/operations/build/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tomofi/ABINet-OCR/modules/model_vision.py b/spaces/tomofi/ABINet-OCR/modules/model_vision.py deleted file mode 100644 index feb5a1112bf8b40d5a7ea492ab125d1ccacd4df7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/ABINet-OCR/modules/model_vision.py +++ /dev/null @@ -1,47 +0,0 @@ -import logging -import torch.nn as nn -from fastai.vision import * - -from modules.attention import * -from modules.backbone import ResTranformer -from modules.model import Model -from modules.resnet import resnet45 - - -class BaseVision(Model): - def __init__(self, config): - super().__init__(config) - self.loss_weight = ifnone(config.model_vision_loss_weight, 1.0) - self.out_channels = ifnone(config.model_vision_d_model, 512) - - if config.model_vision_backbone == 'transformer': - self.backbone = ResTranformer(config) - else: self.backbone = resnet45() - - if config.model_vision_attention == 'position': - mode = ifnone(config.model_vision_attention_mode, 'nearest') - self.attention = PositionAttention( - max_length=config.dataset_max_length + 1, # additional stop token - mode=mode, - ) - elif config.model_vision_attention == 'attention': - self.attention = Attention( - max_length=config.dataset_max_length + 1, # additional stop token - n_feature=8*32, - ) - else: - raise Exception(f'{config.model_vision_attention} is not valid.') - self.cls = nn.Linear(self.out_channels, self.charset.num_classes) - - if config.model_vision_checkpoint is not None: - logging.info(f'Read vision model from {config.model_vision_checkpoint}.') - self.load(config.model_vision_checkpoint) - - def forward(self, images, *args): - features = self.backbone(images) # (N, E, H, W) - attn_vecs, attn_scores = self.attention(features) # (N, T, E), (N, T, H, W) - logits = self.cls(attn_vecs) # (N, T, C) - pt_lengths = self._get_length(logits) - - return {'feature': attn_vecs, 'logits': logits, 'pt_lengths': pt_lengths, - 'attn_scores': attn_scores, 'loss_weight':self.loss_weight, 'name': 'vision'} diff --git a/spaces/tomofi/MMOCR/demo/ner_demo.py b/spaces/tomofi/MMOCR/demo/ner_demo.py deleted file mode 100644 index 113d4e31bf0d98a6835e37a01d9f96425ee59440..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/demo/ner_demo.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from argparse import ArgumentParser - -from mmocr.apis import init_detector -from mmocr.apis.inference import text_model_inference -from mmocr.datasets import build_dataset # NOQA -from mmocr.models import build_detector # NOQA - - -def main(): - parser = ArgumentParser() - parser.add_argument('config', help='Config file.') - parser.add_argument('checkpoint', help='Checkpoint file.') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference.') - args = parser.parse_args() - - # build the model from a config file and a checkpoint file - model = init_detector(args.config, args.checkpoint, device=args.device) - - # test a single text - input_sentence = input('Please enter a sentence you want to test: ') - result = text_model_inference(model, input_sentence) - - # show the results - for pred_entities in result: - for entity in pred_entities: - print(f'{entity[0]}: {input_sentence[entity[1]:entity[2] + 1]}') - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/MMOCR/tests/test_dataset/test_transform_wrappers.py b/spaces/tomofi/MMOCR/tests/test_dataset/test_transform_wrappers.py deleted file mode 100644 index 4639ed3a86184e9a793fb4be39b5e07e7dea1df2..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_dataset/test_transform_wrappers.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import unittest.mock as mock - -import numpy as np -import pytest - -from mmocr.datasets.pipelines import (OneOfWrapper, RandomWrapper, - TorchVisionWrapper) -from mmocr.datasets.pipelines.transforms import ColorJitter - - -def test_torchvision_wrapper(): - x = {'img': np.ones((128, 100, 3), dtype=np.uint8)} - # object not found error - with pytest.raises(Exception): - TorchVisionWrapper(op='NonExist') - with pytest.raises(TypeError): - TorchVisionWrapper() - f = TorchVisionWrapper('Grayscale') - with pytest.raises(AssertionError): - f({}) - results = f(x) - assert results['img'].shape == (128, 100) - assert results['img_shape'] == (128, 100) - - -@mock.patch('random.choice') -def test_oneof(rand_choice): - color_jitter = dict(type='TorchVisionWrapper', op='ColorJitter') - gray_scale = dict(type='TorchVisionWrapper', op='Grayscale') - x = {'img': np.random.randint(0, 256, size=(128, 100, 3), dtype=np.uint8)} - f = OneOfWrapper([color_jitter, gray_scale]) - # Use color_jitter at the first call - rand_choice.side_effect = lambda x: x[0] - results = f(x) - assert results['img'].shape == (128, 100, 3) - # Use gray_scale at the second call - rand_choice.side_effect = lambda x: x[1] - results = f(x) - assert results['img'].shape == (128, 100) - - # Passing object - f = OneOfWrapper([ColorJitter(), gray_scale]) - # Use color_jitter at the first call - results = f(x) - assert results['img'].shape == (128, 100) - - # Test invalid inputs - with pytest.raises(AssertionError): - f = OneOfWrapper(None) - with pytest.raises(AssertionError): - f = OneOfWrapper([]) - with pytest.raises(AssertionError): - f = OneOfWrapper({}) - - -@mock.patch('numpy.random.uniform') -def test_runwithprob(np_random_uniform): - np_random_uniform.side_effect = [0.1, 0.9] - f = RandomWrapper([dict(type='TorchVisionWrapper', op='Grayscale')], 0.5) - img = np.random.randint(0, 256, size=(128, 100, 3), dtype=np.uint8) - results = f({'img': copy.deepcopy(img)}) - assert results['img'].shape == (128, 100) - results = f({'img': copy.deepcopy(img)}) - assert results['img'].shape == (128, 100, 3) diff --git a/spaces/tomzhang1019/ChatGPT/chatgpt - 
windows.bat b/spaces/tomzhang1019/ChatGPT/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- a/spaces/tomzhang1019/ChatGPT/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). \ No newline at end of file diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/base.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/base.py deleted file mode 100644 index b196c2f7aa583a3e8bc4aad9f943df0c4dae0da7..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/data/base.py +++ /dev/null @@ -1,23 +0,0 @@ -from abc import abstractmethod -from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset - - -class Txt2ImgIterableBaseDataset(IterableDataset): - ''' - Define an interface to make the IterableDatasets for text2img data chainable - ''' - def __init__(self, num_records=0, valid_ids=None, size=256): - super().__init__() - self.num_records = num_records - self.valid_ids = valid_ids - self.sample_ids = valid_ids - self.size = size - - print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') - - def __len__(self): - return self.num_records - - @abstractmethod - def __iter__(self): - pass \ No newline at end of file diff --git a/spaces/tracinginsights/F1-analysis/pages/Tyre_Strategy.py b/spaces/tracinginsights/F1-analysis/pages/Tyre_Strategy.py deleted file mode 100644 index 50ca4d5e4f77497ae829f9458de2a66da7593e8c..0000000000000000000000000000000000000000 --- a/spaces/tracinginsights/F1-analysis/pages/Tyre_Strategy.py +++ /dev/null @@ -1,18 +0,0 @@ -import streamlit as st -from repo_directory import Tyre_Strategy -from repo_directory import button - - -YEAR_SELECTED = st.selectbox( - 'Select Year', - (2023, 2022, 2021, 2020, 2019, 2018)) - - -RACE_SELECTED = st.selectbox( - 'Select Race', - (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) - - -driver_stints, drivers_list, test, last_row, f1session = Tyre_Strategy.get_data(YEAR_SELECTED, RACE_SELECTED) - -Tyre_Strategy.plot(driver_stints,drivers_list, test, last_row, f1session ) \ No newline at end of file diff --git a/spaces/triggah61/chingu-music/tests/modules/test_seanet.py b/spaces/triggah61/chingu-music/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/triggah61/chingu-music/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid 
disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/tsi-org/LLaVA/README.md b/spaces/tsi-org/LLaVA/README.md deleted file mode 100644 index 1c9bfa43a7f646228ab2ea6a6107df231bf15d55..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/LLaVA/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: LLaVA -emoji: 🔥 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.36.1 -app_port: 7860 ---- \ No newline at end of file diff --git a/spaces/uSerNameDDHL/bingo/src/components/toaster.tsx b/spaces/uSerNameDDHL/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/uSerNameDDHL/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/ucinlp/autoprompt/scripts/run_fact_retrieval_example.sh b/spaces/ucinlp/autoprompt/scripts/run_fact_retrieval_example.sh deleted file mode 100644 index bc9170bcffb367161142be324975a23f3b7b6d1b..0000000000000000000000000000000000000000 --- a/spaces/ucinlp/autoprompt/scripts/run_fact_retrieval_example.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Experiment 8 -# Task: fact retrieval -# Model: RoBERTa -# Batch sizes: 56 -# Iters: 1000 -# Filtering: True - -datadir=$1 -logfile=$2 - -# Clear files -cat /dev/null > $logfile -cat /dev/null > ${logfile}.log - -for path in $datadir/*; do - filename=$(basename "$path") - time CUDA_VISIBLE_DEVICES=3 python -m autoprompt.create_trigger \ - --train $path/train.jsonl \ - --dev $path/dev.jsonl \ - --template ' {sub_label} [T] [T] [T] [T] [T] [P] . 
' \ - --num-cand 10 \ - --accumulation-steps 1 \ - --model-name roberta-large \ - --bsz 56 \ - --eval-size 56 \ - --iters 1000 \ - --label-field 'obj_label' \ - --tokenize-labels \ - --filter \ - --print-lama >> $logfile 2>> ${logfile}.log -done diff --git a/spaces/uih-zyn/runwayml-stable-diffusion-v1-5/app.py b/spaces/uih-zyn/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/uih-zyn/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/uragankatrrin/MHN-React/mhnreact/utils.py b/spaces/uragankatrrin/MHN-React/mhnreact/utils.py deleted file mode 100644 index 9ce70d6f46b26157c9992b5853cd8c210f4f8e93..0000000000000000000000000000000000000000 --- a/spaces/uragankatrrin/MHN-React/mhnreact/utils.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Author: Philipp Seidl - ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning - Johannes Kepler University Linz -Contact: seidl@ml.jku.at - -General utility functions -""" - -import argparse -from collections import defaultdict -import numpy as np -import pandas as pd -import math -import torch - -# used and fastest version -def top_k_accuracy(y_true, y_pred, k=5, ret_arocc=False, ret_mrocc=False, verbose=False, count_equal_as_correct=False, eps_noise=0): - """ partly from http://stephantul.github.io/python/pytorch/2020/09/18/fast_topk/ - count_equal counts equal values as beein a correct choice e.g. all preds = 0 --> T1acc = 1 - ret_mrocc ... also return median rank of correct choice - eps_noise ... if >0 ads noise*eps to y_pred .. recommended e.g. 
1e-10 - """ - if eps_noise>0: - if torch.is_tensor(y_pred): - y_pred = y_pred + torch.rand(y_pred.shape)*eps_noise - else: - y_pred = y_pred + np.random.rand(*y_pred.shape)*eps_noise - - if count_equal_as_correct: - greater = (y_pred > y_pred[range(len(y_pred)), y_true][:,None]).sum(1) # how many are bigger - else: - greater = (y_pred >= y_pred[range(len(y_pred)), y_true][:,None]).sum(1) # how many are bigger or equal - if torch.is_tensor(y_pred): - greater = greater.long() - if isinstance(k, int): k = [k] # pack it into a list - tkaccs = [] - for ki in k: - if count_equal_as_correct: - tkacc = (greater<=(ki-1)) - else: - tkacc = (greater<=(ki)) - if torch.is_tensor(y_pred): - tkacc = tkacc.float().mean().detach().cpu().numpy() - else: - tkacc = tkacc.mean() - tkaccs.append(tkacc) - if verbose: print('Top', ki, 'acc:\t', str(tkacc)[:6]) - - if ret_arocc: - arocc = greater.float().mean()+1 - if torch.is_tensor(arocc): - arocc = arocc.detach().cpu().numpy() - return (tkaccs[0], arocc) if len(tkaccs) == 1 else (tkaccs, arocc) - if ret_mrocc: - mrocc = greater.median()+1 - if torch.is_tensor(mrocc): - mrocc = mrocc.float().detach().cpu().numpy() - return (tkaccs[0], mrocc) if len(tkaccs) == 1 else (tkaccs, mrocc) - - - return tkaccs[0] if len(tkaccs) == 1 else tkaccs - - -def seed_everything(seed=70135): - """ does what it says ;) - from https://gist.github.com/KirillVladimirov/005ec7f762293d2321385580d3dbe335""" - import numpy as np - import random - import os - import torch - - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - -def get_best_gpu(): - '''Get the gpu with most RAM on the machine. From P. Neves''' - import torch - if torch.cuda.is_available(): - gpus_ram = [] - for ind in range(torch.cuda.device_count()): - gpus_ram.append(torch.cuda.get_device_properties(ind).total_memory/1e9) - return f"cuda:{gpus_ram.index(max(gpus_ram))}" - else: - raise ValueError("No gpus were detected in this machine.") - - -def sort_by_template_and_flatten(template_scores, prod_idx_reactants, agglo_fun=sum): - flat_results = [] - for ii in range(len(template_scores)): - idx_prod_reactants = defaultdict(list) - for k,v in prod_idx_reactants[ii].items(): - for iv in v: - idx_prod_reactants[iv].append(template_scores[ii,k]) - d2 = {k: agglo_fun(v) for k, v in idx_prod_reactants.items()} - if len(d2)==0: - flat_results.append([]) - else: - flat_results.append(pd.DataFrame.from_dict(d2, orient='index').sort_values(0, ascending=False).index.values.tolist()) - return flat_results - - -def str2bool(v): - """adapted from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse""" - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1', '',' '): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -@np.vectorize -def lgamma(x): - return math.lgamma(x) - -def multinom_gk(array, axis=0): - """Multinomial lgamma pooling over a given axis""" - res = lgamma(np.sum(array,axis=axis)+2) - np.sum(lgamma(array+1),axis=axis) - return res \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Aqua Data Studio Pro 20.6 Crack A Powerful and Reliable Solution for Database Administration.md b/spaces/usbethFlerru/sovits-modelsV2/example/Aqua Data Studio Pro 20.6 Crack A Powerful and Reliable Solution for Database 
Administration.md deleted file mode 100644 index 62a6b8f30a3db2bbcacdd3e72bf6748f51fa4f28..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Aqua Data Studio Pro 20.6 Crack A Powerful and Reliable Solution for Database Administration.md +++ /dev/null @@ -1,6 +0,0 @@ - -

Today, there are more ways to take photos of the underwater world than anyone could have imagined at the start of the millennium, thanks to ever-improving designs for aquatic cameras. On one hand, they have provided illuminating views of life in the seas. On the other hand, these devices have inundated marine biologists with mountains of visual data that are incredibly tedious and time-consuming to sort through.

                            -

                            Aqua Data Studio Pro 20.6 Crack


                            DOWNLOAD ····· https://urlcod.com/2uyXcy



                            -

                            ABAQUS_V6.4-1_LINUX-LND
                            Abb.robotstudio v3.1
                            Ableton.Live.v5.0.1
                            Adapt PT 7.0
                            AdaptSoft Adapt Builder 1.5.4
                            Adams Practice
                            ACCELRYS_MATERIALS_STUDIO_V3.2
                            AceCAD StruCAD v9R2 Win9xNT2K
                            AceCad StruCad v10.0 Manuals
                            ACCELRYS_MATERIALS_STUDIO_V3.2
                            ADINA_SYSTEM_V8.1_ISO-LND
                            ALTAIR.HYPERWORKS.V7.0.SP1
                            Altera Max Plus Ii v10.2-Ela
                            Alias.Spoolgen.v5.0
                            Allen Bradley RsLinx RsLogix 500
                            Allen Bradley Rslogix 5000 v13 01
                            Allplan2005
                            ALGOR_V15.0_ISO-LND
                            ALGOR_V16.0_ISO-LND
                            Algor.Pipepack.v7.04
                            Altera.Quartus.II.v5.0
                            Amtec Tecplot 10.0.4
                            Amira 3.1
                            AMIABLE_FLEXISIGN_PRO_V7.5v5
                            AMTECH.PRODESIGN.NEC.v9.2.5
                            ANALYTICAL GRAPHICS STK PRO.V6.1
                            ansa.v11.3.5-lnd
                            Ansys LS-DYNA 9.60
                            ANSYS V9.0
                            ANSYS.MULTIPHYSICS.V10.0
                            ANSYS.ICEM.CFD.V10.0
                            ANSYS_PARAMESH_V3.0_ISO-LND
                            Ansoft Hfss v9.2.iso
                            Ansoft Siwave 2.0
                            Ansoft RMXprt v5.0
                            Applied Flow Technology Arrow v3.0
                            Applied Flow Technology -impulse 3.0
                            Applied.Flow.Technology._Fathom.v6.0
                            ARCPAD 6.0.3
                            Archicad Abvont Artlantis 4.5
                            archline xp.rar
                            Arcview 9 3CD
                            Arcsde 9
                            ArcIMS9.0
                            Arcgis.Engine.9.iso
                            Arcgis 9 Desktop Developer Kit.ISO
                            Artcam 8.0
                            ArtiCAD V10
                            Articulate.Presenter.Professional.Edition.v4.105
                            ARM.REALVIEW.DEVELOPER SUITE v2.0
                            Aspen PIMS 2004
                            AspenTech Aspen ICARUS Products v12.0
                            Aspen-Tech B-jac 12.0
                            Aspen Engineering Suite 11.1 CD1
                            Aspen Engineering Suite 11.1 CD2
                            ATIR.STRAP.V11.5
                            Aurelon Signalize v5.6.3 /5.6.2
                            AVID_SOFTIMAGE_XSI_V4.2
                            Avid Xpress Pro 4.6.1 ISO
                            Automotive Expert V7.33 with crack
                            AUTOMATION STUDIO 5 FULLY CRACKED
                            Automation Studio 5.0.Multilingual
                            Autodesk Architectural Desktop 2006
                            Autodesk land desktop2005
                            Autodesk Mechanical Desktop 2006
                            AUTODESK.MAP.3D.2005
                            Autodesk.Map.3D.2006
                            Autodesk civil 3D v2005
                            Autodesk civil 3D v2006 2CD
                            Autodesk survey2005
                            Autodesk Revit Series v6.1
                            Autodesk Viz 2005
                            Autodesk Viz 2006
                            Autodesk AutoCAD 2006
                            Autodesk Autocad Electrical 2006
                            Autodesk.Autocad.Land.Desktop.2005.2CD
                            AUTODESK_AUTOCAD_MECHANICAL_Desktop V2006_ISO-LND
                            Bernina Artista 4
                            BENTLEY GEOPAK Civil 2004
                            BENTLEY GEOPAK Rebar 2004
                            BENTLEY.Microstation.v8.05.02.35
                            BENTLEY.MX.v8.05.02.02-SoS
                            Bentley.MX.2004.Edition-SoS
                            BLUE_RIDGE_NUMERICS_CFDESIGN_V7.0_ISO-LND
                            Broderbund 3D Home Architect Design Suite Deluxe v6.0
                            Brother.PE-Design.v6.0
                            BricsCad.Pro.v6.0.0012
                            BricsCad.Structural.Frames.v2.1.0003
                            BricsCad.Architecturals.v4.1.0015.for.BricsCad
                            BricsCad.Architecturals.v4.1.0015.for.AutoCAD
                            BOUJOU.THREE.V3.0-ISO
                            Borland.Together.for.Eclipse.v7
                            Borland Together for Visual Studio Net2.0
                            Borland DELPHI 2005 Professional 1-3CD
                            Borland DELPHI 2005 Professional 1-3CD
                            Borland JBuilder 2005 1CD
                            Borland JBuilder 2005 1CD
                            Borland C++ Builder Professional 2CD
                            Cadpipe2002demo(with crack)
                            Camworks 2003
                            CADSTAR v7.0
                            CAD - Ansoft RMXprt v5.0
                            Cafe Manilla v8.3.KG
                            CD-ADAPCO_STAR_CD_V3.24-LND
                            CEI_ENSIGHT_V7.6.6_GOLD-LND
                            CEI.ENSIGHT.GOLD.V8.0.5
                            Cfdesign 8.0
                            Chemcad v. 5.14 for Windows XP
                            Chemcad 5.2 Pro
                            Chem office Ultra 2004 v8.0
                            Chemkin 4.0 for Windows XP
                            Chempro v.6.31-0
                            ChessBase 9.0
                            Cimatron E 6.0 SP2
                            CIMCO.DNCMax.v4.40.09 & CIMCO.Edit.v4.40.09
                            Cimco.Edit.v4.40.0
                            COSMOS.DesignSTAR.v4.5.HAPPY.HOLIDAYS-SHOCK
                            COADE CADWORX_PIPE2004
                            COADE_CADWORX_PLANT_PROFESSIONAL_V2006
                            COADE_cadworx_pid2006
                            COADE_CADWORX_STEEL_V2006
                            COADE_CADWORX_EQUIPMENT_V2006
                            COADE CAESAR 4.50
                            COADE PVELITE2004
                            COADE TANK2.50
                            CSC.TEDDS.V8.0
                            CSI ETABS NL V8.5
                            CSI safe v8.0.1
                            CSI safe v8.0.6
                            CSI SAP2000 V9.03
                            Cypecad 2003
                            Chief Architect 9.5 Full
                            Chief Architect 10 Full (K&B Dk) Disk 1
                            Chief Architect 9.5 Premium Content CD1
                            Chief Architect 9.5 Premium Content CD2
                            OLGA2000 v4.13
                            DASSAULT.SYSTEMES.CATIA.P3.V5R15-MAGNiTUDE
                            Delcam_Artcam_v8
                            DELCAM POWERMILL V5.5 ISO
                            DIMSOLN_FOUNDATION_3D_V3.8.6
                            DIMSOLN_COMBINED_3D_V3.8.0
                            DIMS

                            aaccfb2cb3
                            -
                            -
                            \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Discover the Secrets of Ancient Egypt with Robert Temple Egyptian Dawn PDF Download.md b/spaces/usbethFlerru/sovits-modelsV2/example/Discover the Secrets of Ancient Egypt with Robert Temple Egyptian Dawn PDF Download.md deleted file mode 100644 index 7cf31dc0e39dd5224d467402c76326f0b8e513b7..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Discover the Secrets of Ancient Egypt with Robert Temple Egyptian Dawn PDF Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

                            robert temple egyptian dawn pdf download


                            DOWNLOAD ✏ ✏ ✏ https://urlcod.com/2uyU9Y



                            -
                            - aaccfb2cb3
                            -
                            -
                            -

                            diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/quickstart.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/quickstart.md deleted file mode 100644 index 19480d27d8234ce677ded54d5e134bc8862bc0bf..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/quickstart.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -comments: true ---- - -# 🚧 Page Under Construction ⚒ - -This page is currently under construction!️ 👷Please check back later for updates. 😃🔜 diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/train.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/train.md deleted file mode 100644 index 8c988fe7bb339d5c2ff0303e5eea9dfc1c029213..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/v8/pose/train.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Boost posture detection using PoseTrainer and train models using train() API. Learn PoseLoss for ultra-fast and accurate pose detection with Ultralytics YOLO. -keywords: PoseTrainer, human pose models, deep learning, computer vision, Ultralytics YOLO, v8 ---- - -## PoseTrainer ---- -### ::: ultralytics.yolo.v8.pose.train.PoseTrainer -

                            - -## train ---- -### ::: ultralytics.yolo.v8.pose.train.train -

                            diff --git a/spaces/vanessbut/tldr_keywords/utils/utils.py b/spaces/vanessbut/tldr_keywords/utils/utils.py deleted file mode 100644 index 894dcb4864533fbce41064d819d860202a83681b..0000000000000000000000000000000000000000 --- a/spaces/vanessbut/tldr_keywords/utils/utils.py +++ /dev/null @@ -1,142 +0,0 @@ -import re -import numpy as np - -from sklearn.feature_extraction.text import CountVectorizer -from sklearn.metrics.pairwise import cosine_similarity -from sklearn.metrics.pairwise import euclidean_distances -from scipy.special import softmax - -def preprocess(strings): - """ - Replace '\n' characters with spaces and remove extra spaces. - - strings - a list of strings. - """ - - for index in range(len(strings)): - strings[index] = strings[index].replace('\n', ' ') - strings[index] = re.sub(' +', ' ', strings[index]) - - return strings - - -def get_candidates(text, nlp, min_df=0.0, ngram_range=(1, 3), max_words=None): - """ - Get a list of max(max_words, #words in text) keyword candidates. - - text - the input text. - nlp - the language analysis tool (see spacy) - min_df - the minimum frequency of a word in the text. - ngram_range - the number of grams in a keyword. - max_words - the maximum number of words in the output. - """ - - # Get the most basic set of n-grams. - count = CountVectorizer(ngram_range=ngram_range, - stop_words="english", - min_df=min_df, - max_features=max_words).fit([text]) - candidates = count.get_feature_names() - #print(candidates) - - # Process the resulting list. - nlp_result = nlp(text) - - # Phrases containing nouns. - noun_phrases = set(chunk.text.strip().lower() for chunk in nlp_result.noun_chunks) - #print(noun_phrases) - - # Nouns on their own. - noun_lemmas = set() - for token in nlp_result: - if token.pos_ == "NOUN": - noun_lemmas.add(token.lemma_) # For a single word there is no point in storing the surface form. - print(noun_lemmas) - - #nouns = set() - #noun_lemmas = set() - - # Compound words first. - #for token in nlp_result: - # if token.pos_ == "NOUN": - # noun_lemmas.add(token.lemma_) # For a single word there is no point in storing the surface form. - # nouns.add(token.text) - #print(nouns) - nouns = noun_lemmas #nouns.union(noun_lemmas) - - # The union. - with_nouns = nouns.union(noun_phrases) - - # Filtering. - candidates = list(filter(lambda candidate: candidate in with_nouns, candidates)) - - return candidates - - -def get_embedding(texts, model, tokenizer, chunk_size=128): - """ - Convert a set of texts into embeddings. - """ - - n_chunks = len(texts) // chunk_size + int(len(texts) % chunk_size != 0) - embeddings = [] - - for chunk_index in range(n_chunks): - start = chunk_index * chunk_size - end = min(start + chunk_size, len(texts)) - chunk = texts[start:end] - - chunk_tokens = tokenizer(chunk, padding=True, truncation=True, return_tensors="pt") - chunk_embeddings = model(**chunk_tokens)["pooler_output"] - chunk_embeddings = chunk_embeddings.detach().numpy() - - embeddings.append(chunk_embeddings) - - embeddings = np.vstack(embeddings) - - return embeddings - - -def score_candidates(text, candidates, model, tokenizer): - """ - Rank the keywords. - """ - - if len(candidates) == 1: - return np.array([1.0]) - elif len(candidates) == 0: - return np.array([]) - - # Embedding for the text. - text_embedding = get_embedding([text], model, tokenizer) - - # Embeddings for the keywords. - candidate_embeddings = get_embedding(candidates, model, tokenizer) - - # We will take a softmax over the normalized cosine distances. 
- distances = cosine_similarity(text_embedding, candidate_embeddings) - score = softmax((distances - np.mean(distances)) / np.std(distances))[0] - - return score - - -def get_keywords(text, nlp, model, tokenizer, top=0.95, max_words=None): - try: - candidates = get_candidates(text, nlp) - score = score_candidates(text, candidates, model, tokenizer) - except Exception as ex: - return None - - candidates_scored = [(candidates[index], score[index]) for index in score.argsort()[::-1]] - - result = [] - sum_probability = 0.0 - max_words = len(candidates_scored) if max_words is None else min(len(candidates_scored), max_words) - for index in range(max_words): - if sum_probability > top: - break - - result.append(candidates_scored[index]) - sum_probability += candidates_scored[index][1] - - return result \ No newline at end of file diff --git a/spaces/varunkuntal/text2_img_text_demo/app.py b/spaces/varunkuntal/text2_img_text_demo/app.py deleted file mode 100644 index 52a6bf82931b67e7cd1e67b7b08091c774979a77..0000000000000000000000000000000000000000 --- a/spaces/varunkuntal/text2_img_text_demo/app.py +++ /dev/null @@ -1,53 +0,0 @@ -import gradio as gr -import requests -import json - -def txt2img1(text): - url = "https://stablediffusionapi.com/api/v3/text2img" - - payload = json.dumps({ - "key": "qXpCKAkXLdKWb2sqcMlMzB0Q3gqHEggJIXxspJzHIVHUy6H9S060RN0BNGqj", - "prompt": text, - "negative_prompt": None, - "width": "512", - "height": "512", - "samples": "1", - "num_inference_steps": "20", - "seed": None, - "guidance_scale": 7.5, - "safety_checker": "yes", - "multi_lingual": "no", - "panorama": "no", - "self_attention": "no", - "upscale": "no", - "embeddings_model": "embeddings_model_id", - "webhook": None, - "track_id": None - }) - - headers = { - 'Content-Type': 'application/json' - } - - response = requests.request("POST", url, headers=headers, data=payload) - response_dict = response.json() - - return response_dict['output'][0] - -def txt2txt(text): - API_TOKEN = "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" - API_URL = "https://api-inference.huggingface.co/models/gpt2" - headers = {"Authorization": f"Bearer {API_TOKEN}"} - - def query(payload): - response = requests.post(API_URL, headers=headers, json=payload) - return response.json() - - output = query({"inputs": text}) - return output[0]['generated_text'] - -iface = gr.Interface(fn=txt2img1, inputs="text", outputs="image", title="Text to Image") -iface.launch() - -iface2 = gr.Interface(fn=txt2txt, inputs="text", outputs="text", title="Text to Text") -iface2.launch() \ No newline at end of file diff --git a/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py b/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py deleted file mode 100644 index 9158d5f6260ec74bded95377d382387430d7cd70..0000000000000000000000000000000000000000 --- a/spaces/vinay123/panoptic-segment-anything/GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py +++ /dev/null @@ -1,43 +0,0 @@ -batch_size = 1 -modelname = "groundingdino" -backbone = "swin_T_224_1k" -position_embedding = "sine" -pe_temperatureH = 20 -pe_temperatureW = 20 -return_interm_indices = [1, 2, 3] -backbone_freeze_keywords = None -enc_layers = 6 -dec_layers = 6 -pre_norm = False -dim_feedforward = 2048 -hidden_dim = 256 -dropout = 0.0 -nheads = 8 -num_queries = 900 -query_dim = 4 -num_patterns = 0 -num_feature_levels = 4 -enc_n_points = 4 -dec_n_points = 4 -two_stage_type = "standard" 
-two_stage_bbox_embed_share = False -two_stage_class_embed_share = False -transformer_activation = "relu" -dec_pred_bbox_embed_share = True -dn_box_noise_scale = 1.0 -dn_label_noise_ratio = 0.5 -dn_label_coef = 1.0 -dn_bbox_coef = 1.0 -embed_init_tgt = True -dn_labelbook_size = 2000 -max_text_len = 256 -text_encoder_type = "bert-base-uncased" -use_text_enhancer = True -use_fusion_layer = True -use_checkpoint = True -use_transformer_ckpt = True -use_text_cross_attention = True -text_dropout = 0.0 -fusion_dropout = 0.0 -fusion_droppath = 0.1 -sub_sentence_present = True diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/parallel/collate.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/parallel/collate.py deleted file mode 100644 index ad749197df21b0d74297548be5f66a696adebf7f..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/parallel/collate.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections.abc import Mapping, Sequence - -import torch -import torch.nn.functional as F -from torch.utils.data.dataloader import default_collate - -from .data_container import DataContainer - - -def collate(batch, samples_per_gpu=1): - """Puts each data field into a tensor/DataContainer with outer dimension - batch size. - - Extend default_collate to add support for - :type:`~mmcv.parallel.DataContainer`. There are 3 cases. - - 1. cpu_only = True, e.g., meta data - 2. cpu_only = False, stack = True, e.g., images tensors - 3. cpu_only = False, stack = False, e.g., gt bboxes - """ - - if not isinstance(batch, Sequence): - raise TypeError(f'{batch.dtype} is not supported.') - - if isinstance(batch[0], DataContainer): - stacked = [] - if batch[0].cpu_only: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer( - stacked, batch[0].stack, batch[0].padding_value, cpu_only=True) - elif batch[0].stack: - for i in range(0, len(batch), samples_per_gpu): - assert isinstance(batch[i].data, torch.Tensor) - - if batch[i].pad_dims is not None: - ndim = batch[i].dim() - assert ndim > batch[i].pad_dims - max_shape = [0 for _ in range(batch[i].pad_dims)] - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = batch[i].size(-dim) - for sample in batch[i:i + samples_per_gpu]: - for dim in range(0, ndim - batch[i].pad_dims): - assert batch[i].size(dim) == sample.size(dim) - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = max(max_shape[dim - 1], - sample.size(-dim)) - padded_samples = [] - for sample in batch[i:i + samples_per_gpu]: - pad = [0 for _ in range(batch[i].pad_dims * 2)] - for dim in range(1, batch[i].pad_dims + 1): - pad[2 * dim - - 1] = max_shape[dim - 1] - sample.size(-dim) - padded_samples.append( - F.pad( - sample.data, pad, value=sample.padding_value)) - stacked.append(default_collate(padded_samples)) - elif batch[i].pad_dims is None: - stacked.append( - default_collate([ - sample.data - for sample in batch[i:i + samples_per_gpu] - ])) - else: - raise ValueError( - 'pad_dims should be either None or integers (1-3)') - - else: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer(stacked, batch[0].stack, batch[0].padding_value) - elif isinstance(batch[0], Sequence): - transposed = zip(*batch) - return [collate(samples, samples_per_gpu) for samples in 
transposed] - elif isinstance(batch[0], Mapping): - return { - key: collate([d[key] for d in batch], samples_per_gpu) - for key in batch[0] - } - else: - return default_collate(batch) diff --git a/spaces/weiren119/AudiogramDigitization/models/symbols/format_dataset.py b/spaces/weiren119/AudiogramDigitization/models/symbols/format_dataset.py deleted file mode 100644 index 64c6ba4758381164bf225a3669e9b050bccf28d2..0000000000000000000000000000000000000000 --- a/spaces/weiren119/AudiogramDigitization/models/symbols/format_dataset.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env python3 -""" -Copyright (c) 2020 Carleton University Biomedical Informatics Collaboratory - -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree. -""" -from typing import List -from types import SimpleNamespace -import argparse, os, json, shutil -from tqdm import tqdm -import os.path as path -import numpy as np -from PIL import Image - -# The different types of symbols that appear on audiograms -SYMBOL_CLASS_INDICES = { - "AIR_UNMASKED_LEFT": 0, - "AIR_UNMASKED_RIGHT": 1, - "AIR_MASKED_LEFT": 2, - "AIR_MASKED_RIGHT": 3, - "BONE_UNMASKED_LEFT": 4, - "BONE_UNMASKED_RIGHT": 5, - "BONE_MASKED_LEFT": 6, - "BONE_MASKED_RIGHT": 7, -} - -def extract_audiograms(annotation: dict, image: Image) -> List[tuple]: - """Extracts the bounding boxes of audiograms into a tuple compatible - the YOLOv5 format. - - Parameters - ---------- - annotation : dict - A dictionary containing the annotations for the audiograms in a report. - - image : Image - The image in PIL format corresponding to the annotation. - - Returns - ------- - tuple - A tuple of the form - (class index, x_center, y_center, width, height) where all coordinates - and dimensions are normalized to the width/height of the image. - """ - audiogram_label_tuples = [] - image_width, image_height = image.size - for audiogram in annotation: - bounding_box = audiogram["boundingBox"] - x_center = (bounding_box["x"] + bounding_box["width"] / 2) / image_width - y_center = (bounding_box["y"] + bounding_box["height"] / 2) / image_height - box_width = bounding_box["width"] / image_width - box_height = bounding_box["height"] / image_width - audiogram_label_tuples.append((0, x_center, y_center, box_width, box_height)) - return audiogram_label_tuples - -def extract_symbols(annotation: dict, image: Image) -> List[tuple]: - """Extracts the bounding boxes of the symbols into tuples - compatible the YOLOv5 format. - - Parameters - ---------- - annotation : dict - A dictionary containing the annotations for the audiograms in a report. - - image: Image - The corresponding image. - - Returns - ------- - List[List[tuple]] - A list of lists of tuples, where the inner lists correspond to the different - audiograms that may appear in the report and the tuples are the symbols. 
- """ - symbol_labels_tuples = [] - for audiogram in annotation: - left = audiogram["boundingBox"]["x"] - top = audiogram["boundingBox"]["y"] - image_width = audiogram["boundingBox"]["width"] - image_height = audiogram["boundingBox"]["height"] - audiogram_labels = [] - for symbol in audiogram["symbols"]: - bounding_box = symbol["boundingBox"] - x_center = (bounding_box["x"] - left + bounding_box["width"] / 2) / image_width - y_center = (bounding_box["y"] - top + bounding_box["height"] / 2) / image_height - box_width = bounding_box["width"] / image_width - box_height = bounding_box["height"] / image_width - audiogram_labels.append((SYMBOL_CLASS_INDICES[symbol["measurementType"]], x_center, y_center, box_width, box_height)) - symbol_labels_tuples.append(audiogram_labels) - return symbol_labels_tuples - -def create_yolov5_file(bboxes: List[tuple], filename: str): - # Turn the bounding boxes into a string with a bounding box - # on each line - file_content = "\n".join([ - f"{bbox[0]} {bbox[1]} {bbox[2]} {bbox[3]} {bbox[4]}" - for bbox in bboxes - ]) - - # Save to a file - with open(filename, "w") as output_file: - output_file.write(file_content) - -def create_directory_structure(data_dir: str): - try: - shutil.rmtree(path.join(data_dir)) - except: - pass - os.mkdir(path.join(data_dir)) - os.mkdir(path.join(data_dir, "images")) - os.mkdir(path.join(data_dir, "images", "train")) - os.mkdir(path.join(data_dir, "images", "validation")) - os.mkdir(path.join(data_dir, "labels")) - os.mkdir(path.join(data_dir, "labels", "train")) - os.mkdir(path.join(data_dir, "labels", "validation")) - - -def all_labels_valid(labels: List[tuple]): - for label in labels: - for value in label[1:]: - if value < 0 or value > 1: - return False - return True - -def main(args: SimpleNamespace): - # Find all the JSON files in the input directory - report_ids = [ - filename.rstrip(".json") - for filename in os.listdir(path.join(args.annotations_dir)) - if filename.endswith(".json") - and path.exists(path.join(args.images_dir, filename.rstrip(".json") + ".jpg")) - ] - - # Shuffle - np.random.seed(seed=42) # for reproducibility of the shuffle - np.random.shuffle(report_ids) - - # Create the directory structure in which the images and annotations - # are to be stored - create_directory_structure(args.data_dir) - - # Iterate through the report ids, extract the annotations in YOLOv5 format - # and place the file in the correct directory, and the image in the correct - # directory. 
- for i, report_id in enumerate(tqdm(report_ids)): - # Decide if the image is going into the training set or validation set - directory = ( - "train" if i < args.train_frac * len(report_ids) else "validation" - ) - - # Load the annotation` - annotation_content = open( - path.join(args.annotations_dir, f"{report_id}.json") - ) - annotation = json.load(annotation_content) - - # Open the corresponding image to get its dimensions - image = Image.open(os.path.join(args.images_dir, f"{report_id}.jpg")) - width, height = image.size - - # Audiogram labels - audiogram_labels = extract_audiograms(annotation, image) - - if not all_labels_valid(audiogram_labels): - continue - - # Symbol labels - symbol_labels = extract_symbols(annotation, image) - for i, plot_symbols in enumerate(symbol_labels): - if not all_labels_valid(plot_symbols): - continue - x1 = annotation[i]["boundingBox"]["x"] - y1 = annotation[i]["boundingBox"]["y"] - x2 = annotation[i]["boundingBox"]["x"] + annotation[i]["boundingBox"]["width"] - y2 = annotation[i]["boundingBox"]["y"] + annotation[i]["boundingBox"]["height"] - create_yolov5_file( - plot_symbols, - path.join(args.data_dir, "labels", directory, f"{report_id}_{i+1}.txt") - ) - image.crop((x1, y1, x2, y2)).save( - path.join(args.data_dir, "images", directory, f"{report_id}_{i+1}.jpg") - ) - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser(description=( - "Script that formats the training set for transfer learning via " - "the YOLOv5 model." - )) - parser.add_argument("-d", "--data_dir", type=str, required=True, help=( - "Path to the directory containing the data. It should have 3 " - "subfolders named `images`, `annotations` and `labels`." - )) - parser.add_argument("-a", "--annotations_dir", type=str, required=True, help=( - "Path to the directory containing the annotations in the JSON format." - )) - parser.add_argument("-i", "--images_dir", type=str, required=True, help=( - "Path to the directory containing the images." - )) - parser.add_argument("-f", "--train_frac", type=float, required=True, help=( - "Fraction of images to be used for training. (e.g. 
0.8)" - )) - args = parser.parse_args() - - main(args) - diff --git a/spaces/weiwandaixu/ChatGPT3.5/locale/extract_locale.py b/spaces/weiwandaixu/ChatGPT3.5/locale/extract_locale.py deleted file mode 100644 index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000 --- a/spaces/weiwandaixu/ChatGPT3.5/locale/extract_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import json -import re - -# Define regular expression patterns -pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)' - -# Load the .py file -with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f: - contents = f.read() - -# Load the .py files in the modules folder -for filename in os.listdir("modules"): - if filename.endswith(".py"): - with open(os.path.join("modules", filename), "r", encoding="utf-8") as f: - contents += f.read() - -# Matching with regular expressions -matches = re.findall(pattern, contents, re.DOTALL) - -# Convert to key/value pairs -data = {match.strip('()"'): '' for match in matches} - -# Save as a JSON file -with open('labels.json', 'w', encoding='utf-8') as f: - json.dump(data, f, ensure_ascii=False, indent=4) \ No newline at end of file diff --git a/spaces/weizmannscience/MultiDiffusion/constants.py b/spaces/weizmannscience/MultiDiffusion/constants.py deleted file mode 100644 index 32a680f73b71e9630464da705f373da2824f02b2..0000000000000000000000000000000000000000 --- a/spaces/weizmannscience/MultiDiffusion/constants.py +++ /dev/null @@ -1,133 +0,0 @@ -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: black; - background: black; - } - input[type='range'] { - accent-color: black; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - 
#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - margin-top: 10px; - margin-left: auto; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0; - } - #share-btn * { - all: unset; - } - #share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; - } - #share-btn-container .wrap { - display: none !important; - } - - .gr-form{ - flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; - } - #prompt-container{ - gap: 0; - } - #prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem} - #component-16{border-top-width: 1px!important;margin-top: 1em} - .image_duplication{position: absolute; width: 100px; left: 50px} -""" - - -examples = [ - ["a photo of the dolomites", "", 4096], -] - -num_images_to_gen = 1 - -img_height = 512 -img_width = 4096 \ No newline at end of file diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/prompts/structure_action.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/prompts/structure_action.py deleted file mode 100644 index 97c57cf249556cfc2af8f534bbd4fe8284d6a683..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/prompts/structure_action.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/30 10:12 -@Author : alexanderwu -@File : structure_action.py -""" - -ACTION_SYSTEM = """SYSTEM: -You serve as an assistant that helps me play Minecraft. -I will give you a sentence. Please convert this sentence into one or several actions according to the following instructions. -Each action should be a tuple of four items, written in the form (’verb’, ’object’, ’tools’, ’materials’) -’verb’ is the verb of this action. -’object’ refers to the target object of the action. -’tools’ specifies the tools required for the action. -’material’ specifies the materials required for the action. -If some of the items are not required, set them to be ’None’. -""" - -ACTION_USER = """USER: -The sentence is {sentence}. Generate the action tuple according to the requirements. -""" diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/style-e2a22de8.css b/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/style-e2a22de8.css deleted file mode 100644 index e403fcf2ececcc6debd75709f08e0ecaf65b5501..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/static/assets/style-e2a22de8.css +++ /dev/null @@ -1 +0,0 @@ -/*! 
normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body{margin:0}main{display:block}h1{margin:.67em 0;font-size:2em}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-size:1em;font-family:monospace,monospace}a{background-color:transparent}abbr[title]{text-decoration:underline;text-decoration:underline dotted;border-bottom:none}b,strong{font-weight:bolder}code,kbd,samp{font-size:1em;font-family:monospace,monospace}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{margin:0;font-size:100%;font-family:inherit;line-height:1.15}button,input{overflow:visible}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button::-moz-focus-inner,[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner{padding:0;border-style:none}button:-moz-focusring,[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{display:table;box-sizing:border-box;max-width:100%;padding:0;color:inherit;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}.arco-icon{display:inline-block;width:1em;height:1em;color:inherit;font-style:normal;vertical-align:-2px;outline:none;stroke:currentColor}.arco-icon-loading,.arco-icon-spin{animation:arco-loading-circle 1s infinite cubic-bezier(0,0,1,1)}@keyframes arco-loading-circle{0%{transform:rotate(0)}to{transform:rotate(360deg)}}.arco-icon-hover{position:relative;display:inline-block;cursor:pointer;line-height:12px}.arco-icon-hover .arco-icon{position:relative}.arco-icon-hover:before{position:absolute;display:block;box-sizing:border-box;background-color:transparent;border-radius:var(--border-radius-circle);transition:background-color .1s 
cubic-bezier(0,0,1,1);content:""}.arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-icon-hover.arco-icon-hover-disabled:before{opacity:0}.arco-icon-hover:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-mini{line-height:12px}.arco-icon-hover-size-mini:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-small{line-height:12px}.arco-icon-hover-size-small:before{top:50%;left:50%;width:20px;height:20px;transform:translate(-50%,-50%)}.arco-icon-hover-size-large{line-height:12px}.arco-icon-hover-size-large:before{top:50%;left:50%;width:24px;height:24px;transform:translate(-50%,-50%)}.arco-icon-hover-size-huge{line-height:12px}.arco-icon-hover-size-huge:before{top:50%;left:50%;width:24px;height:24px;transform:translate(-50%,-50%)}.fade-in-standard-enter-from,.fade-in-standard-appear-from{opacity:0}.fade-in-standard-enter-to,.fade-in-standard-appear-to{opacity:1}.fade-in-standard-enter-active,.fade-in-standard-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-in-standard-leave-from{opacity:1}.fade-in-standard-leave-to{opacity:0}.fade-in-standard-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-in-enter-from,.fade-in-appear-from{opacity:0}.fade-in-enter-to,.fade-in-appear-to{opacity:1}.fade-in-enter-active,.fade-in-appear-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.fade-in-leave-from{opacity:1}.fade-in-leave-to{opacity:0}.fade-in-leave-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.zoom-in-enter-from,.zoom-in-appear-from{transform:scale(.5);opacity:0}.zoom-in-enter-to,.zoom-in-appear-to{transform:scale(1);opacity:1}.zoom-in-enter-active,.zoom-in-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1),transform .3s cubic-bezier(.34,.69,.1,1)}.zoom-in-leave-from{transform:scale(1);opacity:1}.zoom-in-leave-to{transform:scale(.5);opacity:0}.zoom-in-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1),transform .3s cubic-bezier(.34,.69,.1,1)}.zoom-in-fade-out-enter-from,.zoom-in-fade-out-appear-from{transform:scale(.5);opacity:0}.zoom-in-fade-out-enter-to,.zoom-in-fade-out-appear-to{transform:scale(1);opacity:1}.zoom-in-fade-out-enter-active,.zoom-in-fade-out-appear-active{transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-fade-out-leave-from{transform:scale(1);opacity:1}.zoom-in-fade-out-leave-to{transform:scale(.5);opacity:0}.zoom-in-fade-out-leave-active{transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-big-enter-from,.zoom-in-big-appear-from{transform:scale(.5);opacity:0}.zoom-in-big-enter-to,.zoom-in-big-appear-to{transform:scale(1);opacity:1}.zoom-in-big-enter-active,.zoom-in-big-appear-active{transition:opacity .2s cubic-bezier(0,0,1,1),transform .2s cubic-bezier(0,0,1,1)}.zoom-in-big-leave-from{transform:scale(1);opacity:1}.zoom-in-big-leave-to{transform:scale(.2);opacity:0}.zoom-in-big-leave-active{transition:opacity .2s cubic-bezier(0,0,1,1),transform .2s cubic-bezier(0,0,1,1)}.zoom-in-left-enter-from,.zoom-in-left-appear-from{transform:scale(.1);opacity:.1}.zoom-in-left-enter-to,.zoom-in-left-appear-to{transform:scale(1);opacity:1}.zoom-in-left-enter-active,.zoom-in-left-appear-active{transform-origin:0 50%;transition:opacity .3s cubic-bezier(0,0,1,1),transform .3s 
cubic-bezier(.3,1.3,.3,1)}.zoom-in-left-leave-from{transform:scale(1);opacity:1}.zoom-in-left-leave-to{transform:scale(.1);opacity:.1}.zoom-in-left-leave-active{transform-origin:0 50%;transition:opacity .3s cubic-bezier(0,0,1,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-top-enter-from,.zoom-in-top-appear-from{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-top-enter-to,.zoom-in-top-appear-to{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-top-enter-active,.zoom-in-top-appear-active{transform-origin:0 0;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-top-leave-from{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-top-leave-to{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-top-leave-active{transform-origin:0 0;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-bottom-enter-from,.zoom-in-bottom-appear-from{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-bottom-enter-to,.zoom-in-bottom-appear-to{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-bottom-enter-active,.zoom-in-bottom-appear-active{transform-origin:100% 100%;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.zoom-in-bottom-leave-from{transform:scaleY(1) translateZ(0);opacity:1}.zoom-in-bottom-leave-to{transform:scaleY(.8) translateZ(0);opacity:0}.zoom-in-bottom-leave-active{transform-origin:100% 100%;transition:transform .3s cubic-bezier(.3,1.3,.3,1),opacity .3s cubic-bezier(.3,1.3,.3,1)}.slide-dynamic-origin-enter-from,.slide-dynamic-origin-appear-from{transform:scaleY(.9);transform-origin:0 0;opacity:0}.slide-dynamic-origin-enter-to,.slide-dynamic-origin-appear-to{transform:scaleY(1);transform-origin:0 0;opacity:1}.slide-dynamic-origin-enter-active,.slide-dynamic-origin-appear-active{transition:transform .2s cubic-bezier(.34,.69,.1,1),opacity .2s cubic-bezier(.34,.69,.1,1)}.slide-dynamic-origin-leave-from{transform:scaleY(1);transform-origin:0 0;opacity:1}.slide-dynamic-origin-leave-to{transform:scaleY(.9);transform-origin:0 0;opacity:0}.slide-dynamic-origin-leave-active{transition:transform .2s cubic-bezier(.34,.69,.1,1),opacity .2s cubic-bezier(.34,.69,.1,1)}.slide-left-enter-from,.slide-left-appear-from{transform:translate(-100%)}.slide-left-enter-to,.slide-left-appear-to{transform:translate(0)}.slide-left-enter-active,.slide-left-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-left-leave-from{transform:translate(0)}.slide-left-leave-to{transform:translate(-100%)}.slide-left-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-enter-from,.slide-right-appear-from{transform:translate(100%)}.slide-right-enter-to,.slide-right-appear-to{transform:translate(0)}.slide-right-enter-active,.slide-right-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-leave-from{transform:translate(0)}.slide-right-leave-to{transform:translate(100%)}.slide-right-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-enter-from,.slide-top-appear-from{transform:translateY(-100%)}.slide-top-enter-to,.slide-top-appear-to{transform:translateY(0)}.slide-top-enter-active,.slide-top-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-leave-from{transform:translateY(0)}.slide-top-leave-to{transform:translateY(-100%)}.slide-top-leave-active{transition:transform .3s 
cubic-bezier(.34,.69,.1,1)}.slide-bottom-enter-from,.slide-bottom-appear-from{transform:translateY(100%)}.slide-bottom-enter-to,.slide-bottom-appear-to{transform:translateY(0)}.slide-bottom-enter-active,.slide-bottom-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-leave-from{transform:translateY(0)}.slide-bottom-leave-to{transform:translateY(100%)}.slide-bottom-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}body{--red-1: 255,236,232;--red-2: 253,205,197;--red-3: 251,172,163;--red-4: 249,137,129;--red-5: 247,101,96;--red-6: 245,63,63;--red-7: 203,39,45;--red-8: 161,21,30;--red-9: 119,8,19;--red-10: 77,0,10;--orangered-1: 255,243,232;--orangered-2: 253,221,195;--orangered-3: 252,197,159;--orangered-4: 250,172,123;--orangered-5: 249,144,87;--orangered-6: 247,114,52;--orangered-7: 204,81,32;--orangered-8: 162,53,17;--orangered-9: 119,31,6;--orangered-10: 77,14,0;--orange-1: 255,247,232;--orange-2: 255,228,186;--orange-3: 255,207,139;--orange-4: 255,182,93;--orange-5: 255,154,46;--orange-6: 255,125,0;--orange-7: 210,95,0;--orange-8: 166,69,0;--orange-9: 121,46,0;--orange-10: 77,27,0;--gold-1: 255,252,232;--gold-2: 253,244,191;--gold-3: 252,233,150;--gold-4: 250,220,109;--gold-5: 249,204,69;--gold-6: 247,186,30;--gold-7: 204,146,19;--gold-8: 162,109,10;--gold-9: 119,75,4;--gold-10: 77,45,0;--yellow-1: 254,255,232;--yellow-2: 254,254,190;--yellow-3: 253,250,148;--yellow-4: 252,242,107;--yellow-5: 251,232,66;--yellow-6: 250,220,25;--yellow-7: 207,175,15;--yellow-8: 163,132,8;--yellow-9: 120,93,3;--yellow-10: 77,56,0;--lime-1: 252,255,232;--lime-2: 237,248,187;--lime-3: 220,241,144;--lime-4: 201,233,104;--lime-5: 181,226,65;--lime-6: 159,219,29;--lime-7: 126,183,18;--lime-8: 95,148,10;--lime-9: 67,112,4;--lime-10: 42,77,0;--green-1: 232,255,234;--green-2: 175,240,181;--green-3: 123,225,136;--green-4: 76,210,99;--green-5: 35,195,67;--green-6: 0,180,42;--green-7: 0,154,41;--green-8: 0,128,38;--green-9: 0,102,34;--green-10: 0,77,28;--cyan-1: 232,255,251;--cyan-2: 183,244,236;--cyan-3: 137,233,224;--cyan-4: 94,223,214;--cyan-5: 55,212,207;--cyan-6: 20,201,201;--cyan-7: 13,165,170;--cyan-8: 7,130,139;--cyan-9: 3,97,108;--cyan-10: 0,66,77;--blue-1: 232,247,255;--blue-2: 195,231,254;--blue-3: 159,212,253;--blue-4: 123,192,252;--blue-5: 87,169,251;--blue-6: 52,145,250;--blue-7: 32,108,207;--blue-8: 17,75,163;--blue-9: 6,48,120;--blue-10: 0,26,77;--arcoblue-1: 232,243,255;--arcoblue-2: 190,218,255;--arcoblue-3: 148,191,255;--arcoblue-4: 106,161,255;--arcoblue-5: 64,128,255;--arcoblue-6: 22,93,255;--arcoblue-7: 14,66,210;--arcoblue-8: 7,44,166;--arcoblue-9: 3,26,121;--arcoblue-10: 0,13,77;--purple-1: 245,232,255;--purple-2: 221,190,246;--purple-3: 195,150,237;--purple-4: 168,113,227;--purple-5: 141,78,218;--purple-6: 114,46,209;--purple-7: 85,29,176;--purple-8: 60,16,143;--purple-9: 39,6,110;--purple-10: 22,0,77;--pinkpurple-1: 255,232,251;--pinkpurple-2: 247,186,239;--pinkpurple-3: 240,142,230;--pinkpurple-4: 232,101,223;--pinkpurple-5: 225,62,219;--pinkpurple-6: 217,26,217;--pinkpurple-7: 176,16,182;--pinkpurple-8: 138,9,147;--pinkpurple-9: 101,3,112;--pinkpurple-10: 66,0,77;--magenta-1: 255,232,241;--magenta-2: 253,194,219;--magenta-3: 251,157,199;--magenta-4: 249,121,183;--magenta-5: 247,84,168;--magenta-6: 245,49,157;--magenta-7: 203,30,131;--magenta-8: 161,16,105;--magenta-9: 119,6,79;--magenta-10: 77,0,52;--gray-1: 247,248,250;--gray-2: 242,243,245;--gray-3: 229,230,235;--gray-4: 201,205,212;--gray-5: 169,174,184;--gray-6: 134,144,156;--gray-7: 
107,119,133;--gray-8: 78,89,105;--gray-9: 39,46,59;--gray-10: 29,33,41;--success-1: var(--green-1);--success-2: var(--green-2);--success-3: var(--green-3);--success-4: var(--green-4);--success-5: var(--green-5);--success-6: var(--green-6);--success-7: var(--green-7);--success-8: var(--green-8);--success-9: var(--green-9);--success-10: var(--green-10);--primary-1: var(--arcoblue-1);--primary-2: var(--arcoblue-2);--primary-3: var(--arcoblue-3);--primary-4: var(--arcoblue-4);--primary-5: var(--arcoblue-5);--primary-6: var(--arcoblue-6);--primary-7: var(--arcoblue-7);--primary-8: var(--arcoblue-8);--primary-9: var(--arcoblue-9);--primary-10: var(--arcoblue-10);--danger-1: var(--red-1);--danger-2: var(--red-2);--danger-3: var(--red-3);--danger-4: var(--red-4);--danger-5: var(--red-5);--danger-6: var(--red-6);--danger-7: var(--red-7);--danger-8: var(--red-8);--danger-9: var(--red-9);--danger-10: var(--red-10);--warning-1: var(--orange-1);--warning-2: var(--orange-2);--warning-3: var(--orange-3);--warning-4: var(--orange-4);--warning-5: var(--orange-5);--warning-6: var(--orange-6);--warning-7: var(--orange-7);--warning-8: var(--orange-8);--warning-9: var(--orange-9);--warning-10: var(--orange-10);--link-1: var(--arcoblue-1);--link-2: var(--arcoblue-2);--link-3: var(--arcoblue-3);--link-4: var(--arcoblue-4);--link-5: var(--arcoblue-5);--link-6: var(--arcoblue-6);--link-7: var(--arcoblue-7);--link-8: var(--arcoblue-8);--link-9: var(--arcoblue-9);--link-10: var(--arcoblue-10)}body[arco-theme=dark]{--red-1: 77,0,10;--red-2: 119,6,17;--red-3: 161,22,31;--red-4: 203,46,52;--red-5: 245,78,78;--red-6: 247,105,101;--red-7: 249,141,134;--red-8: 251,176,167;--red-9: 253,209,202;--red-10: 255,240,236;--orangered-1: 77,14,0;--orangered-2: 119,30,5;--orangered-3: 162,55,20;--orangered-4: 204,87,41;--orangered-5: 247,126,69;--orangered-6: 249,146,90;--orangered-7: 250,173,125;--orangered-8: 252,198,161;--orangered-9: 253,222,197;--orangered-10: 255,244,235;--orange-1: 77,27,0;--orange-2: 121,48,4;--orange-3: 166,75,10;--orange-4: 210,105,19;--orange-5: 255,141,31;--orange-6: 255,150,38;--orange-7: 255,179,87;--orange-8: 255,205,135;--orange-9: 255,227,184;--orange-10: 255,247,232;--gold-1: 77,45,0;--gold-2: 119,75,4;--gold-3: 162,111,15;--gold-4: 204,150,31;--gold-5: 247,192,52;--gold-6: 249,204,68;--gold-7: 250,220,108;--gold-8: 252,233,149;--gold-9: 253,244,190;--gold-10: 255,252,232;--yellow-1: 77,56,0;--yellow-2: 120,94,7;--yellow-3: 163,134,20;--yellow-4: 207,179,37;--yellow-5: 250,225,60;--yellow-6: 251,233,75;--yellow-7: 252,243,116;--yellow-8: 253,250,157;--yellow-9: 254,254,198;--yellow-10: 254,255,240;--lime-1: 42,77,0;--lime-2: 68,112,6;--lime-3: 98,148,18;--lime-4: 132,183,35;--lime-5: 168,219,57;--lime-6: 184,226,75;--lime-7: 203,233,112;--lime-8: 222,241,152;--lime-9: 238,248,194;--lime-10: 253,255,238;--green-1: 0,77,28;--green-2: 4,102,37;--green-3: 10,128,45;--green-4: 18,154,55;--green-5: 29,180,64;--green-6: 39,195,70;--green-7: 80,210,102;--green-8: 126,225,139;--green-9: 178,240,183;--green-10: 235,255,236;--cyan-1: 0,66,77;--cyan-2: 6,97,108;--cyan-3: 17,131,139;--cyan-4: 31,166,170;--cyan-5: 48,201,201;--cyan-6: 63,212,207;--cyan-7: 102,223,215;--cyan-8: 144,233,225;--cyan-9: 190,244,237;--cyan-10: 240,255,252;--blue-1: 0,26,77;--blue-2: 5,47,120;--blue-3: 19,76,163;--blue-4: 41,113,207;--blue-5: 70,154,250;--blue-6: 90,170,251;--blue-7: 125,193,252;--blue-8: 161,213,253;--blue-9: 198,232,254;--blue-10: 234,248,255;--arcoblue-1: 0,13,77;--arcoblue-2: 4,27,121;--arcoblue-3: 
14,50,166;--arcoblue-4: 29,77,210;--arcoblue-5: 48,111,255;--arcoblue-6: 60,126,255;--arcoblue-7: 104,159,255;--arcoblue-8: 147,190,255;--arcoblue-9: 190,218,255;--arcoblue-10: 234,244,255;--purple-1: 22,0,77;--purple-2: 39,6,110;--purple-3: 62,19,143;--purple-4: 90,37,176;--purple-5: 123,61,209;--purple-6: 142,81,218;--purple-7: 169,116,227;--purple-8: 197,154,237;--purple-9: 223,194,246;--purple-10: 247,237,255;--pinkpurple-1: 66,0,77;--pinkpurple-2: 101,3,112;--pinkpurple-3: 138,13,147;--pinkpurple-4: 176,27,182;--pinkpurple-5: 217,46,217;--pinkpurple-6: 225,61,219;--pinkpurple-7: 232,102,223;--pinkpurple-8: 240,146,230;--pinkpurple-9: 247,193,240;--pinkpurple-10: 255,242,253;--magenta-1: 77,0,52;--magenta-2: 119,8,80;--magenta-3: 161,23,108;--magenta-4: 203,43,136;--magenta-5: 245,69,166;--magenta-6: 247,86,169;--magenta-7: 249,122,184;--magenta-8: 251,158,200;--magenta-9: 253,195,219;--magenta-10: 255,232,241;--gray-1: 23,23,26;--gray-2: 46,46,48;--gray-3: 72,72,73;--gray-4: 95,95,96;--gray-5: 120,120,122;--gray-6: 146,146,147;--gray-7: 171,171,172;--gray-8: 197,197,197;--gray-9: 223,223,223;--gray-10: 246,246,246;--primary-1: var(--arcoblue-1);--primary-2: var(--arcoblue-2);--primary-3: var(--arcoblue-3);--primary-4: var(--arcoblue-4);--primary-5: var(--arcoblue-5);--primary-6: var(--arcoblue-6);--primary-7: var(--arcoblue-7);--primary-8: var(--arcoblue-8);--primary-9: var(--arcoblue-9);--primary-10: var(--arcoblue-10);--success-1: var(--green-1);--success-2: var(--green-2);--success-3: var(--green-3);--success-4: var(--green-4);--success-5: var(--green-5);--success-6: var(--green-6);--success-7: var(--green-7);--success-8: var(--green-8);--success-9: var(--green-9);--success-10: var(--green-10);--danger-1: var(--red-1);--danger-2: var(--red-2);--danger-3: var(--red-3);--danger-4: var(--red-4);--danger-5: var(--red-5);--danger-6: var(--red-6);--danger-7: var(--red-7);--danger-8: var(--red-8);--danger-9: var(--red-9);--danger-10: var(--red-10);--warning-1: var(--orange-1);--warning-2: var(--orange-2);--warning-3: var(--orange-3);--warning-4: var(--orange-4);--warning-5: var(--orange-5);--warning-6: var(--orange-6);--warning-7: var(--orange-7);--warning-8: var(--orange-8);--warning-9: var(--orange-9);--warning-10: var(--orange-10);--link-1: var(--arcoblue-1);--link-2: var(--arcoblue-2);--link-3: var(--arcoblue-3);--link-4: var(--arcoblue-4);--link-5: var(--arcoblue-5);--link-6: var(--arcoblue-6);--link-7: var(--arcoblue-7);--link-8: var(--arcoblue-8);--link-9: var(--arcoblue-9);--link-10: var(--arcoblue-10)}body{--color-white: #ffffff;--color-black: #000000;--color-border: rgb(var(--gray-3));--color-bg-popup: var(--color-bg-5);--color-bg-1: #fff;--color-bg-2: #fff;--color-bg-3: #fff;--color-bg-4: #fff;--color-bg-5: #fff;--color-bg-white: #fff;--color-neutral-1: rgb(var(--gray-1));--color-neutral-2: rgb(var(--gray-2));--color-neutral-3: rgb(var(--gray-3));--color-neutral-4: rgb(var(--gray-4));--color-neutral-5: rgb(var(--gray-5));--color-neutral-6: rgb(var(--gray-6));--color-neutral-7: rgb(var(--gray-7));--color-neutral-8: rgb(var(--gray-8));--color-neutral-9: rgb(var(--gray-9));--color-neutral-10: rgb(var(--gray-10));--color-text-1: var(--color-neutral-10);--color-text-2: var(--color-neutral-8);--color-text-3: var(--color-neutral-6);--color-text-4: var(--color-neutral-4);--color-border-1: var(--color-neutral-2);--color-border-2: var(--color-neutral-3);--color-border-3: var(--color-neutral-4);--color-border-4: var(--color-neutral-6);--color-fill-1: var(--color-neutral-1);--color-fill-2: 
var(--color-neutral-2);--color-fill-3: var(--color-neutral-3);--color-fill-4: var(--color-neutral-4);--color-primary-light-1: rgb(var(--primary-1));--color-primary-light-2: rgb(var(--primary-2));--color-primary-light-3: rgb(var(--primary-3));--color-primary-light-4: rgb(var(--primary-4));--color-link-light-1: rgb(var(--link-1));--color-link-light-2: rgb(var(--link-2));--color-link-light-3: rgb(var(--link-3));--color-link-light-4: rgb(var(--link-4));--color-secondary: var(--color-neutral-2);--color-secondary-hover: var(--color-neutral-3);--color-secondary-active: var(--color-neutral-4);--color-secondary-disabled: var(--color-neutral-1);--color-danger-light-1: rgb(var(--danger-1));--color-danger-light-2: rgb(var(--danger-2));--color-danger-light-3: rgb(var(--danger-3));--color-danger-light-4: rgb(var(--danger-4));--color-success-light-1: rgb(var(--success-1));--color-success-light-2: rgb(var(--success-2));--color-success-light-3: rgb(var(--success-3));--color-success-light-4: rgb(var(--success-4));--color-warning-light-1: rgb(var(--warning-1));--color-warning-light-2: rgb(var(--warning-2));--color-warning-light-3: rgb(var(--warning-3));--color-warning-light-4: rgb(var(--warning-4));--border-radius-none: 0;--border-radius-small: 2px;--border-radius-medium: 4px;--border-radius-large: 8px;--border-radius-circle: 50%;--color-tooltip-bg: rgb(var(--gray-10));--color-spin-layer-bg: rgba(255, 255, 255, .6);--color-menu-dark-bg: #232324;--color-menu-light-bg: #ffffff;--color-menu-dark-hover: rgba(255, 255, 255, .04);--color-mask-bg: rgba(29, 33, 41, .6)}body[arco-theme=dark]{--color-white: rgba(255, 255, 255, .9);--color-black: #000000;--color-border: #333335;--color-bg-1: #17171a;--color-bg-2: #232324;--color-bg-3: #2a2a2b;--color-bg-4: #313132;--color-bg-5: #373739;--color-bg-white: #f6f6f6;--color-text-1: rgba(255, 255, 255, .9);--color-text-2: rgba(255, 255, 255, .7);--color-text-3: rgba(255, 255, 255, .5);--color-text-4: rgba(255, 255, 255, .3);--color-fill-1: rgba(255, 255, 255, .04);--color-fill-2: rgba(255, 255, 255, .08);--color-fill-3: rgba(255, 255, 255, .12);--color-fill-4: rgba(255, 255, 255, .16);--color-primary-light-1: rgba(var(--primary-6), .2);--color-primary-light-2: rgba(var(--primary-6), .35);--color-primary-light-3: rgba(var(--primary-6), .5);--color-primary-light-4: rgba(var(--primary-6), .65);--color-secondary: rgba(var(--gray-9), .08);--color-secondary-hover: rgba(var(--gray-8), .16);--color-secondary-active: rgba(var(--gray-7), .24);--color-secondary-disabled: rgba(var(--gray-9), .08);--color-danger-light-1: rgba(var(--danger-6), .2);--color-danger-light-2: rgba(var(--danger-6), .35);--color-danger-light-3: rgba(var(--danger-6), .5);--color-danger-light-4: rgba(var(--danger-6), .65);--color-success-light-1: rgb(var(--success-6), .2);--color-success-light-2: rgb(var(--success-6), .35);--color-success-light-3: rgb(var(--success-6), .5);--color-success-light-4: rgb(var(--success-6), .65);--color-warning-light-1: rgb(var(--warning-6), .2);--color-warning-light-2: rgb(var(--warning-6), .35);--color-warning-light-3: rgb(var(--warning-6), .5);--color-warning-light-4: rgb(var(--warning-6), .65);--color-link-light-1: rgb(var(--link-6), .2);--color-link-light-2: rgb(var(--link-6), .35);--color-link-light-3: rgb(var(--link-6), .5);--color-link-light-4: rgb(var(--link-6), .65);--color-tooltip-bg: #373739;--color-spin-layer-bg: rgba(51, 51, 51, .6);--color-menu-dark-bg: #232324;--color-menu-light-bg: #232324;--color-menu-dark-hover: var(--color-fill-2);--color-mask-bg: rgba(23, 23, 26, 
.6)}body{font-size:14px;font-family:Inter,-apple-system,BlinkMacSystemFont,PingFang SC,Hiragino Sans GB,noto sans,Microsoft YaHei,Helvetica Neue,Helvetica,Arial,sans-serif}.arco-trigger-wrapper{display:inline-block}.arco-trigger-popup{position:absolute;z-index:1000}.arco-trigger-arrow{position:absolute;z-index:-1;display:block;box-sizing:border-box;width:8px;height:8px;background-color:var(--color-bg-5);content:""}.arco-trigger-popup[trigger-placement=top] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=tl] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=tr] .arco-trigger-arrow{border-top:none;border-left:none;border-bottom-right-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=bottom] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=bl] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=br] .arco-trigger-arrow{border-right:none;border-bottom:none;border-top-left-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=left] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=lt] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=lb] .arco-trigger-arrow{border-bottom:none;border-left:none;border-top-right-radius:var(--border-radius-small)}.arco-trigger-popup[trigger-placement=right] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=rt] .arco-trigger-arrow,.arco-trigger-popup[trigger-placement=rb] .arco-trigger-arrow{border-top:none;border-right:none;border-bottom-left-radius:var(--border-radius-small)}.arco-auto-tooltip{display:block;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-input-label{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1);cursor:pointer}.arco-input-label.arco-input-label-search{cursor:text}.arco-input-label.arco-input-label-search .arco-input-label-input,.arco-input-label.arco-input-label-search .arco-input-label-value{pointer-events:none}.arco-input-label:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-label:focus-within,.arco-input-label.arco-input-label-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-input-label.arco-input-label-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-label.arco-input-label-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-label.arco-input-label-disabled .arco-input-label-prefix,.arco-input-label.arco-input-label-disabled .arco-input-label-suffix{color:inherit}.arco-input-label.arco-input-label-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-label.arco-input-label-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-label.arco-input-label-error:focus-within,.arco-input-label.arco-input-label-error.arco-input-label-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-label .arco-input-label-prefix,.arco-input-label 
.arco-input-label-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-label .arco-input-label-prefix>svg,.arco-input-label .arco-input-label-suffix>svg{font-size:14px}.arco-input-label .arco-input-label-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-label .arco-input-label-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-label .arco-input-label-suffix .arco-feedback-icon{display:inline-flex}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-label .arco-input-label-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-label .arco-input-label-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-label .arco-input-label-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-label:hover .arco-input-label-clear-btn{visibility:visible}.arco-input-label:not(.arco-input-label-focus) .arco-input-label-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-label .arco-input-label-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-input-label .arco-input-label-input::placeholder{color:var(--color-text-3)}.arco-input-label .arco-input-label-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-label .arco-input-label-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-label .arco-input-label-input-hidden{position:absolute;width:0!important}.arco-input-label .arco-input-label-value{display:flex;align-items:center;box-sizing:border-box;width:100%;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-input-label .arco-input-label-value:after{font-size:0;line-height:0;visibility:hidden;content:"."}.arco-input-label .arco-input-label-value-hidden{display:none}.arco-input-label.arco-input-label-size-mini .arco-input-label-input,.arco-input-label.arco-input-label-size-mini .arco-input-label-value{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-input-label.arco-input-label-size-mini .arco-input-label-value{min-height:22px}.arco-input-label.arco-input-label-size-medium .arco-input-label-input,.arco-input-label.arco-input-label-size-medium .arco-input-label-value{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-medium .arco-input-label-value{min-height:30px}.arco-input-label.arco-input-label-size-small .arco-input-label-input,.arco-input-label.arco-input-label-size-small .arco-input-label-value{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-small .arco-input-label-value{min-height:26px}.arco-input-label.arco-input-label-size-large .arco-input-label-input,.arco-input-label.arco-input-label-size-large .arco-input-label-value{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-input-label.arco-input-label-size-large 
.arco-input-label-value{min-height:34px}.arco-picker{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;padding:4px 11px 4px 4px;line-height:1.5715;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);transition:all .1s cubic-bezier(0,0,1,1)}.arco-picker-input{display:inline-flex;flex:1}.arco-picker input{width:100%;padding:0 0 0 8px;color:var(--color-text-2);line-height:1.5715;text-align:left;background-color:transparent;border:none;outline:none;transition:all .1s cubic-bezier(0,0,1,1)}.arco-picker input::placeholder{color:var(--color-text-3)}.arco-picker input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-picker-has-prefix{padding-left:12px}.arco-picker-prefix{padding-right:4px;color:var(--color-text-2);font-size:14px}.arco-picker-suffix{display:inline-flex;align-items:center;margin-left:4px}.arco-picker-suffix .arco-feedback-icon{display:inline-flex}.arco-picker-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-picker-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-picker-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-picker-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-picker-suffix .arco-feedback-icon{margin-left:4px}.arco-picker-suffix-icon{color:var(--color-text-2)}.arco-picker .arco-picker-clear-icon{display:none;color:var(--color-text-2);font-size:12px}.arco-picker:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-picker:not(.arco-picker-disabled):hover .arco-picker-clear-icon{display:inline-block}.arco-picker:not(.arco-picker-disabled):hover .arco-picker-suffix .arco-picker-clear-icon+span{display:none}.arco-picker input[disabled]{color:var(--color-text-4);cursor:not-allowed}.arco-picker input[disabled]::placeholder{color:var(--color-text-4)}.arco-picker-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-picker-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-picker-focused{box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-picker-focused,.arco-picker-focused:hover{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6))}.arco-picker-focused.arco-picker-error{border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-picker-focused .arco-picker-input-active input,.arco-picker-focused:hover .arco-picker-input-active input{background:var(--color-fill-2)}.arco-picker-disabled,.arco-picker-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-picker-disabled input[disabled],.arco-picker-disabled:hover input[disabled]{color:var(--color-text-4);cursor:not-allowed}.arco-picker-disabled input[disabled]::placeholder,.arco-picker-disabled:hover input[disabled]::placeholder{color:var(--color-text-4)}.arco-picker-separator{min-width:10px;padding:0 8px;color:var(--color-text-3)}.arco-picker-disabled .arco-picker-separator,.arco-picker-disabled .arco-picker-suffix-icon{color:var(--color-text-4)}.arco-picker-size-mini{height:24px}.arco-picker-size-mini input{font-size:12px}.arco-picker-size-small{height:28px}.arco-picker-size-small input{font-size:14px}.arco-picker-size-medium{height:32px}.arco-picker-size-medium input{font-size:14px}.arco-picker-size-large{height:36px}.arco-picker-size-large 
input{font-size:14px}.arco-select-view-single{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1);cursor:pointer}.arco-select-view-single.arco-select-view-search{cursor:text}.arco-select-view-single.arco-select-view-search .arco-select-view-input,.arco-select-view-single.arco-select-view-search .arco-select-view-value{pointer-events:none}.arco-select-view-single:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-select-view-single:focus-within,.arco-select-view-single.arco-select-view-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-select-view-single.arco-select-view-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-select-view-single.arco-select-view-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-select-view-single.arco-select-view-disabled .arco-select-view-prefix,.arco-select-view-single.arco-select-view-disabled .arco-select-view-suffix{color:inherit}.arco-select-view-single.arco-select-view-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-select-view-single.arco-select-view-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-select-view-single.arco-select-view-error:focus-within,.arco-select-view-single.arco-select-view-error.arco-select-view-single-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-select-view-single .arco-select-view-prefix,.arco-select-view-single .arco-select-view-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-select-view-single .arco-select-view-prefix>svg,.arco-select-view-single .arco-select-view-suffix>svg{font-size:14px}.arco-select-view-single .arco-select-view-prefix{padding-right:12px;color:var(--color-text-2)}.arco-select-view-single .arco-select-view-suffix{padding-left:12px;color:var(--color-text-2)}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon{display:inline-flex}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-select-view-single .arco-select-view-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-select-view-single .arco-select-view-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-select-view-single .arco-select-view-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-select-view-single:hover .arco-select-view-clear-btn{visibility:visible}.arco-select-view-single:not(.arco-select-view-focus) .arco-select-view-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-select-view-single 
.arco-select-view-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-select-view-single .arco-select-view-input::placeholder{color:var(--color-text-3)}.arco-select-view-single .arco-select-view-input[disabled]::placeholder{color:var(--color-text-4)}.arco-select-view-single .arco-select-view-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-select-view-single .arco-select-view-input-hidden{position:absolute;width:0!important}.arco-select-view-single .arco-select-view-value{display:flex;align-items:center;box-sizing:border-box;width:100%;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-view-single .arco-select-view-value:after{font-size:0;line-height:0;visibility:hidden;content:"."}.arco-select-view-single .arco-select-view-value-hidden{display:none}.arco-select-view-single.arco-select-view-size-mini .arco-select-view-input,.arco-select-view-single.arco-select-view-size-mini .arco-select-view-value{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-select-view-single.arco-select-view-size-mini .arco-select-view-value{min-height:22px}.arco-select-view-single.arco-select-view-size-medium .arco-select-view-input,.arco-select-view-single.arco-select-view-size-medium .arco-select-view-value{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-medium .arco-select-view-value{min-height:30px}.arco-select-view-single.arco-select-view-size-small .arco-select-view-input,.arco-select-view-single.arco-select-view-size-small .arco-select-view-value{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-small .arco-select-view-value{min-height:26px}.arco-select-view-single.arco-select-view-size-large .arco-select-view-input,.arco-select-view-single.arco-select-view-size-large .arco-select-view-value{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-select-view-single.arco-select-view-size-large .arco-select-view-value{min-height:34px}.arco-select-view-multiple{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1)}.arco-select-view-multiple:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-select-view-multiple:focus-within,.arco-select-view-multiple.arco-select-view-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-select-view-multiple.arco-select-view-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-select-view-multiple.arco-select-view-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-select-view-multiple.arco-select-view-disabled .arco-select-view-prefix,.arco-select-view-multiple.arco-select-view-disabled 
.arco-select-view-suffix{color:inherit}.arco-select-view-multiple.arco-select-view-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-select-view-multiple.arco-select-view-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-select-view-multiple.arco-select-view-error:focus-within,.arco-select-view-multiple.arco-select-view-error.arco-select-view-multiple-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-select-view-multiple .arco-select-view-prefix,.arco-select-view-multiple .arco-select-view-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-select-view-multiple .arco-select-view-prefix>svg,.arco-select-view-multiple .arco-select-view-suffix>svg{font-size:14px}.arco-select-view-multiple .arco-select-view-prefix{padding-right:12px;color:var(--color-text-2)}.arco-select-view-multiple .arco-select-view-suffix{padding-left:12px;color:var(--color-text-2)}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon{display:inline-flex}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-select-view-multiple .arco-select-view-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-select-view-multiple .arco-select-view-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-select-view-multiple .arco-select-view-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-select-view-multiple:hover .arco-select-view-clear-btn{visibility:visible}.arco-select-view-multiple:not(.arco-select-view-focus) .arco-select-view-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-select-view-multiple.arco-select-view-has-tag{padding-right:4px;padding-left:4px}.arco-select-view-multiple.arco-select-view-has-prefix{padding-left:12px}.arco-select-view-multiple.arco-select-view-has-suffix{padding-right:12px}.arco-select-view-multiple .arco-select-view-inner{flex:1;overflow:hidden;line-height:0}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag{display:inline-flex;align-items:center;margin-right:4px;color:var(--color-text-1);font-size:12px;white-space:pre-wrap;word-break:break-word;background-color:var(--color-bg-2);border-color:var(--color-fill-3)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag.arco-tag-custom-color{color:var(--color-white)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);box-sizing:border-box}.arco-select-view-multiple .arco-select-view-inner 
.arco-select-view-input::placeholder{color:var(--color-text-3)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input[disabled]::placeholder{color:var(--color-text-4)}.arco-select-view-multiple .arco-select-view-inner .arco-select-view-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-select-view-multiple .arco-select-view-mirror{position:absolute;top:0;left:0;white-space:pre;visibility:hidden;pointer-events:none}.arco-select-view-multiple.arco-select-view-focus .arco-select-view-tag{background-color:var(--color-fill-2);border-color:var(--color-fill-2)}.arco-select-view-multiple.arco-select-view-focus .arco-select-view-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-select-view-multiple.arco-select-view-disabled .arco-select-view-tag{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-select-view-multiple.arco-select-view-readonly,.arco-select-view-multiple.arco-select-view-disabled-input{cursor:default}.arco-select-view-multiple.arco-select-view-size-mini{font-size:12px}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-inner{padding-top:0;padding-bottom:0}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-tag{height:auto;min-height:20px}.arco-select-view-multiple.arco-select-view-size-mini .arco-select-view-input{height:20px}.arco-select-view-multiple.arco-select-view-size-medium{font-size:14px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:22px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-tag{height:auto;min-height:24px}.arco-select-view-multiple.arco-select-view-size-medium .arco-select-view-input{height:24px}.arco-select-view-multiple.arco-select-view-size-small{font-size:14px}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-tag{height:auto;min-height:20px}.arco-select-view-multiple.arco-select-view-size-small .arco-select-view-input{height:20px}.arco-select-view-multiple.arco-select-view-size-large{font-size:14px}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-inner{padding-top:2px;padding-bottom:2px}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-tag,.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-input{margin-top:1px;margin-bottom:1px;line-height:26px;vertical-align:middle}.arco-select-view-multiple.arco-select-view-size-large .arco-select-view-tag{height:auto;min-height:28px}.arco-select-view-multiple.arco-select-view-size-large 
.arco-select-view-input{height:28px}.arco-select-view-multiple.arco-select-view-disabled-input{cursor:pointer}.arco-select-view.arco-select-view-borderless{background:none!important;border:none!important;box-shadow:none!important}.arco-select-view-suffix .arco-feedback-icon{margin-left:4px}.arco-select-view-clear-btn svg,.arco-select-view-icon svg{display:block;font-size:12px}.arco-select-view-opened .arco-select-view-arrow-icon{transform:rotate(180deg)}.arco-select-view-expand-icon{transform:rotate(-45deg)}.arco-select-view-clear-btn{display:none;cursor:pointer}.arco-select-view:hover .arco-select-view-clear-btn{display:block}.arco-select-view:hover .arco-select-view-clear-btn~*{display:none}.arco-affix{position:fixed;z-index:999}.arco-alert{display:flex;align-items:center;box-sizing:border-box;width:100%;padding:8px 15px;overflow:hidden;font-size:14px;line-height:1.5715;text-align:left;border-radius:var(--border-radius-small)}.arco-alert-with-title{align-items:flex-start;padding:15px}.arco-alert-normal{background-color:var(--color-neutral-2);border:1px solid transparent}.arco-alert-info{background-color:var(--color-primary-light-1);border:1px solid transparent}.arco-alert-success{background-color:var(--color-success-light-1);border:1px solid transparent}.arco-alert-warning{background-color:var(--color-warning-light-1);border:1px solid transparent}.arco-alert-error{background-color:var(--color-danger-light-1);border:1px solid transparent}.arco-alert-banner{border:none;border-radius:0}.arco-alert-body{position:relative;flex:1}.arco-alert-title{margin-bottom:4px;font-weight:500;font-size:16px;line-height:1.5}.arco-alert-normal .arco-alert-title,.arco-alert-normal .arco-alert-content{color:var(--color-text-1)}.arco-alert-normal.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-info .arco-alert-title,.arco-alert-info .arco-alert-content{color:var(--color-text-1)}.arco-alert-info.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-success .arco-alert-title,.arco-alert-success .arco-alert-content{color:var(--color-text-1)}.arco-alert-success.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-warning .arco-alert-title,.arco-alert-warning .arco-alert-content{color:var(--color-text-1)}.arco-alert-warning.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-error .arco-alert-title,.arco-alert-error .arco-alert-content{color:var(--color-text-1)}.arco-alert-error.arco-alert-with-title .arco-alert-content{color:var(--color-text-2)}.arco-alert-icon{margin-right:8px}.arco-alert-icon svg{font-size:16px;vertical-align:-3px}.arco-alert-with-title .arco-alert-icon svg{font-size:18px;vertical-align:-5px}.arco-alert-normal .arco-alert-icon svg{color:var(--color-neutral-4)}.arco-alert-info .arco-alert-icon svg{color:rgb(var(--primary-6))}.arco-alert-success .arco-alert-icon svg{color:rgb(var(--success-6))}.arco-alert-warning .arco-alert-icon svg{color:rgb(var(--warning-6))}.arco-alert-error .arco-alert-icon svg{color:rgb(var(--danger-6))}.arco-alert-close-btn{top:4px;right:0;box-sizing:border-box;margin-left:8px;padding:0;color:var(--color-text-2);font-size:12px;background-color:transparent;border:none;outline:none;cursor:pointer;transition:color .1s cubic-bezier(0,0,1,1)}.arco-alert-close-btn:hover{color:var(--color-text-1)}.arco-alert-action+.arco-alert-close-btn{margin-left:8px}.arco-alert-action{margin-left:8px}.arco-alert-with-title 
.arco-alert-close-btn{margin-top:0;margin-right:0}.arco-anchor{position:relative;width:150px;overflow:auto}.arco-anchor-line-slider{position:absolute;top:0;left:0;z-index:1;width:2px;height:12px;margin-top:9.0005px;background-color:rgb(var(--primary-6));transition:top .2s cubic-bezier(.34,.69,.1,1)}.arco-anchor-list{position:relative;margin-top:0;margin-bottom:0;margin-left:4px;padding-left:0;list-style:none}.arco-anchor-list:before{position:absolute;left:-4px;width:2px;height:100%;background-color:var(--color-fill-3);content:""}.arco-anchor-sublist{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-anchor-link-item{margin-bottom:2px}.arco-anchor-link-item .arco-anchor-link{display:block;margin-bottom:2px;padding:4px 8px;overflow:hidden;color:var(--color-text-2);font-size:14px;line-height:1.5715;white-space:nowrap;text-decoration:none;text-overflow:ellipsis;border-radius:var(--border-radius-small);cursor:pointer}.arco-anchor-link-item .arco-anchor-link:hover{color:var(--color-text-1);font-weight:500;background-color:var(--color-fill-2)}.arco-anchor-link-active>.arco-anchor-link{color:var(--color-text-1);font-weight:500;transition:all .1s cubic-bezier(0,0,1,1)}.arco-anchor-link-item .arco-anchor-link-item{margin-left:16px}.arco-anchor-line-less .arco-anchor-list{margin-left:0}.arco-anchor-line-less .arco-anchor-list:before{display:none}.arco-anchor-line-less .arco-anchor-link-active>.arco-anchor-link{color:rgb(var(--primary-6));font-weight:500;background-color:var(--color-fill-2)}.arco-autocomplete-popup .arco-select-popup{background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-autocomplete-popup .arco-select-popup .arco-select-popup-inner{max-height:200px;padding:4px 0}.arco-autocomplete-popup .arco-select-popup .arco-select-option{height:36px;padding:0 12px;font-size:14px;line-height:36px;color:var(--color-text-1);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-selected{color:var(--color-text-1);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-disabled{color:var(--color-text-4);background-color:var(--color-bg-popup)}.arco-autocomplete-popup .arco-select-popup .arco-select-option-selected{font-weight:500}.arco-avatar{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;width:40px;height:40px;color:var(--color-white);font-size:20px;white-space:nowrap;vertical-align:middle;background-color:var(--color-fill-4)}.arco-avatar-circle{border-radius:var(--border-radius-circle)}.arco-avatar-circle .arco-avatar-image{overflow:hidden;border-radius:var(--border-radius-circle)}.arco-avatar-square{border-radius:var(--border-radius-medium)}.arco-avatar-square .arco-avatar-image{overflow:hidden;border-radius:var(--border-radius-medium)}.arco-avatar-text{position:absolute;left:50%;font-weight:500;line-height:1;transform:translate(-50%);transform-origin:0 center}.arco-avatar-image{display:inline-block;width:100%;height:100%}.arco-avatar-image-icon{display:flex;align-items:center;justify-content:center;width:100%;height:100%}.arco-avatar-image img,.arco-avatar-image 
picture{width:100%;height:100%}.arco-avatar-trigger-icon-button{position:absolute;right:-4px;bottom:-4px;z-index:1;width:20px;height:20px;color:var(--color-fill-4);font-size:12px;line-height:20px;text-align:center;background-color:var(--color-neutral-2);border-radius:var(--border-radius-circle);transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-avatar-trigger-icon-mask{position:absolute;top:0;left:0;z-index:0;display:flex;align-items:center;justify-content:center;width:100%;height:100%;color:var(--color-white);font-size:16px;background-color:#1d212999;border-radius:var(--border-radius-medium);opacity:0;transition:all .1s cubic-bezier(0,0,1,1)}.arco-avatar-circle .arco-avatar-trigger-icon-mask{border-radius:var(--border-radius-circle)}.arco-avatar-with-trigger-icon{cursor:pointer}.arco-avatar-with-trigger-icon:hover .arco-avatar-trigger-icon-mask{z-index:2;opacity:1}.arco-avatar-with-trigger-icon:hover .arco-avatar-trigger-icon-button{background-color:var(--color-neutral-3)}.arco-avatar-group{display:inline-block;line-height:0}.arco-avatar-group-max-count-avatar{color:var(--color-white);font-size:20px;cursor:default}.arco-avatar-group .arco-avatar{border:2px solid var(--color-bg-2)}.arco-avatar-group .arco-avatar:not(:first-child){margin-left:-10px}.arco-avatar-group-popover .arco-avatar:not(:first-child){margin-left:4px}.arco-back-top{position:fixed;right:24px;bottom:24px;z-index:100}.arco-back-top-btn{width:40px;height:40px;color:var(--color-white);font-size:12px;text-align:center;background-color:rgb(var(--primary-6));border:none;border-radius:var(--border-radius-circle);outline:none;cursor:pointer;transition:all .2s cubic-bezier(0,0,1,1)}.arco-back-top-btn:hover{background-color:rgb(var(--primary-5))}.arco-back-top-btn svg{font-size:14px}.arco-badge{position:relative;display:inline-block;line-height:1}.arco-badge-number,.arco-badge-dot,.arco-badge-text,.arco-badge-custom-dot{position:absolute;top:2px;right:2px;z-index:2;box-sizing:border-box;overflow:hidden;text-align:center;border-radius:20px;transform:translate(50%,-50%);transform-origin:100% 0%}.arco-badge-custom-dot{background-color:var(--color-bg-2)}.arco-badge-number,.arco-badge-text{min-width:20px;height:20px;padding:0 6px;color:var(--color-white);font-weight:500;font-size:12px;line-height:20px;background-color:rgb(var(--danger-6));box-shadow:0 0 0 2px var(--color-bg-2)}.arco-badge-dot{width:6px;height:6px;background-color:rgb(var(--danger-6));border-radius:var(--border-radius-circle);box-shadow:0 0 0 2px var(--color-bg-2)}.arco-badge-no-children .arco-badge-dot,.arco-badge-no-children .arco-badge-number,.arco-badge-no-children 
.arco-badge-text{position:relative;top:unset;right:unset;display:inline-block;transform:none}.arco-badge-status-wrapper{display:inline-flex;align-items:center}.arco-badge-status-dot{display:inline-block;width:6px;height:6px;border-radius:var(--border-radius-circle)}.arco-badge-status-normal{background-color:var(--color-fill-4)}.arco-badge-status-processing{background-color:rgb(var(--primary-6))}.arco-badge-status-success{background-color:rgb(var(--success-6))}.arco-badge-status-warning{background-color:rgb(var(--warning-6))}.arco-badge-status-danger,.arco-badge-color-red{background-color:rgb(var(--danger-6))}.arco-badge-color-orangered{background-color:#f77234}.arco-badge-color-orange{background-color:rgb(var(--orange-6))}.arco-badge-color-gold{background-color:rgb(var(--gold-6))}.arco-badge-color-lime{background-color:rgb(var(--lime-6))}.arco-badge-color-green{background-color:rgb(var(--success-6))}.arco-badge-color-cyan{background-color:rgb(var(--cyan-6))}.arco-badge-color-arcoblue{background-color:rgb(var(--primary-6))}.arco-badge-color-purple{background-color:rgb(var(--purple-6))}.arco-badge-color-pinkpurple{background-color:rgb(var(--pinkpurple-6))}.arco-badge-color-magenta{background-color:rgb(var(--magenta-6))}.arco-badge-color-gray{background-color:rgb(var(--gray-4))}.arco-badge .arco-badge-status-text{margin-left:8px;color:var(--color-text-1);font-size:12px;line-height:1.5715}.arco-badge-number-text{display:inline-block;animation:arco-badge-scale .5s cubic-bezier(.3,1.3,.3,1)}@keyframes arco-badge-scale{0%{transform:scale(0)}to{transform:scale(1)}}.badge-zoom-enter,.badge-zoom-appear{transform:translate(50%,-50%) scale(.2);transform-origin:center}.badge-zoom-enter-active,.badge-zoom-appear-active{transform:translate(50%,-50%) scale(1);transform-origin:center;opacity:1;transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.badge-zoom-exit{transform:translate(50%,-50%) scale(1);transform-origin:center;opacity:1}.badge-zoom-exit-active{transform:translate(50%,-50%) scale(.2);transform-origin:center;opacity:0;transition:opacity .3s cubic-bezier(.3,1.3,.3,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-breadcrumb{display:inline-flex;align-items:center;color:var(--color-text-2);font-size:14px}.arco-breadcrumb-icon{color:var(--color-text-2)}.arco-breadcrumb-item{display:inline-block;padding:0 4px;color:var(--color-text-2);line-height:24px;vertical-align:middle}.arco-breadcrumb-item>.arco-icon{color:var(--color-text-3)}.arco-breadcrumb-item a{display:inline-block;margin:0 -4px;padding:0 4px;color:var(--color-text-2);text-decoration:none;border-radius:var(--border-radius-small);background-color:transparent}.arco-breadcrumb-item a:hover{color:rgb(var(--link-6));background-color:var(--color-fill-2)}.arco-breadcrumb-item:last-child{color:var(--color-text-1);font-weight:500}.arco-breadcrumb-item-ellipses{position:relative;top:-3px;display:inline-block;padding:0 4px;color:var(--color-text-2)}.arco-breadcrumb-item-separator{display:inline-block;margin:0 4px;color:var(--color-text-4);line-height:24px;vertical-align:middle}.arco-breadcrumb-item-with-dropdown{cursor:pointer}.arco-breadcrumb-item-dropdown-icon{margin-left:4px;color:var(--color-text-2);font-size:12px}.arco-breadcrumb-item-dropdown-icon-active svg{transform:rotate(180deg)}.arco-btn{position:relative;display:inline-flex;align-items:center;justify-content:center;box-sizing:border-box;font-weight:400;line-height:1.5715;white-space:nowrap;outline:none;cursor:pointer;transition:all .1s 
cubic-bezier(0,0,1,1);-webkit-appearance:none;user-select:none}.arco-btn>a:only-child{color:currentColor}.arco-btn:active{transition:none}.arco-btn-long{display:flex;width:100%}.arco-btn-link{display:inline-flex;align-items:center;justify-content:center;text-decoration:none}.arco-btn-link:not([href]){color:var(--color-text-4)}.arco-btn-link:hover{text-decoration:none}.arco-btn-link.arco-btn-only-icon{display:inline-flex;align-items:center;justify-content:center;vertical-align:top}.arco-btn.arco-btn-only-icon .arco-btn-icon{display:flex;justify-content:center}.arco-btn-loading{position:relative;cursor:default}.arco-btn-loading:before{position:absolute;top:-1px;right:-1px;bottom:-1px;left:-1px;z-index:1;display:block;background:#fff;border-radius:inherit;opacity:.4;transition:opacity .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-btn-loading-fixed-width{transition:none}.arco-btn-two-chinese-chars>*:not(svg){margin-right:-.3em;letter-spacing:.3em}.arco-btn-outline,.arco-btn-outline[type=button],.arco-btn-outline[type=submit]{color:rgb(var(--primary-6));background-color:transparent;border:1px solid rgb(var(--primary-6))}.arco-btn-outline:hover,.arco-btn-outline[type=button]:hover,.arco-btn-outline[type=submit]:hover{color:rgb(var(--primary-5));background-color:transparent;border-color:rgb(var(--primary-5))}.arco-btn-outline:focus-visible,.arco-btn-outline[type=button]:focus-visible,.arco-btn-outline[type=submit]:focus-visible{box-shadow:0 0 0 .25em rgb(var(--primary-3))}.arco-btn-outline:active,.arco-btn-outline[type=button]:active,.arco-btn-outline[type=submit]:active{color:rgb(var(--primary-7));background-color:transparent;border-color:rgb(var(--primary-7))}.arco-btn-outline.arco-btn-loading,.arco-btn-outline[type=button].arco-btn-loading,.arco-btn-outline[type=submit].arco-btn-loading{color:rgb(var(--primary-6));background-color:transparent;border:1px solid rgb(var(--primary-6))}.arco-btn-outline.arco-btn-disabled,.arco-btn-outline[type=button].arco-btn-disabled,.arco-btn-outline[type=submit].arco-btn-disabled{color:var(--color-primary-light-3);background-color:transparent;border:1px solid var(--color-primary-light-3);cursor:not-allowed}.arco-btn-outline.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:transparent;border-color:rgb(var(--warning-6))}.arco-btn-outline.arco-btn-status-warning:hover{color:rgb(var(--warning-5));background-color:transparent;border-color:rgb(var(--warning-5))}.arco-btn-outline.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-outline.arco-btn-status-warning:active{color:rgb(var(--warning-7));background-color:transparent;border-color:rgb(var(--warning-7))}.arco-btn-outline.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:transparent;border-color:rgb(var(--warning-6))}.arco-btn-outline.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:transparent;border:1px solid var(--color-warning-light-3)}.arco-btn-outline.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:transparent;border-color:rgb(var(--danger-6))}.arco-btn-outline.arco-btn-status-danger:hover{color:rgb(var(--danger-5));background-color:transparent;border-color:rgb(var(--danger-5))}.arco-btn-outline.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--danger-3))}.arco-btn-outline.arco-btn-status-danger:active{color:rgb(var(--danger-7));background-color:transparent;border-color:rgb(var(--danger-7))}.arco-btn-outline.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:transparent;border-color:rgb(var(--danger-6))}.arco-btn-outline.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:transparent;border:1px solid var(--color-danger-light-3)}.arco-btn-outline.arco-btn-status-success{color:rgb(var(--success-6));background-color:transparent;border-color:rgb(var(--success-6))}.arco-btn-outline.arco-btn-status-success:hover{color:rgb(var(--success-5));background-color:transparent;border-color:rgb(var(--success-5))}.arco-btn-outline.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-outline.arco-btn-status-success:active{color:rgb(var(--success-7));background-color:transparent;border-color:rgb(var(--success-7))}.arco-btn-outline.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:transparent;border-color:rgb(var(--success-6))}.arco-btn-outline.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:transparent;border:1px solid var(--color-success-light-3)}.arco-btn-primary,.arco-btn-primary[type=button],.arco-btn-primary[type=submit]{color:#fff;background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-btn-primary:hover,.arco-btn-primary[type=button]:hover,.arco-btn-primary[type=submit]:hover{color:#fff;background-color:rgb(var(--primary-5));border-color:transparent}.arco-btn-primary:focus-visible,.arco-btn-primary[type=button]:focus-visible,.arco-btn-primary[type=submit]:focus-visible{box-shadow:0 0 0 .25em rgb(var(--primary-3))}.arco-btn-primary:active,.arco-btn-primary[type=button]:active,.arco-btn-primary[type=submit]:active{color:#fff;background-color:rgb(var(--primary-7));border-color:transparent}.arco-btn-primary.arco-btn-loading,.arco-btn-primary[type=button].arco-btn-loading,.arco-btn-primary[type=submit].arco-btn-loading{color:#fff;background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-btn-primary.arco-btn-disabled,.arco-btn-primary[type=button].arco-btn-disabled,.arco-btn-primary[type=submit].arco-btn-disabled{color:#fff;background-color:var(--color-primary-light-3);border:1px solid transparent;cursor:not-allowed}.arco-btn-primary.arco-btn-status-warning{color:#fff;background-color:rgb(var(--warning-6));border-color:transparent}.arco-btn-primary.arco-btn-status-warning:hover{color:#fff;background-color:rgb(var(--warning-5));border-color:transparent}.arco-btn-primary.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-primary.arco-btn-status-warning:active{color:#fff;background-color:rgb(var(--warning-7));border-color:transparent}.arco-btn-primary.arco-btn-status-warning.arco-btn-loading{color:#fff;background-color:rgb(var(--warning-6));border-color:transparent}.arco-btn-primary.arco-btn-status-warning.arco-btn-disabled{color:#fff;background-color:var(--color-warning-light-3);border:1px solid transparent}.arco-btn-primary.arco-btn-status-danger{color:#fff;background-color:rgb(var(--danger-6));border-color:transparent}.arco-btn-primary.arco-btn-status-danger:hover{color:#fff;background-color:rgb(var(--danger-5));border-color:transparent}.arco-btn-primary.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--danger-3))}.arco-btn-primary.arco-btn-status-danger:active{color:#fff;background-color:rgb(var(--danger-7));border-color:transparent}.arco-btn-primary.arco-btn-status-danger.arco-btn-loading{color:#fff;background-color:rgb(var(--danger-6));border-color:transparent}.arco-btn-primary.arco-btn-status-danger.arco-btn-disabled{color:#fff;background-color:var(--color-danger-light-3);border:1px solid transparent}.arco-btn-primary.arco-btn-status-success{color:#fff;background-color:rgb(var(--success-6));border-color:transparent}.arco-btn-primary.arco-btn-status-success:hover{color:#fff;background-color:rgb(var(--success-5));border-color:transparent}.arco-btn-primary.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-primary.arco-btn-status-success:active{color:#fff;background-color:rgb(var(--success-7));border-color:transparent}.arco-btn-primary.arco-btn-status-success.arco-btn-loading{color:#fff;background-color:rgb(var(--success-6));border-color:transparent}.arco-btn-primary.arco-btn-status-success.arco-btn-disabled{color:#fff;background-color:var(--color-success-light-3);border:1px solid transparent}.arco-btn-secondary,.arco-btn-secondary[type=button],.arco-btn-secondary[type=submit]{color:var(--color-text-2);background-color:var(--color-secondary);border:1px solid transparent}.arco-btn-secondary:hover,.arco-btn-secondary[type=button]:hover,.arco-btn-secondary[type=submit]:hover{color:var(--color-text-2);background-color:var(--color-secondary-hover);border-color:transparent}.arco-btn-secondary:focus-visible,.arco-btn-secondary[type=button]:focus-visible,.arco-btn-secondary[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-secondary:active,.arco-btn-secondary[type=button]:active,.arco-btn-secondary[type=submit]:active{color:var(--color-text-2);background-color:var(--color-secondary-active);border-color:transparent}.arco-btn-secondary.arco-btn-loading,.arco-btn-secondary[type=button].arco-btn-loading,.arco-btn-secondary[type=submit].arco-btn-loading{color:var(--color-text-2);background-color:var(--color-secondary);border:1px solid transparent}.arco-btn-secondary.arco-btn-disabled,.arco-btn-secondary[type=button].arco-btn-disabled,.arco-btn-secondary[type=submit].arco-btn-disabled{color:var(--color-text-4);background-color:var(--color-secondary-disabled);border:1px solid transparent;cursor:not-allowed}.arco-btn-secondary.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-warning-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-secondary.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-warning-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:var(--color-warning-light-1);border:1px solid 
transparent}.arco-btn-secondary.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-danger-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-secondary.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-danger-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:var(--color-danger-light-1);border:1px solid transparent}.arco-btn-secondary.arco-btn-status-success{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-success-light-2);border-color:transparent}.arco-btn-secondary.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-secondary.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-success-light-3);border-color:transparent}.arco-btn-secondary.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:transparent}.arco-btn-secondary.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:var(--color-success-light-1);border:1px solid transparent}.arco-btn-dashed,.arco-btn-dashed[type=button],.arco-btn-dashed[type=submit]{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3)}.arco-btn-dashed:hover,.arco-btn-dashed[type=button]:hover,.arco-btn-dashed[type=submit]:hover{color:var(--color-text-2);background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-btn-dashed:focus-visible,.arco-btn-dashed[type=button]:focus-visible,.arco-btn-dashed[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-dashed:active,.arco-btn-dashed[type=button]:active,.arco-btn-dashed[type=submit]:active{color:var(--color-text-2);background-color:var(--color-fill-4);border-color:var(--color-neutral-5)}.arco-btn-dashed.arco-btn-loading,.arco-btn-dashed[type=button].arco-btn-loading,.arco-btn-dashed[type=submit].arco-btn-loading{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3)}.arco-btn-dashed.arco-btn-disabled,.arco-btn-dashed[type=button].arco-btn-disabled,.arco-btn-dashed[type=submit].arco-btn-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border:1px dashed var(--color-neutral-3);cursor:not-allowed}.arco-btn-dashed.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-warning-light-2);border-color:var(--color-warning-light-3)}.arco-btn-dashed.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em 
rgb(var(--warning-3))}.arco-btn-dashed.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-warning-light-3);border-color:var(--color-warning-light-4)}.arco-btn-dashed.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1);border-color:var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:var(--color-warning-light-1);border:1px dashed var(--color-warning-light-2)}.arco-btn-dashed.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-danger-light-2);border-color:var(--color-danger-light-3)}.arco-btn-dashed.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-dashed.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-danger-light-3);border-color:var(--color-danger-light-4)}.arco-btn-dashed.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1);border-color:var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:var(--color-danger-light-1);border:1px dashed var(--color-danger-light-2)}.arco-btn-dashed.arco-btn-status-success{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:var(--color-success-light-2)}.arco-btn-dashed.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-success-light-2);border-color:var(--color-success-light-3)}.arco-btn-dashed.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-dashed.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-success-light-3);border-color:var(--color-success-light-4)}.arco-btn-dashed.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:var(--color-success-light-1);border-color:var(--color-success-light-2)}.arco-btn-dashed.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:var(--color-success-light-1);border:1px dashed var(--color-success-light-2)}.arco-btn-text,.arco-btn-text[type=button],.arco-btn-text[type=submit]{color:rgb(var(--primary-6));background-color:transparent;border:1px solid transparent}.arco-btn-text:hover,.arco-btn-text[type=button]:hover,.arco-btn-text[type=submit]:hover{color:rgb(var(--primary-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text:focus-visible,.arco-btn-text[type=button]:focus-visible,.arco-btn-text[type=submit]:focus-visible{box-shadow:0 0 0 .25em var(--color-neutral-4)}.arco-btn-text:active,.arco-btn-text[type=button]:active,.arco-btn-text[type=submit]:active{color:rgb(var(--primary-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-loading,.arco-btn-text[type=button].arco-btn-loading,.arco-btn-text[type=submit].arco-btn-loading{color:rgb(var(--primary-6));background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-disabled,.arco-btn-text[type=button].arco-btn-disabled,.arco-btn-text[type=submit].arco-btn-disabled{color:var(--color-primary-light-3);background-color:transparent;border:1px solid 
transparent;cursor:not-allowed}.arco-btn-text.arco-btn-status-warning{color:rgb(var(--warning-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-warning:hover{color:rgb(var(--warning-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-warning:focus-visible{box-shadow:0 0 0 .25em rgb(var(--warning-3))}.arco-btn-text.arco-btn-status-warning:active{color:rgb(var(--warning-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-warning.arco-btn-loading{color:rgb(var(--warning-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-warning.arco-btn-disabled{color:var(--color-warning-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-status-danger{color:rgb(var(--danger-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-danger:hover{color:rgb(var(--danger-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-danger:focus-visible{box-shadow:0 0 0 .25em rgb(var(--danger-3))}.arco-btn-text.arco-btn-status-danger:active{color:rgb(var(--danger-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-danger.arco-btn-loading{color:rgb(var(--danger-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-danger.arco-btn-disabled{color:var(--color-danger-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-text.arco-btn-status-success{color:rgb(var(--success-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-success:hover{color:rgb(var(--success-6));background-color:var(--color-fill-2);border-color:transparent}.arco-btn-text.arco-btn-status-success:focus-visible{box-shadow:0 0 0 .25em rgb(var(--success-3))}.arco-btn-text.arco-btn-status-success:active{color:rgb(var(--success-6));background-color:var(--color-fill-3);border-color:transparent}.arco-btn-text.arco-btn-status-success.arco-btn-loading{color:rgb(var(--success-6));background-color:transparent;border-color:transparent}.arco-btn-text.arco-btn-status-success.arco-btn-disabled{color:var(--color-success-light-3);background-color:transparent;border:1px solid transparent}.arco-btn-size-mini{height:24px;padding:0 11px;font-size:12px;border-radius:var(--border-radius-small)}.arco-btn-size-mini:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:4px}.arco-btn-size-mini svg{vertical-align:-1px}.arco-btn-size-mini.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:3px;padding-left:3px}.arco-btn-size-mini.arco-btn-only-icon{width:24px;height:24px;padding:0}.arco-btn-size-mini.arco-btn-shape-circle{width:24px;height:24px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-mini.arco-btn-shape-round{border-radius:12px}.arco-btn-size-small{height:28px;padding:0 15px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-small:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:6px}.arco-btn-size-small 
svg{vertical-align:-2px}.arco-btn-size-small.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:5px;padding-left:5px}.arco-btn-size-small.arco-btn-only-icon{width:28px;height:28px;padding:0}.arco-btn-size-small.arco-btn-shape-circle{width:28px;height:28px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-small.arco-btn-shape-round{border-radius:14px}.arco-btn-size-medium{height:32px;padding:0 15px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-medium:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:8px}.arco-btn-size-medium svg{vertical-align:-2px}.arco-btn-size-medium.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:4px;padding-left:4px}.arco-btn-size-medium.arco-btn-only-icon{width:32px;height:32px;padding:0}.arco-btn-size-medium.arco-btn-shape-circle{width:32px;height:32px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-medium.arco-btn-shape-round{border-radius:16px}.arco-btn-size-large{height:36px;padding:0 19px;font-size:14px;border-radius:var(--border-radius-small)}.arco-btn-size-large:not(.arco-btn-only-icon) .arco-btn-icon{margin-right:8px}.arco-btn-size-large svg{vertical-align:-2px}.arco-btn-size-large.arco-btn-loading-fixed-width.arco-btn-loading{padding-right:8px;padding-left:8px}.arco-btn-size-large.arco-btn-only-icon{width:36px;height:36px;padding:0}.arco-btn-size-large.arco-btn-shape-circle{width:36px;height:36px;padding:0;text-align:center;border-radius:var(--border-radius-circle)}.arco-btn-size-large.arco-btn-shape-round{border-radius:18px}.arco-btn-group{display:inline-flex;align-items:center}.arco-btn-group .arco-btn-outline:not(:first-child),.arco-btn-group .arco-btn-dashed:not(:first-child){margin-left:-1px}.arco-btn-group .arco-btn-primary:not(:last-child){border-right:1px solid rgb(var(--primary-5))}.arco-btn-group .arco-btn-secondary:not(:last-child){border-right:1px solid var(--color-secondary-hover)}.arco-btn-group .arco-btn-status-warning:not(:last-child){border-right:1px solid rgb(var(--warning-5))}.arco-btn-group .arco-btn-status-danger:not(:last-child){border-right:1px solid rgb(var(--danger-5))}.arco-btn-group .arco-btn-status-success:not(:last-child){border-right:1px solid rgb(var(--success-5))}.arco-btn-group .arco-btn-outline:hover,.arco-btn-group .arco-btn-dashed:hover,.arco-btn-group .arco-btn-outline:active,.arco-btn-group .arco-btn-dashed:active{z-index:2}.arco-btn-group .arco-btn:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.arco-btn-group .arco-btn:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.arco-btn-group .arco-btn:not(:first-child):not(:last-child){border-radius:0}body[arco-theme=dark] .arco-btn-primary.arco-btn-disabled{color:#ffffff4d}.arco-calendar{box-sizing:border-box;border:1px solid var(--color-neutral-3)}.arco-calendar-header{display:flex;padding:24px}.arco-calendar-header-left{position:relative;display:flex;flex:1;align-items:center;height:28px;line-height:28px}.arco-calendar-header-right{position:relative;height:28px}.arco-calendar-header-value{color:var(--color-text-1);font-weight:500;font-size:20px}.arco-calendar-header-icon{width:28px;height:28px;margin-right:12px;color:var(--color-text-2);font-size:12px;line-height:28px;text-align:center;background-color:var(--color-bg-5);border-radius:50%;transition:all .1s cubic-bezier(0,0,1,1);user-select:none}.arco-calendar-header-icon:not(:first-child){margin:0 12px}.arco-calendar-header-icon:focus-visible{box-shadow:0 0 0 2px 
var(--color-primary-light-3)}.arco-calendar-header-icon:not(.arco-calendar-header-icon-hidden){cursor:pointer}.arco-calendar-header-icon:not(.arco-calendar-header-icon-hidden):hover{background-color:var(--color-fill-3)}.arco-calendar .arco-calendar-header-value-year{width:100px;margin-right:8px}.arco-calendar .arco-calendar-header-value-month{width:76px;margin-right:32px}.arco-calendar-month{width:100%}.arco-calendar-month-row{display:flex;height:100px}.arco-calendar-month-row .arco-calendar-cell{flex:1;overflow:hidden;border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-month-row:last-child .arco-calendar-cell{border-bottom:unset}.arco-calendar-month-cell-body{box-sizing:border-box}.arco-calendar-mode-month:not(.arco-calendar-panel) .arco-calendar-cell:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-calendar-week-list{display:flex;box-sizing:border-box;width:100%;padding:0;border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-week-list-item{flex:1;padding:20px 16px;color:#7d7d7f;text-align:left}.arco-calendar-cell .arco-calendar-date{box-sizing:border-box;width:100%;height:100%;padding:10px;cursor:pointer}.arco-calendar-cell .arco-calendar-date-circle{width:28px;height:28px;line-height:28px;text-align:center;border-radius:50%}.arco-calendar-date-content{height:70px;overflow-y:auto}.arco-calendar-cell-today .arco-calendar-date-circle{box-sizing:border-box;border:1px solid rgb(var(--primary-6))}.arco-calendar-date-value{color:var(--color-text-4);font-weight:500;font-size:16px}.arco-calendar-cell-in-view .arco-calendar-date-value{color:var(--color-text-1)}.arco-calendar-mode-month .arco-calendar-cell-selected .arco-calendar-date-circle,.arco-calendar-mode-year .arco-calendar-cell-selected .arco-calendar-cell-selected .arco-calendar-date-circle{color:#fff;background-color:rgb(var(--primary-6));border:1px solid rgb(var(--primary-6))}.arco-calendar-mode-year:not(.arco-calendar-panel){min-width:820px}.arco-calendar-mode-year .arco-calendar-header{border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-mode-year .arco-calendar-body{padding:12px}.arco-calendar-mode-year .arco-calendar-year-row{display:flex}.arco-calendar-year-row>.arco-calendar-cell{flex:1;padding:20px 8px}.arco-calendar-year-row>.arco-calendar-cell:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-calendar-year-row:not(:last-child)>.arco-calendar-cell{border-bottom:1px solid var(--color-neutral-3)}.arco-calendar-month-with-days .arco-calendar-month-row{height:26px}.arco-calendar-month-with-days .arco-calendar-cell{border-bottom:0}.arco-calendar-month-with-days .arco-calendar-month-cell-body{padding:0}.arco-calendar-month-with-days .arco-calendar-month-title{padding:10px 6px;color:var(--color-text-1);font-weight:500;font-size:16px}.arco-calendar-month-cell{width:100%;font-size:12px}.arco-calendar-month-cell .arco-calendar-week-list{padding:0;border-bottom:unset}.arco-calendar-month-cell .arco-calendar-week-list-item{padding:6px;color:#7d7d7f;text-align:center}.arco-calendar-month-cell .arco-calendar-cell{text-align:center}.arco-calendar-month-cell .arco-calendar-date{padding:2px}.arco-calendar-month-cell .arco-calendar-date-value{font-size:14px}.arco-calendar-month-cell .arco-calendar-date-circle{display:inline-block;width:22px;height:22px;line-height:22px;text-align:center;border-radius:50%}.arco-calendar-panel{background-color:var(--color-bg-5);border:1px solid var(--color-neutral-3)}.arco-calendar-panel .arco-calendar-header{padding:8px 16px;border-bottom:1px solid 
var(--color-neutral-3)}.arco-calendar-panel .arco-calendar-header-value{flex:1;font-size:14px;line-height:24px;text-align:center}.arco-calendar-panel .arco-calendar-header-icon{width:24px;height:24px;margin-right:2px;margin-left:2px;line-height:24px}.arco-calendar-panel .arco-calendar-body{padding:14px 16px}.arco-calendar-panel .arco-calendar-month-cell-body{padding:0}.arco-calendar-panel .arco-calendar-month-row{height:unset}.arco-calendar-panel .arco-calendar-week-list{padding:0;border-bottom:unset}.arco-calendar-panel .arco-calendar-week-list-item{height:32px;padding:0;font-weight:400;line-height:32px;text-align:center}.arco-calendar-panel .arco-calendar-cell,.arco-calendar-panel .arco-calendar-year-row .arco-calendar-cell{box-sizing:border-box;padding:2px 0;text-align:center;border-right:0;border-bottom:0}.arco-calendar-panel .arco-calendar-cell .arco-calendar-date{display:flex;justify-content:center;padding:4px 0}.arco-calendar-panel .arco-calendar-cell .arco-calendar-date-value{min-width:24px;height:24px;font-size:14px;line-height:24px;cursor:pointer}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell{padding:4px 0}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell .arco-calendar-date{padding:4px}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell .arco-calendar-date-value{width:100%;border-radius:12px}.arco-calendar-panel .arco-calendar-cell-selected .arco-calendar-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:50%}.arco-calendar-panel .arco-calendar-cell:not(.arco-calendar-cell-selected):not(.arco-calendar-cell-range-start):not(.arco-calendar-cell-range-end):not(.arco-calendar-cell-hover-range-start):not(.arco-calendar-cell-hover-range-end):not(.arco-calendar-cell-disabled):not(.arco-calendar-cell-week) .arco-calendar-date-value:hover{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border-radius:50%}.arco-calendar-panel.arco-calendar-mode-year .arco-calendar-cell:not(.arco-calendar-cell-selected):not(.arco-calendar-cell-range-start):not(.arco-calendar-cell-range-end):not(.arco-calendar-cell-hover-range-start):not(.arco-calendar-cell-hover-range-end):not(.arco-calendar-cell-disabled) .arco-calendar-date-value:hover{border-radius:12px}.arco-calendar-panel .arco-calendar-cell-today{position:relative}.arco-calendar-panel .arco-calendar-cell-today:after{position:absolute;bottom:0;left:50%;display:block;width:4px;height:4px;margin-left:-2px;background-color:rgb(var(--primary-6));border-radius:50%;content:""}.arco-calendar-cell-in-range .arco-calendar-date{background-color:var(--color-primary-light-1)}.arco-calendar-cell-range-start .arco-calendar-date{border-radius:16px 0 0 16px}.arco-calendar-cell-range-end .arco-calendar-date{border-radius:0 16px 16px 0}.arco-calendar-cell-in-range-near-hover .arco-calendar-date{border-radius:0}.arco-calendar-cell-range-start .arco-calendar-date-value,.arco-calendar-cell-range-end .arco-calendar-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:50%}.arco-calendar-cell-hover-in-range .arco-calendar-date{background-color:var(--color-primary-light-1)}.arco-calendar-cell-hover-range-start .arco-calendar-date{border-radius:16px 0 0 16px}.arco-calendar-cell-hover-range-end .arco-calendar-date{border-radius:0 16px 16px 0}.arco-calendar-cell-hover-range-start .arco-calendar-date-value,.arco-calendar-cell-hover-range-end 
.arco-calendar-date-value{color:var(--color-text-1);background-color:var(--color-primary-light-2);border-radius:50%}.arco-calendar-panel .arco-calendar-cell-disabled>.arco-calendar-date{background-color:var(--color-fill-1);cursor:not-allowed}.arco-calendar-panel .arco-calendar-cell-disabled>.arco-calendar-date>.arco-calendar-date-value{color:var(--color-text-4);background-color:var(--color-fill-1);cursor:not-allowed}.arco-calendar-panel .arco-calendar-footer-btn-wrapper{height:38px;color:var(--color-text-1);line-height:38px;text-align:center;border-top:1px solid var(--color-neutral-3);cursor:pointer}.arco-calendar-rtl{direction:rtl}.arco-calendar-rtl .arco-calendar-header-icon{margin-right:0;margin-left:12px;transform:scaleX(-1)}.arco-calendar-rtl .arco-calendar-week-list-item{text-align:right}.arco-calendar-rtl.arco-calendar-mode-month:not(.arco-calendar-panel) .arco-calendar-cell:not(:last-child){border-right:0;border-left:1px solid var(--color-neutral-3)}.arco-calendar-rtl .arco-calendar-header-value-year{margin-right:0;margin-left:8px}.arco-calendar-rtl .arco-calendar-header-value-month{margin-right:0;margin-left:32px}.arco-card{position:relative;background:var(--color-bg-2);border-radius:var(--border-radius-none);transition:box-shadow .2s cubic-bezier(0,0,1,1)}.arco-card-header{position:relative;display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;overflow:hidden;border-bottom:1px solid var(--color-neutral-3)}.arco-card-header-no-title:before{display:block;content:" "}.arco-card-header-title{flex:1;color:var(--color-text-1);font-weight:500;line-height:1.5715;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-header-extra{color:rgb(var(--primary-6));overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-body{color:var(--color-text-2)}.arco-card-cover{overflow:hidden}.arco-card-cover>*{display:block;width:100%}.arco-card-actions{display:flex;align-items:center;justify-content:space-between;margin-top:20px}.arco-card-actions:before{visibility:hidden;content:""}.arco-card-actions-right{display:flex;align-items:center}.arco-card-actions-item{display:flex;align-items:center;justify-content:center;color:var(--color-text-2);cursor:pointer;transition:color .2s cubic-bezier(0,0,1,1);overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-actions-item:hover{color:rgb(var(--primary-6))}.arco-card-actions-item:not(:last-child){margin-right:12px}.arco-card-meta-footer{display:flex;align-items:center;justify-content:space-between}.arco-card-meta-footer:last-child{margin-top:20px}.arco-card-meta-footer-only-actions:before{visibility:hidden;content:""}.arco-card-meta-footer .arco-card-actions{margin-top:0}.arco-card-meta-title{color:var(--color-text-1);font-weight:500;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-card-meta-description:not(:first-child){margin-top:4px}.arco-card-grid{position:relative;box-sizing:border-box;width:33.33%;box-shadow:1px 0 0 0 var(--color-neutral-3),0 1px 0 0 var(--color-neutral-3),1px 1px 0 0 var(--color-neutral-3),1px 0 0 0 var(--color-neutral-3) inset,0 1px 0 0 var(--color-neutral-3) inset}.arco-card-grid:before{position:absolute;top:0;right:0;bottom:0;left:0;transition:box-shadow .2s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-card-grid-hoverable:hover{z-index:1}.arco-card-grid-hoverable:hover:before{box-shadow:0 4px 10px rgb(var(--gray-2))}.arco-card-grid 
.arco-card{background:none;box-shadow:none}.arco-card-contain-grid:not(.arco-card-loading)>.arco-card-body{display:flex;flex-wrap:wrap;margin:0 -1px;padding:0}.arco-card-hoverable:hover{box-shadow:0 4px 10px rgb(var(--gray-2))}.arco-card-bordered{border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small)}.arco-card-bordered .arco-card-cover{border-radius:var(--border-radius-small) var(--border-radius-small) 0 0}.arco-card-loading .arco-card-body{overflow:hidden;text-align:center}.arco-card-size-medium{font-size:14px}.arco-card-size-medium .arco-card-header{height:46px;padding:10px 16px}.arco-card-size-medium .arco-card-header-title,.arco-card-size-medium .arco-card-meta-title{font-size:16px}.arco-card-size-medium .arco-card-header-extra{font-size:14px}.arco-card-size-medium .arco-card-body{padding:16px}.arco-card-size-small{font-size:14px}.arco-card-size-small .arco-card-header{height:40px;padding:8px 16px}.arco-card-size-small .arco-card-header-title,.arco-card-size-small .arco-card-meta-title{font-size:16px}.arco-card-size-small .arco-card-header-extra{font-size:14px}.arco-card-size-small .arco-card-body{padding:12px 16px}body[arco-theme=dark] .arco-card-grid-hoverable:hover:before,body[arco-theme=dark] .arco-card-hoverable:hover{box-shadow:0 4px 10px rgba(var(--gray-1),40%)}@keyframes arco-carousel-slide-x-in{0%{transform:translate(100%)}to{transform:translate(0)}}@keyframes arco-carousel-slide-x-out{0%{transform:translate(0)}to{transform:translate(-100%)}}@keyframes arco-carousel-slide-x-in-reverse{0%{transform:translate(-100%)}to{transform:translate(0)}}@keyframes arco-carousel-slide-x-out-reverse{0%{transform:translate(0)}to{transform:translate(100%)}}@keyframes arco-carousel-slide-y-in{0%{transform:translateY(100%)}to{transform:translateY(0)}}@keyframes arco-carousel-slide-y-out{0%{transform:translateY(0)}to{transform:translateY(-100%)}}@keyframes arco-carousel-slide-y-in-reverse{0%{transform:translateY(-100%)}to{transform:translateY(0)}}@keyframes arco-carousel-slide-y-out-reverse{0%{transform:translateY(0)}to{transform:translateY(100%)}}@keyframes arco-carousel-card-bottom-to-middle{0%{transform:translate(0) translateZ(-400px);opacity:0}to{transform:translate(0) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-bottom{0%{transform:translate(-100%) translateZ(-200px);opacity:.4}to{transform:translate(-100%) translateZ(-400px);opacity:0}}@keyframes arco-carousel-card-top-to-middle{0%{transform:translate(-50%) translateZ(0);opacity:1}to{transform:translate(-100%) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-top{0%{transform:translate(0) translateZ(-200px);opacity:.4}to{transform:translate(-50%) translateZ(0);opacity:1}}@keyframes arco-carousel-card-bottom-to-middle-reverse{0%{transform:translate(-100%) translateZ(-400px);opacity:0}to{transform:translate(-100%) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-bottom-reverse{0%{transform:translate(0) translateZ(-200px);opacity:.4}to{transform:translate(0) translateZ(-400px);opacity:0}}@keyframes arco-carousel-card-top-to-middle-reverse{0%{transform:translate(-50%) translateZ(0);opacity:1}to{transform:translate(0) translateZ(-200px);opacity:.4}}@keyframes arco-carousel-card-middle-to-top-reverse{0%{transform:translate(-100%) translateZ(-200px);opacity:.4}to{transform:translate(-50%) 
translateZ(0);opacity:1}}.arco-carousel{position:relative}.arco-carousel-indicator-position-outer{margin-bottom:30px}.arco-carousel-slide,.arco-carousel-card,.arco-carousel-fade{position:relative;width:100%;height:100%;overflow:hidden}.arco-carousel-slide>*,.arco-carousel-card>*,.arco-carousel-fade>*{position:absolute;top:0;left:0;width:100%;height:100%;overflow:hidden}.arco-carousel-item-current{z-index:1}.arco-carousel-slide>*:not(.arco-carousel-item-current){display:none;visibility:hidden}.arco-carousel-slide.arco-carousel-horizontal .arco-carousel-item-slide-out{display:block;animation:arco-carousel-slide-x-out}.arco-carousel-slide.arco-carousel-horizontal .arco-carousel-item-slide-in{display:block;animation:arco-carousel-slide-x-in}.arco-carousel-slide.arco-carousel-horizontal.arco-carousel-negative .arco-carousel-item-slide-out{animation:arco-carousel-slide-x-out-reverse}.arco-carousel-slide.arco-carousel-horizontal.arco-carousel-negative .arco-carousel-item-slide-in{animation:arco-carousel-slide-x-in-reverse}.arco-carousel-slide.arco-carousel-vertical .arco-carousel-item-slide-out{display:block;animation:arco-carousel-slide-y-out}.arco-carousel-slide.arco-carousel-vertical .arco-carousel-item-slide-in{display:block;animation:arco-carousel-slide-y-in}.arco-carousel-slide.arco-carousel-vertical.arco-carousel-negative .arco-carousel-item-slide-out{animation:arco-carousel-slide-y-out-reverse}.arco-carousel-slide.arco-carousel-vertical.arco-carousel-negative .arco-carousel-item-slide-in{animation:arco-carousel-slide-y-in-reverse}.arco-carousel-card{perspective:800px}.arco-carousel-card>*{left:50%;transform:translate(-50%) translateZ(-400px);opacity:0;animation:arco-carousel-card-middle-to-bottom}.arco-carousel-card .arco-carousel-item-prev{transform:translate(-100%) translateZ(-200px);opacity:.4;animation:arco-carousel-card-top-to-middle}.arco-carousel-card .arco-carousel-item-next{transform:translate(0) translateZ(-200px);opacity:.4;animation:arco-carousel-card-bottom-to-middle}.arco-carousel-card .arco-carousel-item-current{transform:translate(-50%) translateZ(0);opacity:1;animation:arco-carousel-card-middle-to-top}.arco-carousel-card.arco-carousel-negative>*{animation:arco-carousel-card-middle-to-bottom-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-prev{animation:arco-carousel-card-bottom-to-middle-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-next{animation:arco-carousel-card-top-to-middle-reverse}.arco-carousel-card.arco-carousel-negative .arco-carousel-item-current{animation:arco-carousel-card-middle-to-top-reverse}.arco-carousel-fade>*{left:50%;transform:translate(-50%);opacity:0}.arco-carousel-fade .arco-carousel-item-current{opacity:1}.arco-carousel-indicator{position:absolute;display:flex;margin:0;padding:0}.arco-carousel-indicator-wrapper{position:absolute;z-index:2}.arco-carousel-indicator-wrapper-top{top:0;right:0;left:0;height:48px;background:linear-gradient(180deg,rgba(0,0,0,.15) 0%,rgba(0,0,0,0) 87%)}.arco-carousel-indicator-wrapper-bottom{right:0;bottom:0;left:0;height:48px;background:linear-gradient(180deg,rgba(0,0,0,0) 13%,rgba(0,0,0,.15) 100%)}.arco-carousel-indicator-wrapper-left{top:0;left:0;width:48px;height:100%;background:linear-gradient(90deg,rgba(0,0,0,.15) 0%,rgba(0,0,0,0) 87%)}.arco-carousel-indicator-wrapper-right{top:0;right:0;width:48px;height:100%;background:linear-gradient(90deg,rgba(0,0,0,0) 13%,rgba(0,0,0,.15) 
100%)}.arco-carousel-indicator-wrapper-outer{right:0;left:0;background:none}.arco-carousel-indicator-bottom{bottom:12px;left:50%;transform:translate(-50%)}.arco-carousel-indicator-top{top:12px;left:50%;transform:translate(-50%)}.arco-carousel-indicator-left{top:50%;left:12px;transform:translate(-50%,-50%) rotate(90deg)}.arco-carousel-indicator-right{top:50%;right:12px;transform:translate(50%,-50%) rotate(90deg)}.arco-carousel-indicator-outer{left:50%;padding:4px;background-color:transparent;border-radius:20px;transform:translate(-50%)}.arco-carousel-indicator-outer.arco-carousel-indicator-dot{bottom:-22px}.arco-carousel-indicator-outer.arco-carousel-indicator-line{bottom:-20px}.arco-carousel-indicator-outer.arco-carousel-indicator-slider{bottom:-16px;padding:0;background-color:rgba(var(--gray-4),.5)}.arco-carousel-indicator-outer .arco-carousel-indicator-item{background-color:rgba(var(--gray-4),.5)}.arco-carousel-indicator-outer .arco-carousel-indicator-item:hover,.arco-carousel-indicator-outer .arco-carousel-indicator-item-active{background-color:var(--color-fill-4)}.arco-carousel-indicator-item{display:inline-block;background-color:#ffffff4d;border-radius:var(--border-radius-medium);cursor:pointer}.arco-carousel-indicator-item:hover,.arco-carousel-indicator-item-active{background-color:var(--color-white)}.arco-carousel-indicator-dot .arco-carousel-indicator-item{width:6px;height:6px;border-radius:50%}.arco-carousel-indicator-dot .arco-carousel-indicator-item:not(:last-child){margin-right:8px}.arco-carousel-indicator-line .arco-carousel-indicator-item{width:12px;height:4px}.arco-carousel-indicator-line .arco-carousel-indicator-item:not(:last-child){margin-right:8px}.arco-carousel-indicator-slider{width:48px;height:4px;background-color:#ffffff4d;border-radius:var(--border-radius-medium);cursor:pointer}.arco-carousel-indicator-slider .arco-carousel-indicator-item{position:absolute;top:0;height:100%;transition:left .3s}.arco-carousel-arrow>div{position:absolute;z-index:2;display:flex;align-items:center;justify-content:center;width:24px;height:24px;color:var(--color-white);background-color:#ffffff4d;border-radius:50%;cursor:pointer}.arco-carousel-arrow>div>svg{color:var(--color-white);font-size:14px}.arco-carousel-arrow>div:hover{background-color:#ffffff80}.arco-carousel-arrow-left{top:50%;left:12px;transform:translateY(-50%)}.arco-carousel-arrow-right{top:50%;right:12px;transform:translateY(-50%)}.arco-carousel-arrow-top{top:12px;left:50%;transform:translate(-50%)}.arco-carousel-arrow-bottom{bottom:12px;left:50%;transform:translate(-50%)}.arco-carousel-arrow-hover div{opacity:0;transition:all .3s}.arco-carousel:hover .arco-carousel-arrow-hover div{opacity:1}body[arco-theme=dark] .arco-carousel-arrow>div{background-color:#17171a4d}body[arco-theme=dark] .arco-carousel-arrow>div:hover{background-color:#17171a80}body[arco-theme=dark] .arco-carousel-indicator-item,body[arco-theme=dark] .arco-carousel-indicator-slider{background-color:#17171a4d}body[arco-theme=dark] .arco-carousel-indicator-item-active,body[arco-theme=dark] .arco-carousel-indicator-item:hover{background-color:var(--color-white)}body[arco-theme=dark] .arco-carousel-indicator-outer.arco-carousel-indicator-slider{background-color:rgba(var(--gray-4),.5)}body[arco-theme=dark] .arco-carousel-indicator-outer .arco-carousel-indicator-item:hover,body[arco-theme=dark] .arco-carousel-indicator-outer 
.arco-carousel-indicator-item-active{background-color:var(--color-fill-4)}.arco-cascader-panel{display:inline-flex;box-sizing:border-box;height:200px;overflow:hidden;white-space:nowrap;list-style:none;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-cascader-search-panel{justify-content:center;width:100%;overflow:auto}.arco-cascader-popup-trigger-hover .arco-cascader-list-item{transition:fontweight 0s}.arco-cascader-highlight{font-weight:500}.arco-cascader-panel-column{position:relative;display:inline-flex;flex-direction:column;min-width:120px;height:100%;max-height:200px;background-color:var(--color-bg-popup)}.arco-cascader-panel-column-loading{display:inline-flex;align-items:center;justify-content:center}.arco-cascader-panel-column:not(:last-of-type){border-right:1px solid var(--color-fill-3)}.arco-cascader-column-content{flex:1;max-height:200px;overflow-y:auto}.arco-cascader-list-wrapper{position:relative;display:flex;flex-direction:column;box-sizing:border-box;height:100%;padding:4px 0}.arco-cascader-list-wrapper-with-footer{padding-bottom:0}.arco-cascader-list-empty{display:flex;align-items:center;height:100%}.arco-cascader-list{flex:1;box-sizing:border-box;margin:0;padding:0;list-style:none}.arco-cascader-list-multiple .arco-cascader-option-label,.arco-cascader-list-strictly .arco-cascader-option-label{padding-left:0}.arco-cascader-list-multiple .arco-cascader-option,.arco-cascader-list-strictly .arco-cascader-option{padding-left:12px}.arco-cascader-list-multiple .arco-cascader-option .arco-checkbox,.arco-cascader-list-strictly .arco-cascader-option .arco-checkbox,.arco-cascader-list-multiple .arco-cascader-option .arco-radio,.arco-cascader-list-strictly .arco-cascader-option .arco-radio{margin-right:8px;padding-left:0}.arco-cascader-search-list.arco-cascader-list-multiple .arco-cascader-option-label{padding-right:12px}.arco-cascader-list-footer{box-sizing:border-box;height:36px;padding-left:12px;line-height:36px;border-top:1px solid var(--color-fill-3)}.arco-cascader-option,.arco-cascader-search-option{position:relative;display:flex;box-sizing:border-box;min-width:100px;height:36px;color:var(--color-text-1);font-size:14px;line-height:36px;background-color:transparent;cursor:pointer}.arco-cascader-option-label,.arco-cascader-search-option-label{flex-grow:1;padding-right:34px;padding-left:12px}.arco-cascader-option .arco-icon-right,.arco-cascader-search-option .arco-icon-right,.arco-cascader-option .arco-icon-check,.arco-cascader-search-option .arco-icon-check{position:absolute;top:50%;right:10px;color:var(--color-text-2);font-size:12px;transform:translateY(-50%)}.arco-cascader-option .arco-icon-check,.arco-cascader-search-option .arco-icon-check{color:rgb(var(--primary-6))}.arco-cascader-option .arco-icon-loading,.arco-cascader-search-option .arco-icon-loading{position:absolute;top:50%;right:10px;margin-top:-6px;color:rgb(var(--primary-6));font-size:12px}.arco-cascader-option:hover,.arco-cascader-search-option-hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-cascader-option:hover .arco-checkbox:not(.arco-checkbox-disabled):not(.arco-checkbox-checked):hover .arco-checkbox-icon-hover:before,.arco-cascader-search-option-hover .arco-checkbox:not(.arco-checkbox-disabled):not(.arco-checkbox-checked):hover .arco-checkbox-icon-hover:before{background-color:var(--color-fill-3)}.arco-cascader-option:hover 
.arco-radio:not(.arco-radio-disabled):not(.arco-radio-checked):hover .arco-radio-icon-hover:before,.arco-cascader-search-option-hover .arco-radio:not(.arco-radio-disabled):not(.arco-radio-checked):hover .arco-radio-icon-hover:before{background-color:var(--color-fill-3)}.arco-cascader-option-disabled,.arco-cascader-search-option-disabled,.arco-cascader-option-disabled:hover,.arco-cascader-search-option-disabled:hover{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-cascader-option-disabled .arco-icon-right,.arco-cascader-search-option-disabled .arco-icon-right,.arco-cascader-option-disabled:hover .arco-icon-right,.arco-cascader-search-option-disabled:hover .arco-icon-right{color:inherit}.arco-cascader-option-disabled .arco-icon-check,.arco-cascader-search-option-disabled .arco-icon-check,.arco-cascader-option-disabled:hover .arco-icon-check,.arco-cascader-search-option-disabled:hover .arco-icon-check{color:var(--color-primary-light-3)}.arco-cascader-option-active{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .2s cubic-bezier(0,0,1,1)}.arco-cascader-option-active:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-cascader-option-active.arco-cascader-option-disabled,.arco-cascader-option-active.arco-cascader-option-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2)}.cascader-slide-enter-active,.cascader-slide-leave-active{transition:margin .3s cubic-bezier(.34,.69,.1,1)}.cascader-slide-enter-from,.cascader-slide-leave-to{margin-left:-120px}.cascader-slide-enter-to,.cascader-slide-leave-from{margin-left:0}.arco-icon-hover.arco-checkbox-icon-hover:before{width:24px;height:24px}.arco-checkbox{position:relative;display:inline-flex;align-items:center;box-sizing:border-box;padding-left:5px;font-size:14px;line-height:unset;cursor:pointer}.arco-checkbox>input[type=checkbox]{position:absolute;top:0;left:0;width:0;height:0;opacity:0}.arco-checkbox>input[type=checkbox]:focus-visible+.arco-checkbox-icon-hover:before{background-color:var(--color-fill-2)}.arco-checkbox:hover .arco-checkbox-icon-hover:before{background-color:var(--color-fill-2)}.arco-checkbox-label{margin-left:8px;color:var(--color-text-1)}.arco-checkbox-icon{position:relative;box-sizing:border-box;width:14px;height:14px;background-color:var(--color-bg-2);border:2px solid var(--color-fill-3);border-radius:var(--border-radius-small);user-select:none}.arco-checkbox-icon:after{position:absolute;top:50%;left:50%;display:block;width:6px;height:2px;background:var(--color-white);border-radius:.5px;transform:translate(-50%) translateY(-50%) scale(0);content:""}.arco-checkbox-icon-check{position:relative;display:block;width:8px;height:100%;margin:0 auto;color:var(--color-white);transform:scale(0);transform-origin:center 75%}.arco-checkbox:hover .arco-checkbox-icon{border-color:var(--color-fill-4);transition:border-color .1s cubic-bezier(0,0,1,1),transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-checked:hover .arco-checkbox-icon,.arco-checkbox-indeterminate:hover .arco-checkbox-icon{transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-checked .arco-checkbox-icon{background-color:rgb(var(--primary-6));border-color:transparent}.arco-checkbox-checked .arco-checkbox-icon-check{transform:scale(1);transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox-indeterminate .arco-checkbox-icon{background-color:rgb(var(--primary-6));border-color:transparent}.arco-checkbox-indeterminate .arco-checkbox-icon 
svg{transform:scale(0)}.arco-checkbox-indeterminate .arco-checkbox-icon:after{transform:translate(-50%) translateY(-50%) scale(1);transition:transform .3s cubic-bezier(.3,1.3,.3,1)}.arco-checkbox.arco-checkbox-disabled,.arco-checkbox.arco-checkbox-disabled .arco-checkbox-icon-hover{cursor:not-allowed}.arco-checkbox.arco-checkbox-disabled:hover .arco-checkbox-mask{border-color:var(--color-fill-3)}.arco-checkbox-checked:hover .arco-checkbox-icon,.arco-checkbox-indeterminate:hover .arco-checkbox-icon{border-color:transparent}.arco-checkbox-disabled .arco-checkbox-icon{background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-checkbox-disabled.arco-checkbox-checked .arco-checkbox-icon,.arco-checkbox-disabled.arco-checkbox-checked:hover .arco-checkbox-icon{background-color:var(--color-primary-light-3);border-color:transparent}.arco-checkbox-disabled:hover .arco-checkbox-icon-hover:before,.arco-checkbox-checked:hover .arco-checkbox-icon-hover:before,.arco-checkbox-indeterminate:hover .arco-checkbox-icon-hover:before{background-color:transparent}.arco-checkbox-disabled:hover .arco-checkbox-icon{border-color:var(--color-fill-3)}.arco-checkbox-disabled .arco-checkbox-label{color:var(--color-text-4)}.arco-checkbox-disabled .arco-checkbox-icon-check{color:var(--color-fill-3)}.arco-checkbox-group{display:inline-block}.arco-checkbox-group .arco-checkbox{margin-right:16px}.arco-checkbox-group-direction-vertical .arco-checkbox{display:flex;margin-right:0;line-height:32px}.arco-icon-hover.arco-collapse-item-icon-hover:before{width:16px;height:16px}.arco-icon-hover.arco-collapse-item-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-collapse{overflow:hidden;line-height:1.5715;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium)}.arco-collapse-item{box-sizing:border-box;border-bottom:1px solid var(--color-border-2)}.arco-collapse-item-active>.arco-collapse-item-header{background-color:var(--color-bg-2);border-color:var(--color-neutral-3);transition:border-color 0s ease 0s}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-header-title{font-weight:500}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-expand-icon{transform:rotate(90deg)}.arco-collapse-item-active>.arco-collapse-item-header .arco-collapse-item-icon-right .arco-collapse-item-expand-icon{transform:rotate(-90deg)}.arco-collapse-item-header{position:relative;display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;padding-top:8px;padding-bottom:8px;overflow:hidden;color:var(--color-text-1);font-size:14px;line-height:24px;background-color:var(--color-bg-2);border-bottom:1px solid transparent;cursor:pointer;transition:border-color 0s ease .19s}.arco-collapse-item-header-left{padding-right:13px;padding-left:34px}.arco-collapse-item-header-right{padding-right:34px;padding-left:13px}.arco-collapse-item-header-right+.arco-collapse-item-content{padding-left:13px}.arco-collapse-item-header-disabled{color:var(--color-text-4);background-color:var(--color-bg-2);cursor:not-allowed}.arco-collapse-item-header-disabled .arco-collapse-item-header-icon{color:var(--color-text-4)}.arco-collapse-item-header-title{display:inline}.arco-collapse-item-header-extra{float:right}.arco-collapse-item .arco-collapse-item-icon-hover{position:absolute;top:50%;left:13px;text-align:center;transform:translateY(-50%)}.arco-collapse-item .arco-collapse-item-icon-right{right:13px;left:unset}.arco-collapse-item 
.arco-collapse-item-icon-right>.arco-collapse-item-header-icon-down{transform:rotate(-90deg)}.arco-collapse-item .arco-collapse-item-expand-icon{position:relative;display:block;color:var(--color-neutral-7);font-size:14px;vertical-align:middle;transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-collapse-item-content{position:relative;padding-right:13px;padding-left:34px;overflow:hidden;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-1)}.arco-collapse-item-content-expanded{display:block;height:auto}.arco-collapse-item-content-box{padding:8px 0}.arco-collapse-item.arco-collapse-item-disabled>.arco-collapse-item-content{color:var(--color-text-4)}.arco-collapse-item-no-icon>.arco-collapse-item-header{padding-right:13px;padding-left:13px}.arco-collapse-item:last-of-type{border-bottom:none}.arco-collapse.arco-collapse-borderless{border:none}.arco-collapse:after{display:table;clear:both;content:""}.collapse-slider-enter-from,.collapse-slider-leave-to{height:0}.collapse-slider-enter-active,.collapse-slider-leave-active{transition:height .2s cubic-bezier(.34,.69,.1,1)}.arco-comment{display:flex;flex-wrap:nowrap;font-size:14px;line-height:1.5715}.arco-comment:not(:first-of-type),.arco-comment-inner-comment{margin-top:20px}.arco-comment-inner{flex:1}.arco-comment-avatar{flex-shrink:0;margin-right:12px;cursor:pointer}.arco-comment-avatar>img{width:32px;height:32px;border-radius:var(--border-radius-circle)}.arco-comment-author{margin-right:8px;color:var(--color-text-2);font-size:14px}.arco-comment-datetime{color:var(--color-text-3);font-size:12px}.arco-comment-content{color:var(--color-text-1)}.arco-comment-title-align-right{display:flex;justify-content:space-between}.arco-comment-actions{margin-top:8px;color:var(--color-text-2);font-size:14px}.arco-comment-actions>*:not(:last-child){margin-right:8px}.arco-comment-actions-align-right{display:flex;justify-content:flex-end}.arco-picker-container,.arco-picker-range-container{box-sizing:border-box;min-height:60px;overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-picker-container-shortcuts-placement-left,.arco-picker-range-container-shortcuts-placement-left,.arco-picker-container-shortcuts-placement-right,.arco-picker-range-container-shortcuts-placement-right{display:flex;align-items:flex-start}.arco-picker-container-shortcuts-placement-left>.arco-picker-shortcuts,.arco-picker-range-container-shortcuts-placement-left>.arco-picker-shortcuts,.arco-picker-container-shortcuts-placement-right>.arco-picker-shortcuts,.arco-picker-range-container-shortcuts-placement-right>.arco-picker-shortcuts{display:flex;flex-direction:column;box-sizing:border-box;padding:5px 8px;overflow-x:hidden;overflow-y:auto}.arco-picker-container-shortcuts-placement-left>.arco-picker-shortcuts>*,.arco-picker-range-container-shortcuts-placement-left>.arco-picker-shortcuts>*,.arco-picker-container-shortcuts-placement-right>.arco-picker-shortcuts>*,.arco-picker-range-container-shortcuts-placement-right>.arco-picker-shortcuts>*{margin:5px 0}.arco-picker-container-shortcuts-placement-left .arco-picker-panel-wrapper,.arco-picker-range-container-shortcuts-placement-left .arco-picker-panel-wrapper,.arco-picker-container-shortcuts-placement-left .arco-picker-range-panel-wrapper,.arco-picker-range-container-shortcuts-placement-left .arco-picker-range-panel-wrapper{border-left:1px solid 
var(--color-neutral-3)}.arco-picker-container-shortcuts-placement-right .arco-picker-panel-wrapper,.arco-picker-range-container-shortcuts-placement-right .arco-picker-panel-wrapper,.arco-picker-container-shortcuts-placement-right .arco-picker-range-panel-wrapper,.arco-picker-range-container-shortcuts-placement-right .arco-picker-range-panel-wrapper{border-right:1px solid var(--color-neutral-3)}.arco-picker-panel-only,.arco-picker-range-panel-only{box-shadow:none}.arco-picker-panel-only .arco-panel-date-inner,.arco-picker-range-panel-only .arco-panel-date-inner,.arco-picker-range-panel-only .arco-panel-date{width:100%}.arco-picker-header{display:flex;padding:8px 16px;border-bottom:1px solid var(--color-neutral-3)}.arco-picker-header-title{flex:1;color:var(--color-text-1);font-size:14px;line-height:24px;text-align:center}.arco-picker-header-icon{width:24px;height:24px;margin-right:2px;margin-left:2px;color:var(--color-text-2);font-size:12px;line-height:24px;text-align:center;background-color:var(--color-bg-popup);border-radius:50%;transition:all .1s cubic-bezier(0,0,1,1);user-select:none}.arco-picker-header-icon:not(.arco-picker-header-icon-hidden){cursor:pointer}.arco-picker-header-icon:not(.arco-picker-header-icon-hidden):hover{background-color:var(--color-fill-3)}.arco-picker-header-label{padding:2px;border-radius:2px;cursor:pointer;transition:all .1s}.arco-picker-header-label:hover{background-color:var(--color-fill-3)}.arco-picker-body{padding:14px 16px}.arco-picker-week-list{display:flex;box-sizing:border-box;width:100%;padding:14px 16px 0}.arco-picker-week-list-item{flex:1;height:32px;padding:0;color:#7d7d7f;font-weight:400;line-height:32px;text-align:center}.arco-picker-row{display:flex;padding:2px 0}.arco-picker-cell{flex:1}.arco-picker-cell .arco-picker-date{display:flex;justify-content:center;box-sizing:border-box;width:100%;height:100%;padding:4px 0;cursor:pointer}.arco-picker-date-value{min-width:24px;height:24px;color:var(--color-text-4);font-size:14px;line-height:24px;text-align:center;border-radius:var(--border-radius-circle);cursor:pointer}.arco-picker-cell-in-view .arco-picker-date-value{color:var(--color-text-1);font-weight:500}.arco-picker-cell-selected .arco-picker-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-picker-cell-in-view:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover{color:var(--color-text-1);background-color:var(--color-fill-3)}.arco-picker-cell-today{position:relative}.arco-picker-cell-today:after{position:absolute;bottom:-2px;left:50%;display:block;width:4px;height:4px;margin-left:-2px;background-color:rgb(var(--primary-6));border-radius:50%;content:""}.arco-picker-cell-in-range .arco-picker-date{background-color:var(--color-primary-light-1)}.arco-picker-cell-range-start .arco-picker-date{border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-picker-cell-range-end .arco-picker-date{border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-picker-cell-in-range-near-hover .arco-picker-date{border-radius:0}.arco-picker-cell-range-start .arco-picker-date-value,.arco-picker-cell-range-end .arco-picker-date-value{color:var(--color-white);background-color:rgb(var(--primary-6));border-radius:var(--border-radius-circle)}.arco-picker-cell-hover-in-range 
.arco-picker-date{background-color:var(--color-primary-light-1)}.arco-picker-cell-hover-range-start .arco-picker-date{border-radius:24px 0 0 24px}.arco-picker-cell-hover-range-end .arco-picker-date{border-radius:0 24px 24px 0}.arco-picker-cell-hover-range-start .arco-picker-date-value,.arco-picker-cell-hover-range-end .arco-picker-date-value{color:var(--color-text-1);background-color:var(--color-primary-light-2);border-radius:50%}.arco-picker-cell-disabled .arco-picker-date{background-color:var(--color-fill-1);cursor:not-allowed}.arco-picker-cell-disabled .arco-picker-date-value{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-picker-footer{width:min-content;min-width:100%}.arco-picker-footer-btn-wrapper{display:flex;align-items:center;justify-content:space-between;box-sizing:border-box;padding:3px 8px;border-top:1px solid var(--color-neutral-3)}.arco-picker-footer-btn-wrapper :only-child{margin-left:auto}.arco-picker-footer-extra-wrapper{box-sizing:border-box;padding:8px 24px;color:var(--color-text-1);font-size:12px;border-top:1px solid var(--color-neutral-3)}.arco-picker-footer-now-wrapper{box-sizing:border-box;height:36px;line-height:36px;text-align:center;border-top:1px solid var(--color-neutral-3)}.arco-picker-btn-confirm{margin:5px 0}.arco-picker-shortcuts{flex:1}.arco-picker-shortcuts>*{margin:5px 10px 5px 0}.arco-panel-date{display:flex;box-sizing:border-box}.arco-panel-date-inner{width:265px}.arco-panel-date-inner .arco-picker-body{padding-top:0}.arco-panel-date-timepicker{display:flex;flex-direction:column;border-left:1px solid var(--color-neutral-3)}.arco-panel-date-timepicker-title{width:100%;height:40px;color:var(--color-text-1);font-weight:400;font-size:14px;line-height:40px;text-align:center;border-bottom:1px solid var(--color-neutral-3)}.arco-panel-date-timepicker .arco-timepicker{height:276px;padding:0 6px;overflow:hidden}.arco-panel-date-timepicker .arco-timepicker-column{box-sizing:border-box;width:auto;height:100%;padding:0 4px}.arco-panel-date-timepicker .arco-timepicker-column::-webkit-scrollbar{width:0}.arco-panel-date-timepicker .arco-timepicker-column:not(:last-child){border-right:0}.arco-panel-date-timepicker .arco-timepicker ul:after{height:244px}.arco-panel-date-timepicker .arco-timepicker-cell{width:36px}.arco-panel-date-timepicker .arco-timepicker-cell-inner{padding-left:10px}.arco-panel-date-footer{border-right:1px solid var(--color-neutral-3)}.arco-panel-date-with-view-tabs{flex-direction:column;min-width:265px}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-column{flex:1}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-column::-webkit-scrollbar{width:0}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-cell{width:100%;text-align:center}.arco-panel-date-with-view-tabs .arco-panel-date-timepicker .arco-timepicker-cell-inner{padding-left:0}.arco-panel-date-view-tabs{display:flex;border-top:1px solid var(--color-neutral-3)}.arco-panel-date-view-tab-pane{flex:1;height:50px;color:var(--color-text-4);font-size:14px;line-height:50px;text-align:center;border-right:1px solid var(--color-neutral-3);cursor:pointer}.arco-panel-date-view-tab-pane:last-child{border-right:none}.arco-panel-date-view-tab-pane-text{margin-left:8px}.arco-panel-date-view-tab-pane-active{color:var(--color-text-1)}.arco-panel-month,.arco-panel-quarter,.arco-panel-year{box-sizing:border-box;width:265px}.arco-panel-month .arco-picker-date,.arco-panel-quarter 
.arco-picker-date,.arco-panel-year .arco-picker-date{padding:4px}.arco-panel-month .arco-picker-date-value,.arco-panel-quarter .arco-picker-date-value,.arco-panel-year .arco-picker-date-value{width:100%;border-radius:24px}.arco-panel-month .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover,.arco-panel-quarter .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover,.arco-panel-year .arco-picker-cell:not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end):not(.arco-picker-cell-disabled):not(.arco-picker-cell-week) .arco-picker-date-value:hover{border-radius:24px}.arco-panel-year{box-sizing:border-box;width:265px}.arco-panel-week{box-sizing:border-box}.arco-panel-week-wrapper{display:flex}.arco-panel-week-inner{width:298px}.arco-panel-week-inner .arco-picker-body{padding-top:0}.arco-panel-week .arco-picker-row-week{cursor:pointer}.arco-panel-week .arco-picker-row-week .arco-picker-date-value{width:100%;border-radius:0}.arco-panel-week .arco-picker-cell .arco-picker-date{border-radius:0}.arco-panel-week .arco-picker-cell:nth-child(2) .arco-picker-date{padding-left:4px;border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(2) .arco-picker-date .arco-picker-date-value{border-top-left-radius:24px;border-bottom-left-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(8) .arco-picker-date{padding-right:4px;border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-panel-week .arco-picker-cell:nth-child(8) .arco-picker-date .arco-picker-date-value{border-top-right-radius:24px;border-bottom-right-radius:24px}.arco-panel-week .arco-picker-row-week:hover .arco-picker-cell:not(.arco-picker-cell-week):not(.arco-picker-cell-selected):not(.arco-picker-cell-range-start):not(.arco-picker-cell-range-end) .arco-picker-date-value{background-color:var(--color-fill-3)}.arco-panel-quarter{box-sizing:border-box;width:265px}.arco-picker-range-wrapper{display:flex}.arco-datepicker-shortcuts-wrapper{box-sizing:border-box;width:106px;height:100%;max-height:300px;margin:10px 0 0;padding:0;overflow-y:auto;list-style:none}.arco-datepicker-shortcuts-wrapper>li{box-sizing:border-box;width:100%;padding:6px 16px;cursor:pointer}.arco-datepicker-shortcuts-wrapper>li:hover{color:rgb(var(--primary-6))}.arco-descriptions-table{width:100%;border-collapse:collapse}.arco-descriptions-table-layout-fixed table{table-layout:fixed}.arco-descriptions-title{margin-bottom:16px;color:var(--color-text-1);font-weight:500;font-size:16px;line-height:1.5715}.arco-descriptions-item,.arco-descriptions-item-label,.arco-descriptions-item-value{box-sizing:border-box;font-size:14px;line-height:1.5715;text-align:left}.arco-descriptions-table-layout-fixed .arco-descriptions-item-label{width:auto}.arco-descriptions-item-label-block{width:1px;padding:0 4px 12px 0;color:var(--color-text-3);font-weight:500;white-space:nowrap}.arco-descriptions-item-value-block{padding:0 4px 12px 
0;color:var(--color-text-1);font-weight:400;white-space:pre-wrap;word-break:break-word}.arco-descriptions-item-label-inline,.arco-descriptions-item-value-inline{box-sizing:border-box;font-size:14px;line-height:1.5715;text-align:left}.arco-descriptions-item-label-inline{margin-bottom:2px;color:var(--color-text-3);font-weight:500}.arco-descriptions-item-value-inline{color:var(--color-text-1);font-weight:400}.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-label-inline{margin-right:4px}.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-label-inline,.arco-descriptions-layout-inline-horizontal .arco-descriptions-item-value-inline{display:inline-block;margin-bottom:0}.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:12px 20px}.arco-descriptions-border .arco-descriptions-body{overflow:hidden;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium)}.arco-descriptions-border .arco-descriptions-row:not(:last-child){border-bottom:1px solid var(--color-neutral-3)}.arco-descriptions-border .arco-descriptions-item,.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-border .arco-descriptions-item-value-block{padding:7px 20px;border-right:1px solid var(--color-neutral-3)}.arco-descriptions-border .arco-descriptions-item-label-block{background-color:var(--color-fill-1)}.arco-descriptions-border .arco-descriptions-item-value-block:last-child{border-right:none}.arco-descriptions-border .arco-descriptions-item:last-child{border-right:none}.arco-descriptions-border.arco-descriptions-layout-vertical .arco-descriptions-item-label-block:last-child{border-right:none}.arco-descriptions-layout-vertical:not(.arco-descriptions-border) .arco-descriptions-item-value-block:first-child{padding-left:0}.arco-descriptions-size-mini .arco-descriptions-title{margin-bottom:6px}.arco-descriptions-size-mini .arco-descriptions-item-label-block,.arco-descriptions-size-mini .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:2px;font-size:12px}.arco-descriptions-size-mini.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-mini.arco-descriptions-border .arco-descriptions-item-value-block{padding:3px 20px}.arco-descriptions-size-mini.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:8px 20px}.arco-descriptions-size-small .arco-descriptions-title{margin-bottom:8px}.arco-descriptions-size-small .arco-descriptions-item-label-block,.arco-descriptions-size-small .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:4px;font-size:14px}.arco-descriptions-size-small.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-small.arco-descriptions-border .arco-descriptions-item-value-block{padding:3px 20px}.arco-descriptions-size-small.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:8px 20px}.arco-descriptions-size-medium .arco-descriptions-title{margin-bottom:12px}.arco-descriptions-size-medium .arco-descriptions-item-label-block,.arco-descriptions-size-medium .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:8px;font-size:14px}.arco-descriptions-size-medium.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-medium.arco-descriptions-border .arco-descriptions-item-value-block{padding:5px 
20px}.arco-descriptions-size-medium.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:10px 20px}.arco-descriptions-size-large .arco-descriptions-title{margin-bottom:20px}.arco-descriptions-size-large .arco-descriptions-item-label-block,.arco-descriptions-size-large .arco-descriptions-item-value-block{padding-right:20px;padding-bottom:16px;font-size:14px}.arco-descriptions-size-large.arco-descriptions-border .arco-descriptions-item-label-block,.arco-descriptions-size-large.arco-descriptions-border .arco-descriptions-item-value-block{padding:9px 20px}.arco-descriptions-size-large.arco-descriptions-border.arco-descriptions-layout-inline-vertical .arco-descriptions-item{padding:14px 20px}.arco-divider-horizontal{position:relative;clear:both;width:100%;min-width:100%;max-width:100%;margin:20px 0;border-bottom:1px solid var(--color-neutral-3)}.arco-divider-horizontal.arco-divider-with-text{margin:20px 0}.arco-divider-vertical{display:inline-block;min-width:1px;max-width:1px;height:1em;margin:0 12px;vertical-align:middle;border-left:1px solid var(--color-neutral-3)}.arco-divider-text{position:absolute;top:50%;box-sizing:border-box;padding:0 16px;color:var(--color-text-1);font-weight:500;font-size:14px;line-height:2;background:var(--color-bg-2);transform:translateY(-50%)}.arco-divider-text-center{left:50%;transform:translate(-50%,-50%)}.arco-divider-text-left{left:24px}.arco-divider-text-right{right:24px}.arco-drawer-container{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1001}.arco-drawer-mask{position:absolute;top:0;right:0;bottom:0;left:0;background-color:var(--color-mask-bg)}.arco-drawer{position:absolute;display:flex;flex-direction:column;width:100%;height:100%;overflow:auto;line-height:1.5715;background-color:var(--color-bg-3)}.arco-drawer-header{display:flex;flex-shrink:0;align-items:center;box-sizing:border-box;width:100%;height:48px;padding:0 16px;border-bottom:1px solid var(--color-neutral-3)}.arco-drawer-header .arco-drawer-title{margin-right:auto;color:var(--color-text-1);font-weight:500;font-size:16px;text-align:left}.arco-drawer-header .arco-drawer-close-btn{margin-left:8px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-drawer-footer{flex-shrink:0;box-sizing:border-box;padding:16px;text-align:right;border-top:1px solid var(--color-neutral-3)}.arco-drawer-footer>.arco-btn{margin-left:12px}.arco-drawer-body{position:relative;flex:1;box-sizing:border-box;height:100%;padding:12px 16px;overflow:auto;color:var(--color-text-1)}.fade-drawer-enter-from,.fade-drawer-appear-from{opacity:0}.fade-drawer-enter-to,.fade-drawer-appear-to{opacity:1}.fade-drawer-enter-active,.fade-drawer-appear-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.fade-drawer-leave-from{opacity:1}.fade-drawer-leave-to{opacity:0}.fade-drawer-leave-active{transition:opacity .3s cubic-bezier(.34,.69,.1,1)}.slide-left-drawer-enter-from,.slide-left-drawer-appear-from{transform:translate(-100%)}.slide-left-drawer-enter-to,.slide-left-drawer-appear-to{transform:translate(0)}.slide-left-drawer-enter-active,.slide-left-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-left-drawer-leave-from{transform:translate(0)}.slide-left-drawer-leave-to{transform:translate(-100%)}.slide-left-drawer-leave-active{transition:transform .3s 
cubic-bezier(.34,.69,.1,1)}.slide-right-drawer-enter-from,.slide-right-drawer-appear-from{transform:translate(100%)}.slide-right-drawer-enter-to,.slide-right-drawer-appear-to{transform:translate(0)}.slide-right-drawer-enter-active,.slide-right-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-right-drawer-leave-from{transform:translate(0)}.slide-right-drawer-leave-to{transform:translate(100%)}.slide-right-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-drawer-enter,.slide-top-drawer-appear,.slide-top-drawer-enter-from,.slide-top-drawer-appear-from{transform:translateY(-100%)}.slide-top-drawer-enter-to,.slide-top-drawer-appear-to{transform:translateY(0)}.slide-top-drawer-enter-active,.slide-top-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-top-drawer-leave-from{transform:translateY(0)}.slide-top-drawer-leave-to{transform:translateY(-100%)}.slide-top-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-drawer-enter-from,.slide-bottom-drawer-appear-from{transform:translateY(100%)}.slide-bottom-drawer-enter-to,.slide-bottom-drawer-appear-to{transform:translateY(0)}.slide-bottom-drawer-enter-active,.slide-bottom-drawer-appear-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.slide-bottom-drawer-leave-from{transform:translateY(0)}.slide-bottom-drawer-leave-to{transform:translateY(100%)}.slide-bottom-drawer-leave-active{transition:transform .3s cubic-bezier(.34,.69,.1,1)}.arco-dropdown{box-sizing:border-box;padding:4px 0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-dropdown-list{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-dropdown-list-wrapper{max-height:200px;overflow-y:auto}.arco-dropdown-option{position:relative;z-index:1;display:flex;align-items:center;box-sizing:border-box;width:100%;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:transparent;cursor:pointer}.arco-dropdown-option-content{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-dropdown-option-has-suffix{justify-content:space-between}.arco-dropdown-option-active,.arco-dropdown-option:not(.arco-dropdown-option-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .1s cubic-bezier(0,0,1,1)}.arco-dropdown-option-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-dropdown-option-icon{display:inline-flex;margin-right:8px}.arco-dropdown-option-suffix{margin-left:12px}.arco-dropdown-group:first-child .arco-dropdown-group-title{margin-top:8px}.arco-dropdown-group-title{box-sizing:border-box;width:100%;margin-top:8px;padding:0 12px;color:var(--color-text-3);font-size:12px;line-height:20px;cursor:default;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-dropdown-submenu{margin-top:-4px}.arco-dropdown.arco-dropdown-has-footer{padding-bottom:0}.arco-dropdown-footer{border-top:1px solid var(--color-fill-3)}.arco-empty{box-sizing:border-box;width:100%;padding:10px 0;text-align:center}.arco-empty-image{margin-bottom:4px;color:rgb(var(--gray-5));font-size:48px;line-height:1}.arco-empty-image img{height:80px}.arco-empty .arco-empty-description{color:rgb(var(--gray-5));font-size:14px}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-validating 
.arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-validating .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-validating .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-validating .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-validating .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-validating .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-fill-2)}.arco-form-item-status-validating .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-fill-3)}.arco-form-item-status-validating .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-validating .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--primary-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-form-item-status-validating .arco-form-item-message-help,.arco-form-item-status-validating .arco-form-item-feedback{color:rgb(var(--primary-6))}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-success .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-success .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--success-6));box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-success .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-fill-2);border-color:transparent}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-success .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-fill-3);border-color:transparent}.arco-form-item-status-success .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-success 
.arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--success-6));box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-fill-2)}.arco-form-item-status-success .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-fill-3)}.arco-form-item-status-success .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-success .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--success-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-success-light-2)}.arco-form-item-status-success .arco-form-item-message-help,.arco-form-item-status-success .arco-form-item-feedback{color:rgb(var(--success-6))}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-warning-light-1);border-color:transparent}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-warning-light-2);border-color:transparent}.arco-form-item-status-warning .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-warning .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--warning-6));box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-warning-light-1);border-color:transparent}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-warning-light-2);border-color:transparent}.arco-form-item-status-warning .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-warning .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--warning-6));box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-warning-light-1)}.arco-form-item-status-warning .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-warning-light-2)}.arco-form-item-status-warning .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-warning .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--warning-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-warning-light-2)}.arco-form-item-status-warning .arco-form-item-message-help,.arco-form-item-status-warning .arco-form-item-feedback{color:rgb(var(--warning-6))}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled),.arco-form-item-status-error .arco-textarea-wrapper:not(.arco-textarea-disabled){background-color:var(--color-danger-light-1);border-color:transparent}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled):hover,.arco-form-item-status-error 
.arco-textarea-wrapper:not(.arco-textarea-disabled):hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-form-item-status-error .arco-input-wrapper:not(.arco-input-disabled).arco-input-focus,.arco-form-item-status-error .arco-textarea-wrapper:not(.arco-textarea-disabled).arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled),.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled){background-color:var(--color-danger-light-1);border-color:transparent}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled):hover,.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled):hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-form-item-status-error .arco-select-view:not(.arco-select-view-disabled).arco-select-view-focus,.arco-form-item-status-error .arco-input-tag:not(.arco-input-tag-disabled).arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-picker:not(.arco-picker-disabled){border-color:transparent;background-color:var(--color-danger-light-1)}.arco-form-item-status-error .arco-picker:not(.arco-picker-disabled):hover{border-color:transparent;background-color:var(--color-danger-light-2)}.arco-form-item-status-error .arco-picker-focused:not(.arco-picker-disabled),.arco-form-item-status-error .arco-picker-focused:not(.arco-picker-disabled):hover{border-color:rgb(var(--danger-6));background-color:var(--color-bg-2);box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-form-item-status-error .arco-form-item-message-help,.arco-form-item-status-error .arco-form-item-feedback{color:rgb(var(--danger-6))}.arco-form-item-control-children{position:relative}.arco-form-item-feedback{position:absolute;top:50%;right:9px;font-size:14px;transform:translateY(-50%)}.arco-form-item-feedback .arco-icon-loading{font-size:12px}.arco-form-item-has-feedback .arco-input,.arco-form-item-has-feedback .arco-input-inner-wrapper,.arco-form-item-has-feedback .arco-textarea{padding-right:28px}.arco-form-item-has-feedback .arco-input-number-mode-embed .arco-input-number-step-layer{right:24px}.arco-form-item-has-feedback .arco-select.arco-select-multiple .arco-select-view,.arco-form-item-has-feedback .arco-select.arco-select-single .arco-select-view{padding-right:28px}.arco-form-item-has-feedback .arco-select.arco-select-multiple .arco-select-suffix{padding-right:0}.arco-form-item-has-feedback .arco-cascader.arco-cascader-multiple .arco-cascader-view,.arco-form-item-has-feedback .arco-cascader.arco-cascader-single .arco-cascader-view{padding-right:28px}.arco-form-item-has-feedback .arco-cascader.arco-cascader-multiple .arco-cascader-suffix{padding-right:0}.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-multiple .arco-tree-select-view,.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-single .arco-tree-select-view{padding-right:28px}.arco-form-item-has-feedback .arco-tree-select.arco-tree-select-multiple .arco-tree-select-suffix{padding-right:0}.arco-form-item-has-feedback .arco-picker{padding-right:28px}.arco-form-item-has-feedback .arco-picker-suffix .arco-picker-suffix-icon,.arco-form-item-has-feedback .arco-picker-suffix 
.arco-picker-clear-icon{margin-right:0;margin-left:0}.arco-form{display:flex;flex-direction:column;width:100%}.arco-form-layout-inline{flex-direction:row;flex-wrap:wrap}.arco-form-layout-inline .arco-form-item{width:auto;margin-bottom:8px}.arco-form-auto-label-width .arco-form-item-label-col>.arco-form-item-label{white-space:nowrap}.arco-form-item{display:flex;align-items:flex-start;justify-content:flex-start;width:100%;margin-bottom:20px}.arco-form-item-layout-vertical{display:block}.arco-form-item-layout-vertical>.arco-form-item-label-col{justify-content:flex-start;margin-bottom:8px;padding:0;line-height:1.5715;white-space:normal}.arco-form-item-layout-inline{margin-right:24px}.arco-form-item-label-col{padding-right:16px}.arco-form-item.arco-form-item-error,.arco-form-item.arco-form-item-has-help{margin-bottom:0}.arco-form-item-wrapper-flex.arco-col{flex:1}.arco-form-size-mini .arco-form-item-label-col{line-height:24px}.arco-form-size-mini .arco-form-item-label-col>.arco-form-item-label{font-size:12px}.arco-form-size-mini .arco-form-item-content,.arco-form-size-mini .arco-form-item-wrapper-col{min-height:24px}.arco-form-size-small .arco-form-item-label-col{line-height:28px}.arco-form-size-small .arco-form-item-label-col>.arco-form-item-label{font-size:14px}.arco-form-size-small .arco-form-item-content,.arco-form-size-small .arco-form-item-wrapper-col{min-height:28px}.arco-form-size-large .arco-form-item-label-col{line-height:36px}.arco-form-size-large .arco-form-item-label-col>.arco-form-item-label{font-size:14px}.arco-form-size-large .arco-form-item-content,.arco-form-size-large .arco-form-item-wrapper-col{min-height:36px}.arco-form-item-extra{margin-top:4px;color:var(--color-text-3);font-size:12px}.arco-form-item-message{min-height:20px;color:rgb(var(--danger-6));font-size:12px;line-height:20px}.arco-form-item-message-help{color:var(--color-text-3)}.arco-form-item-message+.arco-form-item-extra{margin-top:0;margin-bottom:4px}.arco-form-item-label-col{display:flex;flex-shrink:0;justify-content:flex-end;line-height:32px;white-space:nowrap}.arco-form-item-label-col-left{justify-content:flex-start}.arco-form-item-label-col>.arco-form-item-label{max-width:100%;color:var(--color-text-2);font-size:14px;white-space:normal}.arco-form-item-label-col.arco-form-item-label-col-flex{box-sizing:content-box}.arco-form-item-wrapper-col{display:flex;flex-direction:column;align-items:flex-start;width:100%;min-width:0;min-height:32px}.arco-form-item-content{flex:1;max-width:100%;min-height:32px}.arco-form-item-content-wrapper{display:flex;align-items:center;justify-content:flex-start;width:100%}.arco-form-item-content-flex{display:flex;align-items:center;justify-content:flex-start}.arco-form .arco-slider{display:block}.arco-form-item-label-required-symbol{color:rgb(var(--danger-6));font-size:12px;line-height:1}.arco-form-item-label-required-symbol svg{display:inline-block;transform:scale(.5)}.arco-form-item-label-tooltip{margin-left:4px;color:var(--color-text-4)}.form-blink-enter-from,.form-blink-appear-from{opacity:0}.form-blink-enter-to,.form-blink-appear-to{opacity:1}.form-blink-enter-active,.form-blink-appear-active{transition:opacity .3s cubic-bezier(0,0,1,1);animation:arco-form-blink .5s cubic-bezier(0,0,1,1)}@keyframes arco-form-blink{0%{opacity:1}50%{opacity:.2}to{opacity:1}}.arco-row{display:flex;flex-flow:row 
wrap}.arco-row-nowrap{flex-wrap:nowrap}.arco-row-align-start{align-items:flex-start}.arco-row-align-center{align-items:center}.arco-row-align-end{align-items:flex-end}.arco-row-justify-start{justify-content:flex-start}.arco-row-justify-center{justify-content:center}.arco-row-justify-end{justify-content:flex-end}.arco-row-justify-space-around{justify-content:space-around}.arco-row-justify-space-between{justify-content:space-between}.arco-col{box-sizing:border-box}.arco-col-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-3{flex:0 0 12.5%;width:12.5%}.arco-col-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-6{flex:0 0 25%;width:25%}.arco-col-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-9{flex:0 0 37.5%;width:37.5%}.arco-col-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-12{flex:0 0 50%;width:50%}.arco-col-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-15{flex:0 0 62.5%;width:62.5%}.arco-col-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-18{flex:0 0 75%;width:75%}.arco-col-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-21{flex:0 0 87.5%;width:87.5%}.arco-col-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-24{flex:0 0 100%;width:100%}.arco-col-offset-1{margin-left:4.16666667%}.arco-col-offset-2{margin-left:8.33333333%}.arco-col-offset-3{margin-left:12.5%}.arco-col-offset-4{margin-left:16.66666667%}.arco-col-offset-5{margin-left:20.83333333%}.arco-col-offset-6{margin-left:25%}.arco-col-offset-7{margin-left:29.16666667%}.arco-col-offset-8{margin-left:33.33333333%}.arco-col-offset-9{margin-left:37.5%}.arco-col-offset-10{margin-left:41.66666667%}.arco-col-offset-11{margin-left:45.83333333%}.arco-col-offset-12{margin-left:50%}.arco-col-offset-13{margin-left:54.16666667%}.arco-col-offset-14{margin-left:58.33333333%}.arco-col-offset-15{margin-left:62.5%}.arco-col-offset-16{margin-left:66.66666667%}.arco-col-offset-17{margin-left:70.83333333%}.arco-col-offset-18{margin-left:75%}.arco-col-offset-19{margin-left:79.16666667%}.arco-col-offset-20{margin-left:83.33333333%}.arco-col-offset-21{margin-left:87.5%}.arco-col-offset-22{margin-left:91.66666667%}.arco-col-offset-23{margin-left:95.83333333%}.arco-col-order-1{order:1}.arco-col-order-2{order:2}.arco-col-order-3{order:3}.arco-col-order-4{order:4}.arco-col-order-5{order:5}.arco-col-order-6{order:6}.arco-col-order-7{order:7}.arco-col-order-8{order:8}.arco-col-order-9{order:9}.arco-col-order-10{order:10}.arco-col-order-11{order:11}.arco-col-order-12{order:12}.arco-col-order-13{order:13}.arco-col-order-14{order:14}.arco-col-order-15{order:15}.arco-col-order-16{order:16}.arco-col-order-17{order:17}.arco-col-order-18{order:18}.arco-col-order-19{order:19}.arco-col-order-20{order:20}.arco-col-order-21{order:21}.arco-col-order-22{order:22}.arco-col-order-23{order:23}.arco-col-order-24{order:24}.arco-col-xs-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xs-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xs-3{flex:0 0 12.5%;width:12.5%}.arco-col-xs-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xs-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xs-6{flex:0 0 
25%;width:25%}.arco-col-xs-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xs-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xs-9{flex:0 0 37.5%;width:37.5%}.arco-col-xs-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xs-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xs-12{flex:0 0 50%;width:50%}.arco-col-xs-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xs-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xs-15{flex:0 0 62.5%;width:62.5%}.arco-col-xs-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xs-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xs-18{flex:0 0 75%;width:75%}.arco-col-xs-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xs-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xs-21{flex:0 0 87.5%;width:87.5%}.arco-col-xs-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xs-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xs-24{flex:0 0 100%;width:100%}.arco-col-xs-offset-1{margin-left:4.16666667%}.arco-col-xs-offset-2{margin-left:8.33333333%}.arco-col-xs-offset-3{margin-left:12.5%}.arco-col-xs-offset-4{margin-left:16.66666667%}.arco-col-xs-offset-5{margin-left:20.83333333%}.arco-col-xs-offset-6{margin-left:25%}.arco-col-xs-offset-7{margin-left:29.16666667%}.arco-col-xs-offset-8{margin-left:33.33333333%}.arco-col-xs-offset-9{margin-left:37.5%}.arco-col-xs-offset-10{margin-left:41.66666667%}.arco-col-xs-offset-11{margin-left:45.83333333%}.arco-col-xs-offset-12{margin-left:50%}.arco-col-xs-offset-13{margin-left:54.16666667%}.arco-col-xs-offset-14{margin-left:58.33333333%}.arco-col-xs-offset-15{margin-left:62.5%}.arco-col-xs-offset-16{margin-left:66.66666667%}.arco-col-xs-offset-17{margin-left:70.83333333%}.arco-col-xs-offset-18{margin-left:75%}.arco-col-xs-offset-19{margin-left:79.16666667%}.arco-col-xs-offset-20{margin-left:83.33333333%}.arco-col-xs-offset-21{margin-left:87.5%}.arco-col-xs-offset-22{margin-left:91.66666667%}.arco-col-xs-offset-23{margin-left:95.83333333%}.arco-col-xs-order-1{order:1}.arco-col-xs-order-2{order:2}.arco-col-xs-order-3{order:3}.arco-col-xs-order-4{order:4}.arco-col-xs-order-5{order:5}.arco-col-xs-order-6{order:6}.arco-col-xs-order-7{order:7}.arco-col-xs-order-8{order:8}.arco-col-xs-order-9{order:9}.arco-col-xs-order-10{order:10}.arco-col-xs-order-11{order:11}.arco-col-xs-order-12{order:12}.arco-col-xs-order-13{order:13}.arco-col-xs-order-14{order:14}.arco-col-xs-order-15{order:15}.arco-col-xs-order-16{order:16}.arco-col-xs-order-17{order:17}.arco-col-xs-order-18{order:18}.arco-col-xs-order-19{order:19}.arco-col-xs-order-20{order:20}.arco-col-xs-order-21{order:21}.arco-col-xs-order-22{order:22}.arco-col-xs-order-23{order:23}.arco-col-xs-order-24{order:24}@media (min-width: 576px){.arco-col-sm-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-sm-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-sm-3{flex:0 0 12.5%;width:12.5%}.arco-col-sm-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-sm-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-sm-6{flex:0 0 25%;width:25%}.arco-col-sm-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-sm-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-sm-9{flex:0 0 37.5%;width:37.5%}.arco-col-sm-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-sm-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-sm-12{flex:0 0 50%;width:50%}.arco-col-sm-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-sm-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-sm-15{flex:0 0 62.5%;width:62.5%}.arco-col-sm-16{flex:0 0 
66.66666667%;width:66.66666667%}.arco-col-sm-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-sm-18{flex:0 0 75%;width:75%}.arco-col-sm-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-sm-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-sm-21{flex:0 0 87.5%;width:87.5%}.arco-col-sm-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-sm-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-sm-24{flex:0 0 100%;width:100%}.arco-col-sm-offset-1{margin-left:4.16666667%}.arco-col-sm-offset-2{margin-left:8.33333333%}.arco-col-sm-offset-3{margin-left:12.5%}.arco-col-sm-offset-4{margin-left:16.66666667%}.arco-col-sm-offset-5{margin-left:20.83333333%}.arco-col-sm-offset-6{margin-left:25%}.arco-col-sm-offset-7{margin-left:29.16666667%}.arco-col-sm-offset-8{margin-left:33.33333333%}.arco-col-sm-offset-9{margin-left:37.5%}.arco-col-sm-offset-10{margin-left:41.66666667%}.arco-col-sm-offset-11{margin-left:45.83333333%}.arco-col-sm-offset-12{margin-left:50%}.arco-col-sm-offset-13{margin-left:54.16666667%}.arco-col-sm-offset-14{margin-left:58.33333333%}.arco-col-sm-offset-15{margin-left:62.5%}.arco-col-sm-offset-16{margin-left:66.66666667%}.arco-col-sm-offset-17{margin-left:70.83333333%}.arco-col-sm-offset-18{margin-left:75%}.arco-col-sm-offset-19{margin-left:79.16666667%}.arco-col-sm-offset-20{margin-left:83.33333333%}.arco-col-sm-offset-21{margin-left:87.5%}.arco-col-sm-offset-22{margin-left:91.66666667%}.arco-col-sm-offset-23{margin-left:95.83333333%}.arco-col-sm-order-1{order:1}.arco-col-sm-order-2{order:2}.arco-col-sm-order-3{order:3}.arco-col-sm-order-4{order:4}.arco-col-sm-order-5{order:5}.arco-col-sm-order-6{order:6}.arco-col-sm-order-7{order:7}.arco-col-sm-order-8{order:8}.arco-col-sm-order-9{order:9}.arco-col-sm-order-10{order:10}.arco-col-sm-order-11{order:11}.arco-col-sm-order-12{order:12}.arco-col-sm-order-13{order:13}.arco-col-sm-order-14{order:14}.arco-col-sm-order-15{order:15}.arco-col-sm-order-16{order:16}.arco-col-sm-order-17{order:17}.arco-col-sm-order-18{order:18}.arco-col-sm-order-19{order:19}.arco-col-sm-order-20{order:20}.arco-col-sm-order-21{order:21}.arco-col-sm-order-22{order:22}.arco-col-sm-order-23{order:23}.arco-col-sm-order-24{order:24}}@media (min-width: 768px){.arco-col-md-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-md-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-md-3{flex:0 0 12.5%;width:12.5%}.arco-col-md-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-md-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-md-6{flex:0 0 25%;width:25%}.arco-col-md-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-md-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-md-9{flex:0 0 37.5%;width:37.5%}.arco-col-md-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-md-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-md-12{flex:0 0 50%;width:50%}.arco-col-md-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-md-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-md-15{flex:0 0 62.5%;width:62.5%}.arco-col-md-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-md-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-md-18{flex:0 0 75%;width:75%}.arco-col-md-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-md-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-md-21{flex:0 0 87.5%;width:87.5%}.arco-col-md-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-md-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-md-24{flex:0 0 
100%;width:100%}.arco-col-md-offset-1{margin-left:4.16666667%}.arco-col-md-offset-2{margin-left:8.33333333%}.arco-col-md-offset-3{margin-left:12.5%}.arco-col-md-offset-4{margin-left:16.66666667%}.arco-col-md-offset-5{margin-left:20.83333333%}.arco-col-md-offset-6{margin-left:25%}.arco-col-md-offset-7{margin-left:29.16666667%}.arco-col-md-offset-8{margin-left:33.33333333%}.arco-col-md-offset-9{margin-left:37.5%}.arco-col-md-offset-10{margin-left:41.66666667%}.arco-col-md-offset-11{margin-left:45.83333333%}.arco-col-md-offset-12{margin-left:50%}.arco-col-md-offset-13{margin-left:54.16666667%}.arco-col-md-offset-14{margin-left:58.33333333%}.arco-col-md-offset-15{margin-left:62.5%}.arco-col-md-offset-16{margin-left:66.66666667%}.arco-col-md-offset-17{margin-left:70.83333333%}.arco-col-md-offset-18{margin-left:75%}.arco-col-md-offset-19{margin-left:79.16666667%}.arco-col-md-offset-20{margin-left:83.33333333%}.arco-col-md-offset-21{margin-left:87.5%}.arco-col-md-offset-22{margin-left:91.66666667%}.arco-col-md-offset-23{margin-left:95.83333333%}.arco-col-md-order-1{order:1}.arco-col-md-order-2{order:2}.arco-col-md-order-3{order:3}.arco-col-md-order-4{order:4}.arco-col-md-order-5{order:5}.arco-col-md-order-6{order:6}.arco-col-md-order-7{order:7}.arco-col-md-order-8{order:8}.arco-col-md-order-9{order:9}.arco-col-md-order-10{order:10}.arco-col-md-order-11{order:11}.arco-col-md-order-12{order:12}.arco-col-md-order-13{order:13}.arco-col-md-order-14{order:14}.arco-col-md-order-15{order:15}.arco-col-md-order-16{order:16}.arco-col-md-order-17{order:17}.arco-col-md-order-18{order:18}.arco-col-md-order-19{order:19}.arco-col-md-order-20{order:20}.arco-col-md-order-21{order:21}.arco-col-md-order-22{order:22}.arco-col-md-order-23{order:23}.arco-col-md-order-24{order:24}}@media (min-width: 992px){.arco-col-lg-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-lg-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-lg-3{flex:0 0 12.5%;width:12.5%}.arco-col-lg-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-lg-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-lg-6{flex:0 0 25%;width:25%}.arco-col-lg-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-lg-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-lg-9{flex:0 0 37.5%;width:37.5%}.arco-col-lg-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-lg-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-lg-12{flex:0 0 50%;width:50%}.arco-col-lg-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-lg-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-lg-15{flex:0 0 62.5%;width:62.5%}.arco-col-lg-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-lg-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-lg-18{flex:0 0 75%;width:75%}.arco-col-lg-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-lg-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-lg-21{flex:0 0 87.5%;width:87.5%}.arco-col-lg-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-lg-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-lg-24{flex:0 0 
100%;width:100%}.arco-col-lg-offset-1{margin-left:4.16666667%}.arco-col-lg-offset-2{margin-left:8.33333333%}.arco-col-lg-offset-3{margin-left:12.5%}.arco-col-lg-offset-4{margin-left:16.66666667%}.arco-col-lg-offset-5{margin-left:20.83333333%}.arco-col-lg-offset-6{margin-left:25%}.arco-col-lg-offset-7{margin-left:29.16666667%}.arco-col-lg-offset-8{margin-left:33.33333333%}.arco-col-lg-offset-9{margin-left:37.5%}.arco-col-lg-offset-10{margin-left:41.66666667%}.arco-col-lg-offset-11{margin-left:45.83333333%}.arco-col-lg-offset-12{margin-left:50%}.arco-col-lg-offset-13{margin-left:54.16666667%}.arco-col-lg-offset-14{margin-left:58.33333333%}.arco-col-lg-offset-15{margin-left:62.5%}.arco-col-lg-offset-16{margin-left:66.66666667%}.arco-col-lg-offset-17{margin-left:70.83333333%}.arco-col-lg-offset-18{margin-left:75%}.arco-col-lg-offset-19{margin-left:79.16666667%}.arco-col-lg-offset-20{margin-left:83.33333333%}.arco-col-lg-offset-21{margin-left:87.5%}.arco-col-lg-offset-22{margin-left:91.66666667%}.arco-col-lg-offset-23{margin-left:95.83333333%}.arco-col-lg-order-1{order:1}.arco-col-lg-order-2{order:2}.arco-col-lg-order-3{order:3}.arco-col-lg-order-4{order:4}.arco-col-lg-order-5{order:5}.arco-col-lg-order-6{order:6}.arco-col-lg-order-7{order:7}.arco-col-lg-order-8{order:8}.arco-col-lg-order-9{order:9}.arco-col-lg-order-10{order:10}.arco-col-lg-order-11{order:11}.arco-col-lg-order-12{order:12}.arco-col-lg-order-13{order:13}.arco-col-lg-order-14{order:14}.arco-col-lg-order-15{order:15}.arco-col-lg-order-16{order:16}.arco-col-lg-order-17{order:17}.arco-col-lg-order-18{order:18}.arco-col-lg-order-19{order:19}.arco-col-lg-order-20{order:20}.arco-col-lg-order-21{order:21}.arco-col-lg-order-22{order:22}.arco-col-lg-order-23{order:23}.arco-col-lg-order-24{order:24}}@media (min-width: 1200px){.arco-col-xl-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xl-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xl-3{flex:0 0 12.5%;width:12.5%}.arco-col-xl-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xl-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xl-6{flex:0 0 25%;width:25%}.arco-col-xl-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xl-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xl-9{flex:0 0 37.5%;width:37.5%}.arco-col-xl-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xl-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xl-12{flex:0 0 50%;width:50%}.arco-col-xl-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xl-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xl-15{flex:0 0 62.5%;width:62.5%}.arco-col-xl-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xl-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xl-18{flex:0 0 75%;width:75%}.arco-col-xl-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xl-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xl-21{flex:0 0 87.5%;width:87.5%}.arco-col-xl-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xl-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xl-24{flex:0 0 
100%;width:100%}.arco-col-xl-offset-1{margin-left:4.16666667%}.arco-col-xl-offset-2{margin-left:8.33333333%}.arco-col-xl-offset-3{margin-left:12.5%}.arco-col-xl-offset-4{margin-left:16.66666667%}.arco-col-xl-offset-5{margin-left:20.83333333%}.arco-col-xl-offset-6{margin-left:25%}.arco-col-xl-offset-7{margin-left:29.16666667%}.arco-col-xl-offset-8{margin-left:33.33333333%}.arco-col-xl-offset-9{margin-left:37.5%}.arco-col-xl-offset-10{margin-left:41.66666667%}.arco-col-xl-offset-11{margin-left:45.83333333%}.arco-col-xl-offset-12{margin-left:50%}.arco-col-xl-offset-13{margin-left:54.16666667%}.arco-col-xl-offset-14{margin-left:58.33333333%}.arco-col-xl-offset-15{margin-left:62.5%}.arco-col-xl-offset-16{margin-left:66.66666667%}.arco-col-xl-offset-17{margin-left:70.83333333%}.arco-col-xl-offset-18{margin-left:75%}.arco-col-xl-offset-19{margin-left:79.16666667%}.arco-col-xl-offset-20{margin-left:83.33333333%}.arco-col-xl-offset-21{margin-left:87.5%}.arco-col-xl-offset-22{margin-left:91.66666667%}.arco-col-xl-offset-23{margin-left:95.83333333%}.arco-col-xl-order-1{order:1}.arco-col-xl-order-2{order:2}.arco-col-xl-order-3{order:3}.arco-col-xl-order-4{order:4}.arco-col-xl-order-5{order:5}.arco-col-xl-order-6{order:6}.arco-col-xl-order-7{order:7}.arco-col-xl-order-8{order:8}.arco-col-xl-order-9{order:9}.arco-col-xl-order-10{order:10}.arco-col-xl-order-11{order:11}.arco-col-xl-order-12{order:12}.arco-col-xl-order-13{order:13}.arco-col-xl-order-14{order:14}.arco-col-xl-order-15{order:15}.arco-col-xl-order-16{order:16}.arco-col-xl-order-17{order:17}.arco-col-xl-order-18{order:18}.arco-col-xl-order-19{order:19}.arco-col-xl-order-20{order:20}.arco-col-xl-order-21{order:21}.arco-col-xl-order-22{order:22}.arco-col-xl-order-23{order:23}.arco-col-xl-order-24{order:24}}@media (min-width: 1600px){.arco-col-xxl-1{flex:0 0 4.16666667%;width:4.16666667%}.arco-col-xxl-2{flex:0 0 8.33333333%;width:8.33333333%}.arco-col-xxl-3{flex:0 0 12.5%;width:12.5%}.arco-col-xxl-4{flex:0 0 16.66666667%;width:16.66666667%}.arco-col-xxl-5{flex:0 0 20.83333333%;width:20.83333333%}.arco-col-xxl-6{flex:0 0 25%;width:25%}.arco-col-xxl-7{flex:0 0 29.16666667%;width:29.16666667%}.arco-col-xxl-8{flex:0 0 33.33333333%;width:33.33333333%}.arco-col-xxl-9{flex:0 0 37.5%;width:37.5%}.arco-col-xxl-10{flex:0 0 41.66666667%;width:41.66666667%}.arco-col-xxl-11{flex:0 0 45.83333333%;width:45.83333333%}.arco-col-xxl-12{flex:0 0 50%;width:50%}.arco-col-xxl-13{flex:0 0 54.16666667%;width:54.16666667%}.arco-col-xxl-14{flex:0 0 58.33333333%;width:58.33333333%}.arco-col-xxl-15{flex:0 0 62.5%;width:62.5%}.arco-col-xxl-16{flex:0 0 66.66666667%;width:66.66666667%}.arco-col-xxl-17{flex:0 0 70.83333333%;width:70.83333333%}.arco-col-xxl-18{flex:0 0 75%;width:75%}.arco-col-xxl-19{flex:0 0 79.16666667%;width:79.16666667%}.arco-col-xxl-20{flex:0 0 83.33333333%;width:83.33333333%}.arco-col-xxl-21{flex:0 0 87.5%;width:87.5%}.arco-col-xxl-22{flex:0 0 91.66666667%;width:91.66666667%}.arco-col-xxl-23{flex:0 0 95.83333333%;width:95.83333333%}.arco-col-xxl-24{flex:0 0 
100%;width:100%}.arco-col-xxl-offset-1{margin-left:4.16666667%}.arco-col-xxl-offset-2{margin-left:8.33333333%}.arco-col-xxl-offset-3{margin-left:12.5%}.arco-col-xxl-offset-4{margin-left:16.66666667%}.arco-col-xxl-offset-5{margin-left:20.83333333%}.arco-col-xxl-offset-6{margin-left:25%}.arco-col-xxl-offset-7{margin-left:29.16666667%}.arco-col-xxl-offset-8{margin-left:33.33333333%}.arco-col-xxl-offset-9{margin-left:37.5%}.arco-col-xxl-offset-10{margin-left:41.66666667%}.arco-col-xxl-offset-11{margin-left:45.83333333%}.arco-col-xxl-offset-12{margin-left:50%}.arco-col-xxl-offset-13{margin-left:54.16666667%}.arco-col-xxl-offset-14{margin-left:58.33333333%}.arco-col-xxl-offset-15{margin-left:62.5%}.arco-col-xxl-offset-16{margin-left:66.66666667%}.arco-col-xxl-offset-17{margin-left:70.83333333%}.arco-col-xxl-offset-18{margin-left:75%}.arco-col-xxl-offset-19{margin-left:79.16666667%}.arco-col-xxl-offset-20{margin-left:83.33333333%}.arco-col-xxl-offset-21{margin-left:87.5%}.arco-col-xxl-offset-22{margin-left:91.66666667%}.arco-col-xxl-offset-23{margin-left:95.83333333%}.arco-col-xxl-order-1{order:1}.arco-col-xxl-order-2{order:2}.arco-col-xxl-order-3{order:3}.arco-col-xxl-order-4{order:4}.arco-col-xxl-order-5{order:5}.arco-col-xxl-order-6{order:6}.arco-col-xxl-order-7{order:7}.arco-col-xxl-order-8{order:8}.arco-col-xxl-order-9{order:9}.arco-col-xxl-order-10{order:10}.arco-col-xxl-order-11{order:11}.arco-col-xxl-order-12{order:12}.arco-col-xxl-order-13{order:13}.arco-col-xxl-order-14{order:14}.arco-col-xxl-order-15{order:15}.arco-col-xxl-order-16{order:16}.arco-col-xxl-order-17{order:17}.arco-col-xxl-order-18{order:18}.arco-col-xxl-order-19{order:19}.arco-col-xxl-order-20{order:20}.arco-col-xxl-order-21{order:21}.arco-col-xxl-order-22{order:22}.arco-col-xxl-order-23{order:23}.arco-col-xxl-order-24{order:24}}.arco-grid{display:grid}.arco-image-trigger{padding:6px 4px;background:var(--color-bg-5);border:1px solid var(--color-neutral-3);border-radius:4px}.arco-image-trigger .arco-trigger-arrow{background-color:var(--color-bg-5);border:1px solid var(--color-neutral-3)}.arco-image{position:relative;display:inline-block;border-radius:var(--border-radius-small)}.arco-image-img{vertical-align:middle;border-radius:inherit}.arco-image-overlay{position:absolute;top:0;left:0;width:100%;height:100%}.arco-image-footer{display:flex;width:100%;max-width:100%}.arco-image-footer-caption{flex:1 1 auto}.arco-image-footer-caption-title{font-weight:500;font-size:16px}.arco-image-footer-caption-description{font-size:14px}.arco-image-footer-extra{flex:0 0 auto;padding-left:12px}.arco-image-with-footer-inner .arco-image-footer{position:absolute;bottom:0;left:0;align-items:center;box-sizing:border-box;padding:9px 16px;color:var(--color-white);background:linear-gradient(360deg,rgba(0,0,0,.3) 0%,rgba(0,0,0,0) 100%);border-bottom-right-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-image-with-footer-inner .arco-image-footer-caption-title,.arco-image-with-footer-inner .arco-image-footer-caption-description{color:var(--color-white)}.arco-image-with-footer-outer .arco-image-footer{margin-top:4px;color:var(--color-neutral-8)}.arco-image-with-footer-outer .arco-image-footer-caption-title{color:var(--color-text-1)}.arco-image-with-footer-outer 
.arco-image-footer-caption-description{color:var(--color-neutral-6)}.arco-image-error{display:flex;flex-direction:column;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:100%;color:var(--color-neutral-4);background-color:var(--color-neutral-1)}.arco-image-error-icon{width:60px;max-width:100%;height:60px;max-height:100%}.arco-image-error-icon>svg{width:100%;height:100%}.arco-image-error-alt{padding:8px 16px;font-size:12px;line-height:1.6667;text-align:center}.arco-image-loader{position:absolute;top:0;left:0;width:100%;height:100%;background-color:var(--color-neutral-1)}.arco-image-loader-spin{position:absolute;top:50%;left:50%;color:rgb(var(--primary-6));font-size:32px;text-align:center;transform:translate(-50%,-50%)}.arco-image-loader-spin-text{color:var(--color-neutral-6);font-size:16px}.arco-image-simple.arco-image-with-footer-inner .arco-image-footer{padding:12px 16px}.arco-image-loading .arco-image-img,.arco-image-loading-error .arco-image-img{visibility:hidden}.arco-image-preview{position:fixed;top:0;left:0;z-index:1001;width:100%;height:100%}.arco-image-preview-hide{display:none}.arco-image-preview-mask,.arco-image-preview-wrapper{position:absolute;top:0;left:0;width:100%;height:100%}.arco-image-preview-mask{background-color:var(--color-mask-bg)}.arco-image-preview-img-container{width:100%;height:100%;text-align:center}.arco-image-preview-img-container:before{display:inline-block;width:0;height:100%;vertical-align:middle;content:""}.arco-image-preview-img-container .arco-image-preview-img{display:inline-block;max-width:100%;max-height:100%;vertical-align:middle;cursor:grab;user-select:none}.arco-image-preview-img-container .arco-image-preview-img.arco-image-preview-img-moving{cursor:grabbing}.arco-image-preview-scale-value{box-sizing:border-box;padding:7px 10px;color:var(--color-white);font-size:12px;line-height:initial;background-color:#ffffff14;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.arco-image-preview-toolbar{position:absolute;bottom:46px;left:50%;display:flex;align-items:flex-start;padding:4px 
16px;background-color:var(--color-bg-2);border-radius:var(--border-radius-medium);transform:translate(-50%)}.arco-image-preview-toolbar-action{display:flex;align-items:center;color:var(--color-neutral-8);font-size:14px;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer}.arco-image-preview-toolbar-action:not(:last-of-type){margin-right:0}.arco-image-preview-toolbar-action:hover{color:rgb(var(--primary-6));background-color:var(--color-neutral-2)}.arco-image-preview-toolbar-action-disabled,.arco-image-preview-toolbar-action-disabled:hover{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-image-preview-toolbar-action-name{padding-right:12px;font-size:12px}.arco-image-preview-toolbar-action-content{padding:13px;line-height:1}.arco-image-preview-loading{display:flex;align-items:center;justify-content:center;box-sizing:border-box;width:48px;height:48px;padding:10px;color:rgb(var(--primary-6));font-size:18px;background-color:#232324;border-radius:var(--border-radius-medium);position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.arco-image-preview-close-btn{position:absolute;top:36px;right:36px;display:flex;align-items:center;justify-content:center;width:32px;height:32px;color:var(--color-white);font-size:14px;line-height:32px;text-align:center;background:rgba(0,0,0,.5);border-radius:50%;cursor:pointer}.arco-image-preview-arrow-left,.arco-image-preview-arrow-right{position:absolute;z-index:2;display:flex;align-items:center;justify-content:center;width:32px;height:32px;color:var(--color-white);background-color:#ffffff4d;border-radius:50%;cursor:pointer}.arco-image-preview-arrow-left>svg,.arco-image-preview-arrow-right>svg{color:var(--color-white);font-size:16px}.arco-image-preview-arrow-left:hover,.arco-image-preview-arrow-right:hover{background-color:#ffffff80}.arco-image-preview-arrow-left{top:50%;left:20px;transform:translateY(-50%)}.arco-image-preview-arrow-right{top:50%;right:20px;transform:translateY(-50%)}.arco-image-preview-arrow-disabled{color:#ffffff4d;background-color:#fff3;cursor:not-allowed}.arco-image-preview-arrow-disabled>svg{color:#ffffff4d}.arco-image-preview-arrow-disabled:hover{background-color:#fff3}.image-fade-enter-from,.image-fade-leave-to{opacity:0}.image-fade-enter-to,.image-fade-leave-from{opacity:1}.image-fade-enter-active,.image-fade-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.arco-input-number{position:relative;box-sizing:border-box;width:100%;border-radius:var(--border-radius-small)}.arco-input-number-step-button{display:flex;align-items:center;justify-content:center;box-sizing:border-box;padding:0;color:var(--color-text-2);background-color:var(--color-fill-2);cursor:pointer;user-select:none;transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-step-button:hover{background-color:var(--color-fill-3);border-color:var(--color-fill-3)}.arco-input-number-step-button:active{background-color:var(--color-fill-4);border-color:var(--color-fill-4)}.arco-input-number-step-button:disabled{color:var(--color-text-4);background-color:var(--color-fill-2);cursor:not-allowed}.arco-input-number-step-button:disabled:hover,.arco-input-number-step-button:disabled:active{background-color:var(--color-fill-2);border-color:var(--color-neutral-3)}.arco-input-number-prefix,.arco-input-number-suffix{transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-mode-embed 
.arco-input-number-step{position:absolute;top:4px;right:4px;bottom:4px;width:18px;overflow:hidden;border-radius:1px;opacity:0;transition:all .1s cubic-bezier(0,0,1,1)}.arco-input-number-mode-embed .arco-input-number-step .arco-input-number-step-button{width:100%;height:50%;font-size:10px;border:none;border-color:var(--color-neutral-3)}.arco-input-number-mode-embed .arco-input-suffix{justify-content:flex-end;min-width:6px}.arco-input-number-mode-embed .arco-input-suffix-has-feedback{min-width:32px}.arco-input-number-mode-embed .arco-input-suffix-has-feedback .arco-input-number-step{right:30px}.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):hover .arco-input-number-step,.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):focus-within .arco-input-number-step{opacity:1}.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):hover .arco-input-number-step~.arco-input-suffix,.arco-input-number-mode-embed:not(.arco-input-disabled):not(.arco-input-outer-disabled):focus-within .arco-input-number-step~.arco-input-suffix{opacity:0;pointer-events:none}.arco-input-number-mode-embed.arco-input-wrapper:not(.arco-input-focus) .arco-input-number-step-button:not(.arco-input-number-step-button-disabled):hover{background-color:var(--color-fill-4)}.arco-input-number-mode-button .arco-input-prepend,.arco-input-number-mode-button .arco-input-append{padding:0;border:none}.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button{border-right:1px solid transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button:not(.arco-input-number-mode-button .arco-input-prepend .arco-input-number-step-button:active){border-right-color:var(--color-neutral-3)}.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button{border-left:1px solid transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button:not(.arco-input-number-mode-button .arco-input-append .arco-input-number-step-button:active){border-left-color:var(--color-neutral-3)}.arco-input-tag{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s cubic-bezier(0,0,1,1)}.arco-input-tag:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-tag:focus-within,.arco-input-tag.arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-input-tag.arco-input-tag-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-tag.arco-input-tag-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-tag.arco-input-tag-disabled .arco-input-tag-prefix,.arco-input-tag.arco-input-tag-disabled 
.arco-input-tag-suffix{color:inherit}.arco-input-tag.arco-input-tag-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-tag.arco-input-tag-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-tag.arco-input-tag-error:focus-within,.arco-input-tag.arco-input-tag-error.arco-input-tag-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-tag .arco-input-tag-prefix,.arco-input-tag .arco-input-tag-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-tag .arco-input-tag-prefix>svg,.arco-input-tag .arco-input-tag-suffix>svg{font-size:14px}.arco-input-tag .arco-input-tag-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-tag .arco-input-tag-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon{display:inline-flex}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-tag .arco-input-tag-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-tag .arco-input-tag-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-tag .arco-input-tag-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-tag:hover .arco-input-tag-clear-btn{visibility:visible}.arco-input-tag:not(.arco-input-tag-focus) .arco-input-tag-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-tag.arco-input-tag-has-tag{padding-right:4px;padding-left:4px}.arco-input-tag.arco-input-tag-has-prefix{padding-left:12px}.arco-input-tag.arco-input-tag-has-suffix{padding-right:12px}.arco-input-tag .arco-input-tag-inner{flex:1;overflow:hidden;line-height:0}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag{display:inline-flex;align-items:center;margin-right:4px;color:var(--color-text-1);font-size:12px;white-space:pre-wrap;word-break:break-word;background-color:var(--color-bg-2);border-color:var(--color-fill-3)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag.arco-tag-custom-color{color:var(--color-white)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);box-sizing:border-box}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input::placeholder{color:var(--color-text-3)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-tag .arco-input-tag-inner .arco-input-tag-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-tag .arco-input-tag-mirror{position:absolute;top:0;left:0;white-space:pre;visibility:hidden;pointer-events:none}.arco-input-tag.arco-input-tag-focus 
.arco-input-tag-tag{background-color:var(--color-fill-2);border-color:var(--color-fill-2)}.arco-input-tag.arco-input-tag-focus .arco-input-tag-tag .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-input-tag.arco-input-tag-disabled .arco-input-tag-tag{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:var(--color-fill-3)}.arco-input-tag.arco-input-tag-readonly,.arco-input-tag.arco-input-tag-disabled-input{cursor:default}.arco-input-tag.arco-input-tag-size-mini{font-size:12px}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-inner{padding-top:0;padding-bottom:0}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-tag{height:auto;min-height:20px}.arco-input-tag.arco-input-tag-size-mini .arco-input-tag-input{height:20px}.arco-input-tag.arco-input-tag-size-medium{font-size:14px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:22px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-tag{height:auto;min-height:24px}.arco-input-tag.arco-input-tag-size-medium .arco-input-tag-input{height:24px}.arco-input-tag.arco-input-tag-size-small{font-size:14px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-small .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:18px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-tag{height:auto;min-height:20px}.arco-input-tag.arco-input-tag-size-small .arco-input-tag-input{height:20px}.arco-input-tag.arco-input-tag-size-large{font-size:14px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-inner{padding-top:2px;padding-bottom:2px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-tag,.arco-input-tag.arco-input-tag-size-large .arco-input-tag-input{margin-top:1px;margin-bottom:1px;line-height:26px;vertical-align:middle}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-tag{height:auto;min-height:28px}.arco-input-tag.arco-input-tag-size-large .arco-input-tag-input{height:28px}.input-tag-zoom-enter-from{transform:scale(.5);opacity:0}.input-tag-zoom-enter-to{transform:scale(1);opacity:1}.input-tag-zoom-enter-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.input-tag-zoom-leave-from{transform:scale(1);opacity:1}.input-tag-zoom-leave-to{transform:scale(.5);opacity:0}.input-tag-zoom-leave-active{position:absolute;transition:all .3s cubic-bezier(.3,1.3,.3,1)}.input-tag-zoom-move{transition:all .3s cubic-bezier(.3,1.3,.3,1)}.arco-input-wrapper{display:inline-flex;box-sizing:border-box;width:100%;padding-right:12px;padding-left:12px;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s 
cubic-bezier(0,0,1,1)}.arco-input-wrapper:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-input-wrapper:focus-within,.arco-input-wrapper.arco-input-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-input-wrapper.arco-input-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-input-wrapper.arco-input-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-input-wrapper.arco-input-disabled .arco-input-prefix,.arco-input-wrapper.arco-input-disabled .arco-input-suffix{color:inherit}.arco-input-wrapper.arco-input-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-input-wrapper.arco-input-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-input-wrapper.arco-input-error:focus-within,.arco-input-wrapper.arco-input-error.arco-input-wrapper-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-input-wrapper .arco-input-prefix,.arco-input-wrapper .arco-input-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-input-wrapper .arco-input-prefix>svg,.arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-wrapper .arco-input-prefix{padding-right:12px;color:var(--color-text-2)}.arco-input-wrapper .arco-input-suffix{padding-left:12px;color:var(--color-text-2)}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon{display:inline-flex}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-input-wrapper .arco-input-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-input-wrapper .arco-input-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-input-wrapper .arco-input-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-input-wrapper:hover .arco-input-clear-btn{visibility:visible}.arco-input-wrapper:not(.arco-input-focus) .arco-input-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-input-wrapper .arco-input{width:100%;padding-right:0;padding-left:0;color:inherit;line-height:1.5715;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0)}.arco-input-wrapper .arco-input::placeholder{color:var(--color-text-3)}.arco-input-wrapper .arco-input[disabled]::placeholder{color:var(--color-text-4)}.arco-input-wrapper .arco-input[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-input-wrapper .arco-input.arco-input-size-mini{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.arco-input-wrapper .arco-input.arco-input-size-small{padding-top:2px;padding-bottom:2px;font-size:14px;line-height:1.5715}.arco-input-wrapper .arco-input.arco-input-size-medium{padding-top:4px;padding-bottom:4px;font-size:14px;line-height:1.5715}.arco-input-wrapper .arco-input.arco-input-size-large{padding-top:6px;padding-bottom:6px;font-size:14px;line-height:1.5715}.arco-input-wrapper 
.arco-input-word-limit{color:var(--color-text-3);font-size:12px}.arco-input-outer{display:inline-flex;width:100%}.arco-input-outer>.arco-input-wrapper{border-radius:0}.arco-input-outer>:first-child{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-outer>:last-child{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-outer.arco-input-outer-size-mini .arco-input-outer,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-suffix{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-mini .arco-input-wrapper .arco-input-suffix>svg{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend,.arco-input-outer.arco-input-outer-size-mini .arco-input-append{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-mini .arco-input-append>svg{font-size:12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-mini .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-small .arco-input-outer,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-suffix{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-small .arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend,.arco-input-outer.arco-input-outer-size-small .arco-input-append{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-small .arco-input-append>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-small 
.arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-small .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-large .arco-input-outer,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-prefix,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-suffix{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-prefix>svg,.arco-input-outer.arco-input-outer-size-large .arco-input-wrapper .arco-input-suffix>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend,.arco-input-outer.arco-input-outer-size-large .arco-input-append{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend>svg,.arco-input-outer.arco-input-outer-size-large .arco-input-append>svg{font-size:14px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select{width:auto;height:100%;margin:-1px -12px -1px -13px}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-outer.arco-input-outer-size-large .arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-outer-disabled{cursor:not-allowed}.arco-input-prepend,.arco-input-append{display:inline-flex;flex-shrink:0;align-items:center;box-sizing:border-box;padding:0 12px;color:var(--color-text-1);white-space:nowrap;background-color:var(--color-fill-2);border:1px solid transparent}.arco-input-prepend>svg,.arco-input-append>svg{font-size:14px}.arco-input-prepend{border-right:1px solid var(--color-neutral-3)}.arco-input-prepend .arco-input{width:auto;height:100%;margin:-1px -12px -1px -13px;border-color:transparent;border-top-right-radius:0;border-bottom-right-radius:0}.arco-input-prepend .arco-select{width:auto;height:100%;margin:-1px -12px -1px 
-13px}.arco-input-prepend .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-prepend .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-append{border-left:1px solid var(--color-neutral-3)}.arco-input-append .arco-input{width:auto;height:100%;margin:-1px -13px -1px -12px;border-color:transparent;border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-append .arco-select{width:auto;height:100%;margin:-1px -13px -1px -12px}.arco-input-append .arco-select .arco-select-view{background-color:inherit;border-color:transparent;border-radius:0}.arco-input-append .arco-select.arco-select-single .arco-select-view{height:100%}.arco-input-group{display:inline-flex;align-items:center}.arco-input-group>*{border-radius:0}.arco-input-group>*.arco-input-outer>:last-child,.arco-input-group>*.arco-input-outer>:first-child{border-radius:0}.arco-input-group>*:not(:last-child){position:relative;box-sizing:border-box}.arco-input-group>*:first-child,.arco-input-group>*:first-child .arco-input-group>*:first-child{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-group>*:first-child .arco-select-view,.arco-input-group>*:first-child .arco-input-group>*:first-child .arco-select-view{border-top-left-radius:var(--border-radius-small);border-bottom-left-radius:var(--border-radius-small)}.arco-input-group>*:last-child,.arco-input-group>*:last-child .arco-input-outer>*:last-child{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-group>*:last-child .arco-select-view,.arco-input-group>*:last-child .arco-input-outer>*:last-child .arco-select-view{border-top-right-radius:var(--border-radius-small);border-bottom-right-radius:var(--border-radius-small)}.arco-input-group>.arco-input-wrapper:not(:last-child),.arco-input-group>.arco-input-outer:not(:last-child),.arco-input-group>.arco-input-tag:not(:last-child),.arco-input-group>.arco-select-view:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-input-group>.arco-input-wrapper:not(:last-child):focus-within,.arco-input-group>.arco-input-outer:not(:last-child):focus-within,.arco-input-group>.arco-input-tag:not(:last-child):focus-within,.arco-input-group>.arco-select-view:not(:last-child):focus-within{border-right-color:rgb(var(--primary-6))}.size-height-size-mini{padding-top:1px;padding-bottom:1px;font-size:12px;line-height:1.667}.size-height-size-small{padding-top:2px;padding-bottom:2px;font-size:14px}.size-height-size-large{padding-top:6px;padding-bottom:6px;font-size:14px}.arco-textarea-wrapper{position:relative;display:inline-block;width:100%}.arco-textarea-clear-wrapper:hover .arco-textarea-clear-icon{display:inline-block}.arco-textarea-clear-wrapper .arco-textarea{padding-right:20px}.arco-textarea-word-limit{position:absolute;right:10px;bottom:6px;color:var(--color-text-3);font-size:12px;user-select:none}.arco-textarea-clear-icon{position:absolute;top:10px;right:10px;display:none;font-size:12px}.arco-input-search .arco-input-append{padding:0;border:none}.arco-input-search .arco-input-suffix{color:var(--color-text-2);font-size:14px}.arco-input-search .arco-input-search-btn{border-top-left-radius:0;border-bottom-left-radius:0}.arco-input-wrapper.arco-input-password:not(.arco-input-disabled) 
.arco-input-suffix{color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-layout{display:flex;flex:1;flex-direction:column;margin:0;padding:0}.arco-layout-sider{position:relative;flex:none;width:auto;margin:0;padding:0;background:var(--color-menu-dark-bg);transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-layout-sider-children{height:100%;overflow:auto}.arco-layout-sider-collapsed .arco-layout-sider-children::-webkit-scrollbar{width:0}.arco-layout-sider-has-trigger{box-sizing:border-box;padding-bottom:48px}.arco-layout-sider-trigger{z-index:1;display:flex;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:48px;color:var(--color-white);background:rgba(255,255,255,.2);cursor:pointer;transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-layout-sider-trigger-light{color:var(--color-text-1);background:var(--color-menu-light-bg);border-top:1px solid var(--color-bg-5)}.arco-layout-sider-light{background:var(--color-menu-light-bg);box-shadow:0 2px 5px #00000014}.arco-layout-header{flex:0 0 auto;box-sizing:border-box;margin:0}.arco-layout-content{flex:1}.arco-layout-footer{flex:0 0 auto;margin:0}.arco-layout-has-sider{flex-direction:row}.arco-layout-has-sider>.arco-layout,.arco-layout-has-sider>.arco-layout-content{overflow-x:hidden}.arco-link{display:inline-flex;align-items:center;justify-content:center;padding:1px 4px;color:rgb(var(--link-6));font-size:14px;line-height:1.5715;text-decoration:none;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-link:hover{color:rgb(var(--link-6));background-color:var(--color-fill-2)}.arco-link:active{color:rgb(var(--link-6));background-color:var(--color-fill-3);transition:none}.arco-link.arco-link-hoverless{display:inline;padding:0;background-color:unset}.arco-link.arco-link-hoverless:active,.arco-link.arco-link-hoverless:hover{background-color:unset}.arco-link.arco-link-disabled{color:var(--color-link-light-3);background:none;cursor:not-allowed}.arco-link.arco-link-loading{color:var(--color-link-light-3);background:none;cursor:default}.arco-link-status-success,.arco-link-status-success:hover,.arco-link-status-success:active{color:rgb(var(--success-6))}.arco-link-status-success.arco-link-disabled,.arco-link-status-success.arco-link-loading{color:var(--color-success-light-3)}.arco-link-status-danger,.arco-link-status-danger:hover,.arco-link-status-danger:active{color:rgb(var(--danger-6))}.arco-link-status-danger.arco-link-disabled,.arco-link-status-danger.arco-link-loading{color:var(--color-danger-light-3)}.arco-link-status-warning,.arco-link-status-warning:hover,.arco-link-status-warning:active{color:rgb(var(--warning-6))}.arco-link-status-warning.arco-link-disabled,.arco-link-status-warning.arco-link-loading{color:var(--color-warning-light-2)}.arco-link-icon{margin-right:6px;font-size:12px;vertical-align:middle}.arco-list{display:flex;flex-direction:column;box-sizing:border-box;width:100%;overflow-y:auto;color:var(--color-text-1);font-size:14px;line-height:1.5715;border-radius:var(--border-radius-medium)}.arco-list-wrapper{overflow:hidden}.arco-list-wrapper .arco-list-spin{display:block;height:100%;overflow:hidden}.arco-list-content{overflow:hidden}.arco-list-small .arco-list-content-wrapper .arco-list-header{padding:8px 20px}.arco-list-small .arco-list-content-wrapper .arco-list-footer,.arco-list-small .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-small .arco-list-content-wrapper .arco-list-content 
.arco-list-col>.arco-list-item,.arco-list-small .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:9px 20px}.arco-list-medium .arco-list-content-wrapper .arco-list-header{padding:12px 20px}.arco-list-medium .arco-list-content-wrapper .arco-list-footer,.arco-list-medium .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-medium .arco-list-content-wrapper .arco-list-content .arco-list-col>.arco-list-item,.arco-list-medium .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:13px 20px}.arco-list-large .arco-list-content-wrapper .arco-list-header{padding:16px 20px}.arco-list-large .arco-list-content-wrapper .arco-list-footer,.arco-list-large .arco-list-content-wrapper .arco-list-content>.arco-list-item,.arco-list-large .arco-list-content-wrapper .arco-list-content .arco-list-col>.arco-list-item,.arco-list-large .arco-list-content-wrapper .arco-list-content.arco-list-virtual .arco-list-item{padding:17px 20px}.arco-list-bordered{border:1px solid var(--color-neutral-3)}.arco-list-split .arco-list-header,.arco-list-split .arco-list-item:not(:last-child){border-bottom:1px solid var(--color-neutral-3)}.arco-list-split .arco-list-footer{border-top:1px solid var(--color-neutral-3)}.arco-list-header{color:var(--color-text-1);font-weight:500;font-size:16px;line-height:1.5}.arco-list-item{display:flex;justify-content:space-between;box-sizing:border-box;width:100%;overflow:hidden}.arco-list-item-main{flex:1}.arco-list-item-main .arco-list-item-action:not(:first-child){margin-top:4px}.arco-list-item-meta{display:flex;align-items:center;padding:4px 0}.arco-list-item-meta-avatar{display:flex}.arco-list-item-meta-avatar:not(:last-child){margin-right:16px}.arco-list-item-meta-title{color:var(--color-text-1);font-weight:500}.arco-list-item-meta-title:not(:last-child){margin-bottom:2px}.arco-list-item-meta-description{color:var(--color-text-2)}.arco-list-item-action{display:flex;flex-wrap:nowrap;align-self:center;margin:0;padding:0;list-style:none}.arco-list-item-action>li{display:inline-block;cursor:pointer}.arco-list-item-action>li:not(:last-child){margin-right:20px}.arco-list-hover .arco-list-item:hover{background-color:var(--color-fill-1)}.arco-list-pagination{float:right;margin-top:24px}.arco-list-pagination:after{display:block;clear:both;height:0;overflow:hidden;visibility:hidden;content:""}.arco-list-scroll-loading{display:flex;align-items:center;justify-content:center}.arco-list-content{flex:auto}.arco-list-content .arco-empty{display:flex;align-items:center;justify-content:center;height:100%}.arco-mention{position:relative;display:inline-block;box-sizing:border-box;width:100%}.arco-mention-measure{position:absolute;top:0;right:0;bottom:0;left:0;overflow:auto;visibility:hidden;pointer-events:none}.arco-menu{position:relative;box-sizing:border-box;width:100%;font-size:14px;line-height:1.5715;transition:width .2s cubic-bezier(.34,.69,.1,1)}.arco-menu:focus-visible{outline:3px solid var(--color-primary-light-2)}.arco-menu-indent{display:inline-block;width:20px}.arco-menu .arco-menu-item,.arco-menu .arco-menu-group-title,.arco-menu .arco-menu-pop-header,.arco-menu .arco-menu-inline-header{position:relative;box-sizing:border-box;border-radius:var(--border-radius-small);cursor:pointer}.arco-menu .arco-menu-item.arco-menu-disabled,.arco-menu .arco-menu-group-title.arco-menu-disabled,.arco-menu .arco-menu-pop-header.arco-menu-disabled,.arco-menu .arco-menu-inline-header.arco-menu-disabled{cursor:not-allowed}.arco-menu 
.arco-menu-item.arco-menu-selected,.arco-menu .arco-menu-group-title.arco-menu-selected,.arco-menu .arco-menu-pop-header.arco-menu-selected,.arco-menu .arco-menu-inline-header.arco-menu-selected{font-weight:500;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu .arco-menu-item .arco-icon,.arco-menu .arco-menu-group-title .arco-icon,.arco-menu .arco-menu-pop-header .arco-icon,.arco-menu .arco-menu-inline-header .arco-icon,.arco-menu .arco-menu-item .arco-menu-icon,.arco-menu .arco-menu-group-title .arco-menu-icon,.arco-menu .arco-menu-pop-header .arco-menu-icon,.arco-menu .arco-menu-inline-header .arco-menu-icon{margin-right:16px}.arco-menu .arco-menu-item .arco-menu-icon .arco-icon,.arco-menu .arco-menu-group-title .arco-menu-icon .arco-icon,.arco-menu .arco-menu-pop-header .arco-menu-icon .arco-icon,.arco-menu .arco-menu-inline-header .arco-menu-icon .arco-icon{margin-right:0}.arco-menu-light{background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item,.arco-menu-light .arco-menu-group-title,.arco-menu-light .arco-menu-pop-header,.arco-menu-light .arco-menu-inline-header{color:var(--color-text-2);background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item .arco-icon,.arco-menu-light .arco-menu-group-title .arco-icon,.arco-menu-light .arco-menu-pop-header .arco-icon,.arco-menu-light .arco-menu-inline-header .arco-icon,.arco-menu-light .arco-menu-item .arco-menu-icon,.arco-menu-light .arco-menu-group-title .arco-menu-icon,.arco-menu-light .arco-menu-pop-header .arco-menu-icon,.arco-menu-light .arco-menu-inline-header .arco-menu-icon{color:var(--color-text-3)}.arco-menu-light .arco-menu-item:hover,.arco-menu-light .arco-menu-group-title:hover,.arco-menu-light .arco-menu-pop-header:hover,.arco-menu-light .arco-menu-inline-header:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-item:hover .arco-icon,.arco-menu-light .arco-menu-group-title:hover .arco-icon,.arco-menu-light .arco-menu-pop-header:hover .arco-icon,.arco-menu-light .arco-menu-inline-header:hover .arco-icon,.arco-menu-light .arco-menu-item:hover .arco-menu-icon,.arco-menu-light .arco-menu-group-title:hover .arco-menu-icon,.arco-menu-light .arco-menu-pop-header:hover .arco-menu-icon,.arco-menu-light .arco-menu-inline-header:hover .arco-menu-icon{color:var(--color-text-3)}.arco-menu-light .arco-menu-item.arco-menu-selected,.arco-menu-light .arco-menu-group-title.arco-menu-selected,.arco-menu-light .arco-menu-pop-header.arco-menu-selected,.arco-menu-light .arco-menu-inline-header.arco-menu-selected,.arco-menu-light .arco-menu-item.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-group-title.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-item.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-group-title.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-selected .arco-menu-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-light .arco-menu-item.arco-menu-disabled,.arco-menu-light .arco-menu-group-title.arco-menu-disabled,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled{color:var(--color-text-4);background-color:var(--color-menu-light-bg)}.arco-menu-light .arco-menu-item.arco-menu-disabled 
.arco-icon,.arco-menu-light .arco-menu-group-title.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled .arco-icon,.arco-menu-light .arco-menu-item.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-group-title.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-pop-header.arco-menu-disabled .arco-menu-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-disabled .arco-menu-icon{color:var(--color-text-4)}.arco-menu-light .arco-menu-item.arco-menu-selected{background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-inline-header.arco-menu-selected,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-light .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-light .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-fill-2)}.arco-menu-light.arco-menu-horizontal .arco-menu-item.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected,.arco-menu-light.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected{background:none;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu-light.arco-menu-horizontal .arco-menu-item.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected:hover,.arco-menu-light.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-fill-2)}.arco-menu-light .arco-menu-group-title{color:var(--color-text-3);pointer-events:none}.arco-menu-light .arco-menu-collapse-button{color:var(--color-text-3);background-color:var(--color-fill-1)}.arco-menu-light .arco-menu-collapse-button:hover{background-color:var(--color-fill-3)}.arco-menu-dark{background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item,.arco-menu-dark .arco-menu-group-title,.arco-menu-dark .arco-menu-pop-header,.arco-menu-dark .arco-menu-inline-header{color:var(--color-text-4);background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item .arco-icon,.arco-menu-dark .arco-menu-group-title .arco-icon,.arco-menu-dark .arco-menu-pop-header .arco-icon,.arco-menu-dark .arco-menu-inline-header .arco-icon,.arco-menu-dark .arco-menu-item .arco-menu-icon,.arco-menu-dark .arco-menu-group-title .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header .arco-menu-icon{color:var(--color-text-3)}.arco-menu-dark .arco-menu-item:hover,.arco-menu-dark .arco-menu-group-title:hover,.arco-menu-dark .arco-menu-pop-header:hover,.arco-menu-dark .arco-menu-inline-header:hover{color:var(--color-text-4);background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-item:hover .arco-icon,.arco-menu-dark .arco-menu-group-title:hover .arco-icon,.arco-menu-dark .arco-menu-pop-header:hover .arco-icon,.arco-menu-dark .arco-menu-inline-header:hover .arco-icon,.arco-menu-dark .arco-menu-item:hover .arco-menu-icon,.arco-menu-dark .arco-menu-group-title:hover .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header:hover .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header:hover .arco-menu-icon{color:var(--color-text-3)}.arco-menu-dark .arco-menu-item.arco-menu-selected,.arco-menu-dark 
.arco-menu-group-title.arco-menu-selected,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected,.arco-menu-dark .arco-menu-item.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-item.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-selected .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:var(--color-white)}.arco-menu-dark .arco-menu-item.arco-menu-disabled,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled{color:var(--color-text-2);background-color:var(--color-menu-dark-bg)}.arco-menu-dark .arco-menu-item.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled .arco-icon,.arco-menu-dark .arco-menu-item.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-group-title.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-pop-header.arco-menu-disabled .arco-menu-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-disabled .arco-menu-icon{color:var(--color-text-2)}.arco-menu-dark .arco-menu-item.arco-menu-selected{background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-inline-header.arco-menu-selected,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-icon,.arco-menu-dark .arco-menu-inline-header.arco-menu-selected .arco-menu-icon{color:rgb(var(--primary-6))}.arco-menu-dark .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-menu-dark-hover)}.arco-menu-dark.arco-menu-horizontal .arco-menu-item.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected,.arco-menu-dark.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected{background:none;transition:color .2s cubic-bezier(0,0,1,1)}.arco-menu-dark.arco-menu-horizontal .arco-menu-item.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-group-title.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-pop-header.arco-menu-selected:hover,.arco-menu-dark.arco-menu-horizontal .arco-menu-inline-header.arco-menu-selected:hover{background-color:var(--color-menu-dark-hover)}.arco-menu-dark .arco-menu-group-title{color:var(--color-text-3);pointer-events:none}.arco-menu-dark .arco-menu-collapse-button{color:var(--color-white);background-color:rgb(var(--primary-6))}.arco-menu-dark .arco-menu-collapse-button:hover{background-color:rgb(var(--primary-7))}.arco-menu a,.arco-menu a:hover,.arco-menu a:focus,.arco-menu a:active{color:inherit;text-decoration:none;cursor:inherit}.arco-menu-inner{box-sizing:border-box;width:100%;height:100%;overflow:auto}.arco-menu-icon-suffix.is-open{transform:rotate(180deg)}.arco-menu-vertical .arco-menu-item,.arco-menu-vertical .arco-menu-group-title,.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{padding:0 
12px;line-height:40px}.arco-menu-vertical .arco-menu-item .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-group-title .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-pop-header .arco-menu-icon-suffix .arco-icon,.arco-menu-vertical .arco-menu-inline-header .arco-menu-icon-suffix .arco-icon{margin-right:0}.arco-menu-vertical .arco-menu-item,.arco-menu-vertical .arco-menu-group-title,.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{margin-bottom:4px}.arco-menu-vertical .arco-menu-item:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-group-title:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-pop-header:not(.arco-menu-has-icon),.arco-menu-vertical .arco-menu-inline-header:not(.arco-menu-has-icon){overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon{display:flex;align-items:center}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon>.arco-menu-indent-list,.arco-menu-vertical .arco-menu-item.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon>.arco-menu-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon>.arco-menu-icon{flex:none}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon .arco-menu-icon,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon .arco-menu-icon{line-height:1}.arco-menu-vertical .arco-menu-item.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-group-title.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-pop-header.arco-menu-has-icon .arco-menu-title,.arco-menu-vertical .arco-menu-inline-header.arco-menu-has-icon .arco-menu-title{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-menu-vertical .arco-menu-item .arco-menu-item-inner,.arco-menu-vertical .arco-menu-group-title .arco-menu-item-inner,.arco-menu-vertical .arco-menu-pop-header .arco-menu-item-inner,.arco-menu-vertical .arco-menu-inline-header .arco-menu-item-inner{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;width:100%}.arco-menu-vertical .arco-menu-item .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-group-title .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-pop-header .arco-menu-icon-suffix,.arco-menu-vertical .arco-menu-inline-header .arco-menu-icon-suffix{position:absolute;right:12px}.arco-menu-vertical .arco-menu-inner{padding:4px 8px}.arco-menu-vertical .arco-menu-item.arco-menu-item-indented{display:flex}.arco-menu-vertical .arco-menu-pop-header,.arco-menu-vertical .arco-menu-inline-header{padding-right:28px}.arco-menu-horizontal{width:100%;height:auto}.arco-menu-horizontal .arco-menu-item,.arco-menu-horizontal .arco-menu-group-title,.arco-menu-horizontal .arco-menu-pop-header,.arco-menu-horizontal .arco-menu-inline-header{padding:0 
12px;line-height:30px}.arco-menu-horizontal .arco-menu-item .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon-suffix .arco-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon-suffix .arco-icon{margin-right:0}.arco-menu-horizontal .arco-menu-item .arco-icon,.arco-menu-horizontal .arco-menu-group-title .arco-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-icon,.arco-menu-horizontal .arco-menu-item .arco-menu-icon,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon{margin-right:16px}.arco-menu-horizontal .arco-menu-item .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-group-title .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-pop-header .arco-menu-icon-suffix,.arco-menu-horizontal .arco-menu-inline-header .arco-menu-icon-suffix{margin-left:6px}.arco-menu-horizontal .arco-menu-inner{display:flex;align-items:center;padding:14px 20px}.arco-menu-horizontal .arco-menu-item,.arco-menu-horizontal .arco-menu-pop{display:inline-block;flex-shrink:0;vertical-align:middle}.arco-menu-horizontal .arco-menu-item:not(:first-child),.arco-menu-horizontal .arco-menu-pop:not(:first-child){margin-left:12px}.arco-menu-horizontal .arco-menu-pop:after{position:absolute;bottom:-14px;left:0;width:100%;height:14px;content:" "}.arco-menu-overflow-wrap{width:100%}.arco-menu-overflow-sub-menu-mirror,.arco-menu-overflow-hidden-menu-item{position:absolute!important;white-space:nowrap;visibility:hidden;pointer-events:none}.arco-menu-selected-label{position:absolute;right:12px;bottom:-14px;left:12px;height:3px;background-color:rgb(var(--primary-6))}.arco-menu-pop-button{width:auto;background:none;box-shadow:none}.arco-menu-pop-button.arco-menu-collapsed{width:auto}.arco-menu-pop-button .arco-menu-item,.arco-menu-pop-button .arco-menu-group-title,.arco-menu-pop-button .arco-menu-pop-header,.arco-menu-pop-button .arco-menu-inline-header{width:40px;height:40px;margin-bottom:16px;line-height:40px;border:1px solid transparent;border-radius:50%;box-shadow:0 4px 10px #0000001a}.arco-menu-collapsed{width:48px}.arco-menu-collapsed .arco-menu-inner{padding:4px}.arco-menu-collapsed .arco-menu-icon-suffix{display:none}.arco-menu-collapsed .arco-menu-has-icon>*:not(.arco-menu-icon){opacity:0}.arco-menu-collapsed .arco-menu-item .arco-icon,.arco-menu-collapsed .arco-menu-group-title .arco-icon,.arco-menu-collapsed .arco-menu-pop-header .arco-icon,.arco-menu-collapsed .arco-menu-inline-header .arco-icon{margin-right:100%}.arco-menu-collapse-button{position:absolute;right:12px;bottom:12px;display:flex;align-items:center;justify-content:center;width:24px;height:24px;border-radius:var(--border-radius-small);cursor:pointer}.arco-menu-inline-content{height:auto;overflow:hidden;transition:height .2s cubic-bezier(.34,.69,.1,1)}.arco-menu-inline-content-hide{height:0}.arco-menu-item-tooltip a{color:inherit;cursor:text}.arco-menu-item-tooltip a:hover,.arco-menu-item-tooltip a:focus,.arco-menu-item-tooltip a:active{color:inherit}.arco-menu-pop-trigger.arco-trigger-position-bl{transform:translateY(14px)}.arco-menu-pop-trigger.arco-trigger-position-bl .arco-trigger-arrow{z-index:0;border-top:1px solid var(--color-neutral-3);border-left:1px solid 
var(--color-neutral-3)}.arco-menu-pop-trigger.arco-trigger-position-rt{transform:translate(8px)}.arco-menu-pop-trigger.arco-trigger-position-rt .arco-trigger-arrow{z-index:0;border-bottom:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-menu-pop-trigger.arco-menu-pop-trigger-dark .arco-trigger-arrow{background-color:var(--color-menu-dark-bg);border-color:var(--color-menu-dark-bg)}.arco-trigger-menu{position:relative;box-sizing:border-box;max-height:200px;padding:4px 0;overflow:auto;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-trigger-menu-hidden{display:none}.arco-trigger-menu-item,.arco-trigger-menu-pop-header{position:relative;z-index:1;box-sizing:border-box;width:100%;height:36px;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:transparent;cursor:pointer;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-item.arco-trigger-menu-selected,.arco-trigger-menu-pop-header.arco-trigger-menu-selected{color:var(--color-text-1);font-weight:500;background-color:transparent;transition:all .1s cubic-bezier(0,0,1,1)}.arco-trigger-menu-item:hover,.arco-trigger-menu-pop-header:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-trigger-menu-item.arco-trigger-menu-disabled,.arco-trigger-menu-pop-header.arco-trigger-menu-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-trigger-menu .arco-trigger-menu-has-icon{display:flex;align-items:center}.arco-trigger-menu .arco-trigger-menu-has-icon .arco-trigger-menu-icon{margin-right:8px;line-height:1}.arco-trigger-menu .arco-trigger-menu-has-icon>*{flex:none}.arco-trigger-menu .arco-trigger-menu-has-icon .arco-trigger-menu-title{flex:auto;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-pop-header{display:flex;align-items:center;justify-content:space-between}.arco-trigger-menu-pop-header .arco-trigger-menu-icon-suffix{margin-left:12px}.arco-trigger-menu-group:first-child .arco-trigger-menu-group-title{padding-top:4px}.arco-trigger-menu-group-title{box-sizing:border-box;width:100%;padding:8px 12px 0;color:var(--color-text-3);font-size:12px;line-height:20px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-trigger-menu-pop-trigger .arco-trigger-arrow{display:none}.arco-trigger-menu-dark{background-color:var(--color-menu-dark-bg);border-color:var(--color-menu-dark-bg)}.arco-trigger-menu-dark .arco-trigger-menu-item,.arco-trigger-menu-dark .arco-trigger-menu-pop-header{color:var(--color-text-4);background-color:transparent}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-selected,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-selected{color:var(--color-white);background-color:transparent}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-selected:hover,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-selected:hover{color:var(--color-white)}.arco-trigger-menu-dark .arco-trigger-menu-item:hover,.arco-trigger-menu-dark .arco-trigger-menu-pop-header:hover{color:var(--color-text-4);background-color:var(--color-menu-dark-hover)}.arco-trigger-menu-dark .arco-trigger-menu-item.arco-trigger-menu-disabled,.arco-trigger-menu-dark .arco-trigger-menu-pop-header.arco-trigger-menu-disabled{color:var(--color-text-2);background-color:transparent}.arco-trigger-menu-dark 
.arco-trigger-menu-group-title{color:var(--color-text-3)}.arco-message-list{position:fixed;z-index:1003;display:flex;flex-direction:column;align-items:center;box-sizing:border-box;width:100%;margin:0;padding:0 10px;text-align:center;pointer-events:none}.arco-message-list-top{top:40px}.arco-message-list-bottom{bottom:40px}.arco-message{position:relative;display:inline-flex;align-items:center;margin-bottom:16px;padding:10px 16px;overflow:hidden;line-height:1;text-align:center;list-style:none;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small);box-shadow:0 4px 10px #0000001a;transition:all .1s cubic-bezier(0,0,1,1);pointer-events:auto}.arco-message-icon{display:inline-block;margin-right:8px;color:var(--color-text-1);font-size:20px;vertical-align:middle;animation:arco-msg-fade .1s cubic-bezier(0,0,1,1),arco-msg-fade .4s cubic-bezier(.3,1.3,.3,1)}.arco-message-content{font-size:14px;color:var(--color-text-1);vertical-align:middle}.arco-message-info{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-info .arco-message-icon{color:rgb(var(--primary-6))}.arco-message-info .arco-message-content{color:var(--color-text-1)}.arco-message-success{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-success .arco-message-icon{color:rgb(var(--success-6))}.arco-message-success .arco-message-content{color:var(--color-text-1)}.arco-message-warning{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-warning .arco-message-icon{color:rgb(var(--warning-6))}.arco-message-warning .arco-message-content{color:var(--color-text-1)}.arco-message-error{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-error .arco-message-icon{color:rgb(var(--danger-6))}.arco-message-error .arco-message-content{color:var(--color-text-1)}.arco-message-loading{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-message-loading .arco-message-icon{color:rgb(var(--primary-6))}.arco-message-loading .arco-message-content{color:var(--color-text-1)}.arco-message-close-btn{margin-left:8px;color:var(--color-text-1);font-size:12px}.arco-message .arco-icon-hover.arco-message-icon-hover:before{width:20px;height:20px}.fade-message-enter-from,.fade-message-appear-from{opacity:0}.fade-message-enter-to,.fade-message-appear-to{opacity:1}.fade-message-enter-active,.fade-message-appear-active{transition:opacity .1s cubic-bezier(0,0,1,1)}.fade-message-leave-from{opacity:1}.fade-message-leave-to{opacity:0}.fade-message-leave-active{position:absolute}.flip-list-move{transition:transform .8s ease}@keyframes arco-msg-fade{0%{opacity:0}to{opacity:1}}@keyframes arco-msg-scale{0%{transform:scale(0)}to{transform:scale(1)}}.arco-modal-container{position:fixed;top:0;right:0;bottom:0;left:0}.arco-modal-mask{position:absolute;top:0;right:0;bottom:0;left:0;background-color:var(--color-mask-bg)}.arco-modal-wrapper{position:absolute;top:0;right:0;bottom:0;left:0;overflow:auto;text-align:center}.arco-modal-wrapper.arco-modal-wrapper-align-center{white-space:nowrap}.arco-modal-wrapper.arco-modal-wrapper-align-center:after{display:inline-block;width:0;height:100%;vertical-align:middle;content:""}.arco-modal-wrapper.arco-modal-wrapper-align-center .arco-modal{top:0;vertical-align:middle}.arco-modal-wrapper.arco-modal-wrapper-moved{text-align:left}.arco-modal-wrapper.arco-modal-wrapper-moved 
.arco-modal{top:0;vertical-align:top}.arco-modal{position:relative;top:100px;display:inline-block;width:520px;margin:0 auto;line-height:1.5715;white-space:initial;text-align:left;background-color:var(--color-bg-3);border-radius:var(--border-radius-medium)}.arco-modal-draggable .arco-modal-header{cursor:move}.arco-modal-header{display:flex;flex-shrink:0;align-items:center;box-sizing:border-box;width:100%;height:48px;padding:0 20px;border-bottom:1px solid var(--color-neutral-3)}.arco-modal-header .arco-modal-title{display:flex;flex:1;align-items:center;justify-content:center}.arco-modal-header .arco-modal-title-align-start{justify-content:flex-start}.arco-modal-header .arco-modal-title-align-center{justify-content:center}.arco-modal-body{position:relative;padding:24px 20px;overflow:auto;color:var(--color-text-1);font-size:14px}.arco-modal-footer{box-sizing:border-box;flex-shrink:0;width:100%;padding:16px 20px;text-align:right;border-top:1px solid var(--color-neutral-3)}.arco-modal-footer>.arco-btn:not(:nth-child(1)){margin-left:12px}.arco-modal-close-btn{margin-left:-12px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-modal-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-modal-title-icon{margin-right:10px;font-size:18px;vertical-align:-.15em}.arco-modal-title-icon .arco-icon-info-circle-fill{color:rgb(var(--primary-6))}.arco-modal-title-icon .arco-icon-check-circle-fill{color:rgb(var(--success-6))}.arco-modal-title-icon .arco-icon-exclamation-circle-fill{color:rgb(var(--warning-6))}.arco-modal-title-icon .arco-icon-close-circle-fill{color:rgb(var(--danger-6))}.arco-modal-simple{width:400px;padding:24px 32px 32px}.arco-modal-simple .arco-modal-header,.arco-modal-simple .arco-modal-footer{height:unset;padding:0;border:none}.arco-modal-simple .arco-modal-header{margin-bottom:24px}.arco-modal-simple .arco-modal-title{justify-content:center}.arco-modal-simple .arco-modal-title-align-start{justify-content:flex-start}.arco-modal-simple .arco-modal-title-align-center{justify-content:center}.arco-modal-simple .arco-modal-footer{margin-top:32px;text-align:center}.arco-modal-simple .arco-modal-body{padding:0}.arco-modal-fullscreen{top:0;display:inline-flex;flex-direction:column;box-sizing:border-box;width:100%;height:100%}.arco-modal-fullscreen .arco-modal-footer{margin-top:auto}.zoom-modal-enter-from,.zoom-modal-appear-from{transform:scale(.5);opacity:0}.zoom-modal-enter-to,.zoom-modal-appear-to{transform:scale(1);opacity:1}.zoom-modal-enter-active,.zoom-modal-appear-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1),transform .4s cubic-bezier(.3,1.3,.3,1)}.zoom-modal-leave-from{transform:scale(1);opacity:1}.zoom-modal-leave-to{transform:scale(.5);opacity:0}.zoom-modal-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1),transform .4s cubic-bezier(.3,1.3,.3,1)}.fade-modal-enter-from,.fade-modal-appear-from{opacity:0}.fade-modal-enter-to,.fade-modal-appear-to{opacity:1}.fade-modal-enter-active,.fade-modal-appear-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.fade-modal-leave-from{opacity:1}.fade-modal-leave-to{opacity:0}.fade-modal-leave-active{transition:opacity .4s cubic-bezier(.3,1.3,.3,1)}.arco-notification-list{position:fixed;z-index:1003;margin:0;padding-left:0}.arco-notification-list-top-left{top:20px;left:20px}.arco-notification-list-top-right{top:20px;right:20px}.arco-notification-list-top-right
.arco-notification{margin-left:auto}.arco-notification-list-bottom-left{bottom:20px;left:20px}.arco-notification-list-bottom-right{right:20px;bottom:20px}.arco-notification-list-bottom-right .arco-notification{margin-left:auto}.arco-notification{position:relative;display:flex;box-sizing:border-box;width:340px;padding:20px;overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 12px #00000026;opacity:1;transition:opacity .2s cubic-bezier(0,0,1,1)}.arco-notification:not(:last-child){margin-bottom:20px}.arco-notification-icon{display:flex;align-items:center;font-size:24px}.arco-notification-info{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-info .arco-notification-icon{color:rgb(var(--primary-6))}.arco-notification-success{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-success .arco-notification-icon{color:rgb(var(--success-6))}.arco-notification-warning{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-warning .arco-notification-icon{color:rgb(var(--warning-6))}.arco-notification-error{background-color:var(--color-bg-popup);border-color:var(--color-neutral-3)}.arco-notification-error .arco-notification-icon{color:rgb(var(--danger-6))}.arco-notification-left{padding-right:16px}.arco-notification-right{flex:1;word-break:break-word}.arco-notification-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-notification-title+.arco-notification-content{margin-top:4px}.arco-notification-content{color:var(--color-text-1);font-size:14px}.arco-notification-info .arco-notification-title,.arco-notification-info .arco-notification-content,.arco-notification-success .arco-notification-title,.arco-notification-success .arco-notification-content,.arco-notification-warning .arco-notification-title,.arco-notification-warning .arco-notification-content,.arco-notification-error .arco-notification-title,.arco-notification-error .arco-notification-content{color:var(--color-text-1)}.arco-notification-footer{margin-top:16px;text-align:right}.arco-notification-close-btn{position:absolute;top:12px;right:12px;color:var(--color-text-1);font-size:12px;cursor:pointer}.arco-notification-close-btn>svg{position:relative}.arco-notification .arco-icon-hover.arco-notification-icon-hover:before{width:20px;height:20px}.slide-left-notification-enter-from,.slide-left-notification-appear-from{transform:translate(-100%)}.slide-left-notification-enter-to,.slide-left-notification-appear-to{transform:translate(0)}.slide-left-notification-enter-active,.slide-left-notification-appear-active{transition:transform .4s cubic-bezier(.3,1.3,.3,1)}.slide-left-notification-leave-from{opacity:1}.slide-left-notification-leave-to{height:0;margin-top:0;margin-bottom:0;padding-top:0;padding-bottom:0;opacity:0}.slide-left-notification-leave-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.slide-right-notification-enter-from,.slide-right-notification-appear-from{transform:translate(100%)}.slide-right-notification-enter-to,.slide-right-notification-appear-to{transform:translate(0)}.slide-right-notification-enter-active,.slide-right-notification-appear-active{transition:transform .4s 
cubic-bezier(.3,1.3,.3,1)}.slide-right-notification-leave-from{opacity:1}.slide-right-notification-leave-to{height:0;margin-top:0;margin-bottom:0;padding-top:0;padding-bottom:0;opacity:0}.slide-right-notification-leave-active{transition:all .3s cubic-bezier(.34,.69,.1,1)}.arco-overflow-list{display:flex;align-items:center;justify-content:flex-start}.arco-overflow-list>*:not(:last-child){flex-shrink:0}.arco-overflow-list-spacer{flex:1;min-width:0;height:1px}.arco-page-header{padding:16px 0}.arco-page-header-breadcrumb+.arco-page-header-header{margin-top:4px}.arco-page-header-wrapper{padding-right:20px;padding-left:24px}.arco-page-header-header{display:flex;align-items:center;justify-content:space-between;line-height:28px}.arco-page-header-header-left{display:flex;align-items:center}.arco-page-header-main{display:flex;align-items:center;min-height:30px}.arco-page-header-main-with-back{margin-left:-8px;padding-left:8px}.arco-page-header-extra{overflow:hidden;white-space:nowrap}.arco-page-header .arco-icon-hover.arco-page-header-icon-hover:before{width:30px;height:30px}.arco-page-header .arco-icon-hover.arco-page-header-icon-hover:hover:before{background-color:var(--color-fill-2)}.arco-page-header-back-btn{margin-right:12px;color:var(--color-text-2);font-size:14px}.arco-page-header-back-btn-icon{position:relative}.arco-page-header-title{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;color:var(--color-text-1);font-weight:600;font-size:20px}.arco-page-header-divider{width:1px;height:16px;margin-right:12px;margin-left:12px;background-color:var(--color-fill-3)}.arco-page-header-subtitle{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;color:var(--color-text-3);font-size:14px}.arco-page-header-content{padding:20px 32px;border-top:1px solid var(--color-neutral-3)}.arco-page-header-footer{padding:16px 20px 0 24px}.arco-page-header-with-breadcrumb{padding:12px 0}.arco-page-header-with-breadcrumb .arco-page-header-footer{padding-top:12px}.arco-page-header-with-content .arco-page-header-wrapper{padding-bottom:12px}.arco-page-header-with-footer{padding-bottom:0}.arco-page-header-wrapper .arco-page-header-header{flex-wrap:wrap}.arco-page-header-wrapper .arco-page-header-header .arco-page-header-head-extra{margin-top:4px}.arco-pagination{display:flex;align-items:center;font-size:14px}.arco-pagination-list{display:inline-block;margin:0;padding:0;white-space:nowrap;list-style:none}.arco-pagination-item{display:inline-block;box-sizing:border-box;padding:0 8px;color:var(--color-text-2);text-align:center;vertical-align:middle;list-style:none;background-color:transparent;border:0 solid transparent;border-radius:var(--border-radius-small);outline:0;cursor:pointer;user-select:none;min-width:32px;height:32px;font-size:14px;line-height:32px}.arco-pagination-item-previous,.arco-pagination-item-next{font-size:12px}.arco-pagination-item:hover{color:var(--color-text-2);background-color:var(--color-fill-1);border-color:transparent}.arco-pagination-item-active,.arco-pagination-item-active:hover{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border-color:transparent;transition:color .2s cubic-bezier(0,0,1,1),background-color .2s 
cubic-bezier(0,0,1,1)}.arco-pagination-item-disabled,.arco-pagination-item-disabled:hover{color:var(--color-text-4);background-color:transparent;border-color:transparent;cursor:not-allowed}.arco-pagination-item:not(:last-child){margin-right:8px}.arco-pagination-item-previous,.arco-pagination-item-next{color:var(--color-text-2);font-size:12px;background-color:transparent}.arco-pagination-item-previous:not(.arco-pagination-item-disabled):hover,.arco-pagination-item-next:not(.arco-pagination-item-disabled):hover{color:rgb(var(--primary-6));background-color:var(--color-fill-1)}.arco-pagination-item-previous:after,.arco-pagination-item-next:after{display:inline-block;font-size:0;vertical-align:middle;content:"."}.arco-pagination .arco-pagination-item-previous.arco-pagination-item-disabled,.arco-pagination .arco-pagination-item-next.arco-pagination-item-disabled{color:var(--color-text-4);background-color:transparent}.arco-pagination-item-jumper{font-size:16px}.arco-pagination-jumper{display:flex;align-items:center;margin-left:8px}.arco-pagination-jumper>span{font-size:14px}.arco-pagination-jumper-text-goto,.arco-pagination-jumper-prepend,.arco-pagination-jumper-append{color:var(--color-text-3);white-space:nowrap}.arco-pagination-jumper-prepend{margin-right:8px}.arco-pagination-jumper-append{margin-left:8px}.arco-pagination-jumper .arco-pagination-jumper-input{width:40px;padding-right:2px;padding-left:2px}.arco-pagination-jumper .arco-pagination-jumper-input input{text-align:center}.arco-pagination-options{position:relative;display:inline-block;flex:0 0 auto;min-width:0;margin-left:8px;text-align:center;vertical-align:middle}.arco-pagination-options .arco-select{width:auto}.arco-pagination-options .arco-select-view-value{padding-right:6px;overflow:inherit}.arco-pagination-total{display:inline-block;height:100%;margin-right:8px;color:var(--color-text-1);font-size:14px;line-height:32px;white-space:nowrap}.arco-pagination-jumper{flex:0 0 auto}.arco-pagination-jumper-separator{padding:0 12px}.arco-pagination-jumper-total-page{margin-right:8px}.arco-pagination-simple{display:flex;align-items:center}.arco-pagination-simple .arco-pagination-item{margin-right:0}.arco-pagination-simple .arco-pagination-jumper{margin:0 4px;color:var(--color-text-1)}.arco-pagination-simple .arco-pagination-jumper .arco-pagination-jumper-input{width:40px;margin-left:0}.arco-pagination-simple .arco-pagination-item-previous,.arco-pagination-simple .arco-pagination-item-next{color:var(--color-text-2);background-color:transparent}.arco-pagination-simple .arco-pagination-item-previous:not(.arco-pagination-item-disabled):hover,.arco-pagination-simple .arco-pagination-item-next:not(.arco-pagination-item-disabled):hover{color:rgb(var(--primary-6));background-color:var(--color-fill-1)}.arco-pagination-simple .arco-pagination-item-previous.arco-pagination-item-disabled,.arco-pagination-simple .arco-pagination-item-next.arco-pagination-item-disabled{color:var(--color-text-4);background-color:transparent}.arco-pagination-disabled{cursor:not-allowed}.arco-pagination-disabled .arco-pagination-item,.arco-pagination-disabled .arco-pagination-item:not(.arco-pagination-item-disabled):not(.arco-pagination-item-active):hover{color:var(--color-text-4);background-color:transparent;border-color:transparent;cursor:not-allowed}.arco-pagination.arco-pagination-disabled .arco-pagination-item-active{color:var(--color-primary-light-3);background-color:var(--color-fill-1);border-color:transparent}.arco-pagination-size-mini 
.arco-pagination-item{min-width:24px;height:24px;font-size:12px;line-height:24px}.arco-pagination-size-mini .arco-pagination-item-previous,.arco-pagination-size-mini .arco-pagination-item-next{font-size:12px}.arco-pagination-size-mini .arco-pagination-total{font-size:12px;line-height:24px}.arco-pagination-size-mini .arco-pagination-option{height:24px;font-size:12px;line-height:0}.arco-pagination-size-mini .arco-pagination-jumper>span{font-size:12px}.arco-pagination-size-small .arco-pagination-item{min-width:28px;height:28px;font-size:14px;line-height:28px}.arco-pagination-size-small .arco-pagination-item-previous,.arco-pagination-size-small .arco-pagination-item-next{font-size:12px}.arco-pagination-size-small .arco-pagination-total{font-size:14px;line-height:28px}.arco-pagination-size-small .arco-pagination-option{height:28px;font-size:14px;line-height:0}.arco-pagination-size-small .arco-pagination-jumper>span{font-size:14px}.arco-pagination-size-large .arco-pagination-item{min-width:36px;height:36px;font-size:14px;line-height:36px}.arco-pagination-size-large .arco-pagination-item-previous,.arco-pagination-size-large .arco-pagination-item-next{font-size:14px}.arco-pagination-size-large .arco-pagination-total{font-size:14px;line-height:36px}.arco-pagination-size-large .arco-pagination-option{height:36px;font-size:14px;line-height:0}.arco-pagination-size-large .arco-pagination-jumper>span{font-size:14px}.arco-popconfirm-popup-content{box-sizing:border-box;padding:16px;color:var(--color-text-2);font-size:14px;line-height:1.5715;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-popconfirm-popup-content .arco-popconfirm-body{position:relative;display:flex;align-items:flex-start;margin-bottom:16px;color:var(--color-text-1);font-size:14px}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon{display:inline-flex;align-items:center;height:22.001px;margin-right:8px;font-size:18px}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-exclamation-circle-fill{color:rgb(var(--warning-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-check-circle-fill{color:rgb(var(--success-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-info-circle-fill{color:rgb(var(--primary-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-icon .arco-icon-close-circle-fill{color:rgb(var(--danger-6))}.arco-popconfirm-popup-content .arco-popconfirm-body .arco-popconfirm-content{text-align:left;word-wrap:break-word}.arco-popconfirm-popup-content .arco-popconfirm-footer{text-align:right}.arco-popconfirm-popup-content .arco-popconfirm-footer>button{margin-left:8px}.arco-popconfirm-popup-arrow{z-index:1;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3)}.arco-popover-popup-content{box-sizing:border-box;padding:12px 16px;color:var(--color-text-2);font-size:14px;line-height:1.5715;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-popover-title{color:var(--color-text-1);font-weight:500;font-size:16px}.arco-popover-content{margin-top:4px;text-align:left;word-wrap:break-word}.arco-popover-popup-arrow{z-index:1;background-color:var(--color-bg-popup);border:1px solid 
var(--color-neutral-3)}.arco-progress{position:relative;line-height:1;font-size:12px}.arco-progress-type-line,.arco-progress-type-steps{display:inline-block;max-width:100%;width:100%}.arco-progress-type-line.arco-progress-size-mini{width:auto}.arco-progress-line-wrapper,.arco-progress-steps-wrapper{display:flex;align-items:center;width:100%;max-width:100%;height:100%}.arco-progress-line-text,.arco-progress-steps-text{font-size:12px;margin-left:16px;color:var(--color-text-2);white-space:nowrap;text-align:right;flex-grow:1;flex-shrink:0;min-width:32px}.arco-progress-line-text .arco-icon,.arco-progress-steps-text .arco-icon{font-size:12px;margin-left:4px}.arco-progress-line{background-color:var(--color-fill-3);border-radius:100px;width:100%;position:relative;display:inline-block;overflow:hidden}.arco-progress-line-bar{height:100%;border-radius:100px;background-color:rgb(var(--primary-6));position:relative;transition:width .6s cubic-bezier(.34,.69,.1,1),background .3s cubic-bezier(.34,.69,.1,1);max-width:100%}.arco-progress-line-bar-buffer{position:absolute;background-color:var(--color-primary-light-3);height:100%;top:0;left:0;border-radius:0 100px 100px 0;max-width:100%;transition:all .6s cubic-bezier(.34,.69,.1,1)}.arco-progress-line-bar-animate:after{content:"";display:block;position:absolute;top:0;width:100%;height:100%;border-radius:inherit;background:linear-gradient(90deg,transparent 25%,rgba(255,255,255,.5) 50%,transparent 75%);background-size:400% 100%;animation:arco-progress-loading 1.5s cubic-bezier(.34,.69,.1,1) infinite}.arco-progress-line-text .arco-icon{color:var(--color-text-2)}.arco-progress-type-steps.arco-progress-size-small{width:auto}.arco-progress-type-steps.arco-progress-size-small .arco-progress-steps-item{width:2px;flex:unset;border-radius:2px}.arco-progress-type-steps.arco-progress-size-small .arco-progress-steps-item:not(:last-of-type){margin-right:3px}.arco-progress-steps{display:flex;width:100%}.arco-progress-steps-text{margin-left:8px;min-width:unset}.arco-progress-steps-text .arco-icon{color:var(--color-text-2)}.arco-progress-steps-item{height:100%;flex:1;background-color:var(--color-fill-3);position:relative;display:inline-block}.arco-progress-steps-item:not(:last-of-type){margin-right:3px}.arco-progress-steps-item:last-of-type{border-top-right-radius:100px;border-bottom-right-radius:100px}.arco-progress-steps-item:first-of-type{border-top-left-radius:100px;border-bottom-left-radius:100px}.arco-progress-steps-item-active{background-color:rgb(var(--primary-6))}.arco-progress-status-warning .arco-progress-line-bar,.arco-progress-status-warning .arco-progress-steps-item-active{background-color:rgb(var(--warning-6))}.arco-progress-status-warning .arco-progress-line-text .arco-icon,.arco-progress-status-warning .arco-progress-steps-text .arco-icon{color:rgb(var(--warning-6))}.arco-progress-status-success .arco-progress-line-bar,.arco-progress-status-success .arco-progress-steps-item-active{background-color:rgb(var(--success-6))}.arco-progress-status-success .arco-progress-line-text .arco-icon,.arco-progress-status-success .arco-progress-steps-text .arco-icon{color:rgb(var(--success-6))}.arco-progress-status-danger .arco-progress-line-bar,.arco-progress-status-danger .arco-progress-steps-item-active{background-color:rgb(var(--danger-6))}.arco-progress-status-danger .arco-progress-line-text .arco-icon,.arco-progress-status-danger .arco-progress-steps-text .arco-icon{color:rgb(var(--danger-6))}.arco-progress-size-small 
.arco-progress-line-text{font-size:12px;margin-left:16px}.arco-progress-size-small .arco-progress-line-text .arco-icon{font-size:12px}.arco-progress-size-large .arco-progress-line-text{font-size:16px;margin-left:16px}.arco-progress-size-large .arco-progress-line-text .arco-icon{font-size:14px}.arco-progress-type-circle{display:inline-block}.arco-progress-circle-wrapper{position:relative;text-align:center;line-height:1;display:inline-block;vertical-align:text-bottom}.arco-progress-circle-svg{transform:rotate(-90deg)}.arco-progress-circle-text{position:absolute;top:50%;left:50%;color:var(--color-text-3);transform:translate(-50%,-50%);font-size:14px}.arco-progress-circle-text .arco-icon{font-size:16px;color:var(--color-text-2)}.arco-progress-circle-bg{stroke:var(--color-fill-3)}.arco-progress-circle-bar{stroke:rgb(var(--primary-6));transition:stroke-dashoffset .6s cubic-bezier(0,0,1,1) 0s,stroke .6s cubic-bezier(0,0,1,1)}.arco-progress-size-mini .arco-progress-circle-bg{stroke:var(--color-primary-light-3)}.arco-progress-size-mini .arco-progress-circle-bar{stroke:rgb(var(--primary-6))}.arco-progress-size-mini.arco-progress-status-warning .arco-progress-circle-bg{stroke:var(--color-warning-light-3)}.arco-progress-size-mini.arco-progress-status-danger .arco-progress-circle-bg{stroke:var(--color-danger-light-3)}.arco-progress-size-mini.arco-progress-status-success .arco-progress-circle-bg{stroke:var(--color-success-light-3)}.arco-progress-size-mini .arco-progress-circle-wrapper .arco-icon-check{position:absolute;top:50%;left:50%;transform:translate(-50%) translateY(-50%)}.arco-progress-size-mini .arco-progress-circle-text{position:static;top:unset;left:unset;transform:unset}.arco-progress-size-small .arco-progress-circle-text{font-size:13px}.arco-progress-size-small .arco-progress-circle-text .arco-icon{font-size:14px}.arco-progress-size-large .arco-progress-circle-text,.arco-progress-size-large .arco-progress-circle-text .arco-icon{font-size:16px}.arco-progress-status-warning .arco-progress-circle-bar{stroke:rgb(var(--warning-6))}.arco-progress-status-warning .arco-icon{color:rgb(var(--warning-6))}.arco-progress-status-success .arco-progress-circle-bar{stroke:rgb(var(--success-6))}.arco-progress-status-success .arco-icon{color:rgb(var(--success-6))}.arco-progress-status-danger .arco-progress-circle-bar{stroke:rgb(var(--danger-6))}.arco-progress-status-danger .arco-icon{color:rgb(var(--danger-6))}@keyframes arco-progress-loading{0%{background-position:100% 50%}to{background-position:0 50%}}.arco-radio>input[type=radio],.arco-radio-button>input[type=radio]{position:absolute;top:0;left:0;width:0;height:0;opacity:0}.arco-radio>input[type=radio]:focus+.arco-radio-icon-hover:before,.arco-radio-button>input[type=radio]:focus+.arco-radio-icon-hover:before{background-color:var(--color-fill-2)}.arco-icon-hover.arco-radio-icon-hover:before{width:24px;height:24px}.arco-radio{position:relative;display:inline-flex;align-items:center;padding-left:5px;font-size:14px;line-height:unset;cursor:pointer}.arco-radio-label{margin-left:8px;color:var(--color-text-1)}.arco-radio-icon{position:relative;display:block;box-sizing:border-box;width:14px;height:14px;line-height:14px;border:2px solid var(--color-neutral-3);border-radius:var(--border-radius-circle)}.arco-radio-icon:after{position:absolute;top:0;left:0;display:inline-block;box-sizing:border-box;width:10px;height:10px;background-color:var(--color-bg-2);border-radius:var(--border-radius-circle);transform:scale(1);transition:transform .3s 
cubic-bezier(.3,1.3,.3,1);content:""}.arco-radio:hover .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-checked .arco-radio-icon{background-color:rgb(var(--primary-6));border-color:rgb(var(--primary-6))}.arco-radio-checked .arco-radio-icon:after{background-color:var(--color-white);transform:scale(.4)}.arco-radio-checked:hover .arco-radio-icon{border-color:rgb(var(--primary-6))}.arco-radio-disabled,.arco-radio-disabled .arco-radio-icon-hover{cursor:not-allowed}.arco-radio-disabled .arco-radio-label{color:var(--color-text-4)}.arco-radio-disabled .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-disabled .arco-radio-icon:after{background-color:var(--color-fill-2)}.arco-radio-disabled:hover .arco-radio-icon{border-color:var(--color-neutral-3)}.arco-radio-checked.arco-radio-disabled .arco-radio-icon,.arco-radio-checked.arco-radio-disabled:hover .arco-radio-icon{background-color:var(--color-primary-light-3);border-color:transparent}.arco-radio-checked.arco-radio-disabled .arco-radio-icon:after{background-color:var(--color-fill-2)}.arco-radio-checked.arco-radio-disabled .arco-radio-label{color:var(--color-text-4)}.arco-radio:hover .arco-radio-icon-hover:before{background-color:var(--color-fill-2)}.arco-radio-group{display:inline-block;box-sizing:border-box}.arco-radio-group .arco-radio{margin-right:20px}.arco-radio-group-button{display:inline-flex;padding:1.5px;line-height:26px;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-radio-button{position:relative;display:inline-block;margin:1.5px;color:var(--color-text-2);font-size:14px;line-height:26px;background-color:transparent;border-radius:var(--border-radius-small);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-radio-button-content{position:relative;display:block;padding:0 12px}.arco-radio-button:not(:first-of-type):before{position:absolute;top:50%;left:-2px;display:block;width:1px;height:14px;background-color:var(--color-neutral-3);transform:translateY(-50%);transition:all .1s cubic-bezier(0,0,1,1);content:""}.arco-radio-button:hover:before,.arco-radio-button:hover+.arco-radio-button:before,.arco-radio-button.arco-radio-checked:before,.arco-radio-button.arco-radio-checked+.arco-radio-button:before{opacity:0}.arco-radio-button:hover{color:var(--color-text-1);background-color:var(--color-bg-5)}.arco-radio-button.arco-radio-checked{color:rgb(var(--primary-6));background-color:var(--color-bg-5)}.arco-radio-button.arco-radio-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-radio-button.arco-radio-disabled.arco-radio-checked{color:var(--color-primary-light-3);background-color:var(--color-bg-5)}.arco-radio-group-size-small{line-height:28px}.arco-radio-group-size-small.arco-radio-group-button,.arco-radio-group-size-small .arco-radio-button{font-size:14px;line-height:22px}.arco-radio-group-size-large{line-height:36px}.arco-radio-group-size-large.arco-radio-group-button,.arco-radio-group-size-large .arco-radio-button{font-size:14px;line-height:30px}.arco-radio-group-size-mini{line-height:24px}.arco-radio-group-size-mini.arco-radio-group-button,.arco-radio-group-size-mini .arco-radio-button{font-size:12px;line-height:18px}.arco-radio-group-direction-vertical .arco-radio{display:flex;margin-right:0;line-height:32px}body[arco-theme=dark] .arco-radio-button.arco-radio-checked,body[arco-theme=dark] .arco-radio-button:not(.arco-radio-disabled):hover{background-color:var(--color-fill-3)}body[arco-theme=dark] 
.arco-radio-button:after{background-color:var(--color-bg-3)}.arco-rate{display:inline-flex;align-items:center;min-height:32px;font-size:24px;line-height:1;user-select:none}.arco-rate-disabled{cursor:not-allowed}.arco-rate-character{position:relative;color:var(--color-fill-3);transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-rate-character:not(:last-child){margin-right:8px}.arco-rate-character-left,.arco-rate-character-right{transition:inherit}.arco-rate-character-left>*,.arco-rate-character-right>*{float:left}.arco-rate-character-left{position:absolute;top:0;left:0;width:50%;overflow:hidden;white-space:nowrap;opacity:0}.arco-rate-character-scale{animation:arco-rate-scale .4s cubic-bezier(.34,.69,.1,1)}.arco-rate-character-full .arco-rate-character-right{color:rgb(var(--gold-6))}.arco-rate-character-half .arco-rate-character-left{color:rgb(var(--gold-6));opacity:1}.arco-rate-character-disabled{cursor:not-allowed}.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character{cursor:pointer}.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character:hover,.arco-rate:not(.arco-rate-readonly):not(.arco-rate-disabled) .arco-rate-character:focus{transform:scale(1.2)}@keyframes arco-rate-scale{0%{transform:scale(1)}50%{transform:scale(1.2)}to{transform:scale(1)}}.arco-resizebox{position:relative;width:100%;overflow:hidden}.arco-resizebox-direction-left,.arco-resizebox-direction-right,.arco-resizebox-direction-top,.arco-resizebox-direction-bottom{position:absolute;top:0;left:0;box-sizing:border-box;user-select:none}.arco-resizebox-direction-right{right:0;left:unset}.arco-resizebox-direction-bottom{top:unset;bottom:0}.arco-resizebox-trigger-icon-wrapper{display:flex;align-items:center;justify-content:center;height:100%;color:var(--color-text-1);font-size:12px;line-height:1;background-color:var(--color-neutral-3)}.arco-resizebox-trigger-icon{display:inline-block;margin:-3px}.arco-resizebox-trigger-vertical{height:100%;cursor:col-resize}.arco-resizebox-trigger-horizontal{width:100%;cursor:row-resize}.arco-result{box-sizing:border-box;width:100%;padding:32px 32px 24px}.arco-result-icon{margin-bottom:16px;font-size:20px;text-align:center}.arco-result-icon-tip{display:flex;width:45px;height:45px;align-items:center;justify-content:center;border-radius:50%;margin:0 auto}.arco-result-icon-custom .arco-result-icon-tip{font-size:45px;color:inherit;width:unset;height:unset}.arco-result-icon-success .arco-result-icon-tip{color:rgb(var(--success-6));background-color:var(--color-success-light-1)}.arco-result-icon-error .arco-result-icon-tip{color:rgb(var(--danger-6));background-color:var(--color-danger-light-1)}.arco-result-icon-info .arco-result-icon-tip{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1)}.arco-result-icon-warning .arco-result-icon-tip{color:rgb(var(--warning-6));background-color:var(--color-warning-light-1)}.arco-result-icon-404,.arco-result-icon-403,.arco-result-icon-500{padding-top:24px}.arco-result-icon-404 .arco-result-icon-tip,.arco-result-icon-403 .arco-result-icon-tip,.arco-result-icon-500 
.arco-result-icon-tip{width:92px;height:92px;line-height:92px}.arco-result-title{color:var(--color-text-1);font-weight:500;font-size:14px;line-height:1.5715;text-align:center}.arco-result-subtitle{color:var(--color-text-2);font-size:14px;line-height:1.5715;text-align:center}.arco-result-extra{margin-top:20px;text-align:center}.arco-result-content{margin-top:20px}.arco-scrollbar{position:relative}.arco-scrollbar-container{position:relative;scrollbar-width:none}.arco-scrollbar-container::-webkit-scrollbar{display:none}.arco-scrollbar-track{position:absolute;z-index:100}.arco-scrollbar-track-direction-horizontal{bottom:0;left:0;box-sizing:border-box;width:100%;height:15px}.arco-scrollbar-track-direction-vertical{top:0;right:0;box-sizing:border-box;width:15px;height:100%}.arco-scrollbar-thumb{position:absolute;display:block;box-sizing:border-box}.arco-scrollbar-thumb-bar{width:100%;height:100%;background-color:var(--color-neutral-4);border-radius:6px}.arco-scrollbar-thumb:hover .arco-scrollbar-thumb-bar,.arco-scrollbar-thumb-dragging .arco-scrollbar-thumb-bar{background-color:var(--color-neutral-6)}.arco-scrollbar-thumb-direction-horizontal .arco-scrollbar-thumb-bar{height:9px;margin:3px 0}.arco-scrollbar-thumb-direction-vertical .arco-scrollbar-thumb-bar{width:9px;margin:0 3px}.arco-scrollbar.arco-scrollbar-type-embed .arco-scrollbar-thumb{opacity:0;transition:opacity ease .2s}.arco-scrollbar.arco-scrollbar-type-embed .arco-scrollbar-thumb-dragging,.arco-scrollbar.arco-scrollbar-type-embed:hover .arco-scrollbar-thumb{opacity:.8}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track{background-color:var(--color-neutral-1)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track-direction-horizontal{border-top:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-track-direction-vertical{border-right:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-thumb-direction-horizontal{margin:-1px 0}.arco-scrollbar.arco-scrollbar-type-track .arco-scrollbar-thumb-direction-vertical{margin:0 -1px}.arco-scrollbar.arco-scrollbar-type-track.arco-scrollbar-both .arco-scrollbar-track-direction-vertical:after{position:absolute;right:-1px;bottom:0;display:block;box-sizing:border-box;width:15px;height:15px;background-color:var(--color-neutral-1);border-right:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3);content:""}.arco-select-dropdown{box-sizing:border-box;padding:4px 0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-select-dropdown .arco-select-dropdown-loading{display:flex;align-items:center;justify-content:center;min-height:50px}.arco-select-dropdown-list{margin-top:0;margin-bottom:0;padding-left:0;list-style:none}.arco-select-dropdown-list-wrapper{max-height:200px;overflow-y:auto}.arco-select-dropdown .arco-select-option{position:relative;z-index:1;display:flex;align-items:center;box-sizing:border-box;width:100%;padding:0 12px;color:var(--color-text-1);font-size:14px;line-height:36px;text-align:left;background-color:var(--color-bg-popup);cursor:pointer}.arco-select-dropdown .arco-select-option-content{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown .arco-select-option-checkbox{overflow:hidden}.arco-select-dropdown .arco-select-option-checkbox 
.arco-checkbox-label{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown .arco-select-option-has-suffix{justify-content:space-between}.arco-select-dropdown .arco-select-option-active,.arco-select-dropdown .arco-select-option:not(.arco-select-dropdown .arco-select-option-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2);transition:all .1s cubic-bezier(0,0,1,1)}.arco-select-dropdown .arco-select-option-disabled{color:var(--color-text-4);background-color:var(--color-bg-popup);cursor:not-allowed}.arco-select-dropdown .arco-select-option-icon{display:inline-flex;margin-right:8px}.arco-select-dropdown .arco-select-option-suffix{margin-left:12px}.arco-select-dropdown .arco-select-group:first-child .arco-select-dropdown .arco-select-group-title{margin-top:8px}.arco-select-dropdown .arco-select-group-title{box-sizing:border-box;width:100%;margin-top:8px;padding:0 12px;color:var(--color-text-3);font-size:12px;line-height:20px;cursor:default;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-select-dropdown.arco-select-dropdown-has-header{padding-top:0}.arco-select-dropdown-header{border-bottom:1px solid var(--color-fill-3)}.arco-select-dropdown.arco-select-dropdown-has-footer{padding-bottom:0}.arco-select-dropdown-footer{border-top:1px solid var(--color-fill-3)}.arco-skeleton-shape{width:48px;height:48px;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-skeleton-shape-circle{border-radius:50%}.arco-skeleton-shape-small{width:36px;height:36px}.arco-skeleton-shape-large{width:60px;height:60px}.arco-skeleton-line{margin:0;padding:0;list-style:none}.arco-skeleton-line-row{height:16px;background-color:var(--color-fill-2)}.arco-skeleton-line-row:not(:last-child){margin-bottom:16px}.arco-skeleton-animation .arco-skeleton-shape,.arco-skeleton-animation .arco-skeleton-line-row{background:linear-gradient(90deg,var(--color-fill-2) 25%,var(--color-fill-3) 37%,var(--color-fill-2) 63%);background-size:400% 100%;animation:arco-skeleton-circle 1.5s cubic-bezier(0,0,1,1) infinite}@keyframes arco-skeleton-circle{0%{background-position:100% 50%}to{background-position:0 50%}}.arco-slider{display:inline-flex;align-items:center;width:100%}.arco-slider-vertical{display:inline-block;width:auto;min-width:22px;height:auto}.arco-slider-vertical .arco-slider-wrapper{flex-direction:column}.arco-slider-with-marks{margin-bottom:24px;padding:20px}.arco-slider-vertical.arco-slider-with-marks{margin-bottom:0;padding:0}.arco-slider-track{position:relative;flex:1;width:100%;height:12px;cursor:pointer}.arco-slider-track:before{position:absolute;top:50%;display:block;width:100%;height:2px;background-color:var(--color-fill-3);border-radius:2px;transform:translateY(-50%);content:""}.arco-slider-track.arco-slider-track-vertical{width:12px;max-width:12px;height:100%;min-height:200px;margin-right:0;margin-bottom:6px;margin-top:6px;transform:translateY(0)}.arco-slider-track.arco-slider-track-vertical:before{top:unset;left:50%;width:2px;height:100%;transform:translate(-50%)}.arco-slider-track.arco-slider-track-disabled:before{background-color:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-bar{background-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-btn{cursor:not-allowed}.arco-slider-track.arco-slider-track-disabled .arco-slider-btn:after{border-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-dots 
.arco-slider-dot{border-color:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-dots .arco-slider-dot-active{border-color:var(--color-fill-3)}.arco-slider-track.arco-slider-track-disabled .arco-slider-ticks .arco-slider-tick{background:var(--color-fill-2)}.arco-slider-track.arco-slider-track-disabled .arco-slider-ticks .arco-slider-tick-active{background:var(--color-fill-3)}.arco-slider-bar{position:absolute;top:50%;height:2px;background-color:rgb(var(--primary-6));border-radius:2px;transform:translateY(-50%)}.arco-slider-track-vertical .arco-slider-bar{top:unset;left:50%;width:2px;height:unset;transform:translate(-50%)}.arco-slider-btn{position:absolute;top:0;left:0;width:12px;height:12px;transform:translate(-50%)}.arco-slider-btn:after{position:absolute;top:0;left:0;display:inline-block;box-sizing:border-box;width:12px;height:12px;background:var(--color-bg-2);border:2px solid rgb(var(--primary-6));border-radius:50%;transition:all .3s cubic-bezier(.3,1.3,.3,1);content:""}.arco-slider-btn.arco-slider-btn-active:after,.arco-slider-btn:hover:after{box-shadow:0 2px 5px #0000001a;transform:scale(1.16666667)}.arco-slider-track-vertical .arco-slider-btn{top:unset;bottom:0;left:0;transform:translateY(50%)}.arco-slider-marks{position:absolute;top:12px;width:100%}.arco-slider-marks .arco-slider-mark{position:absolute;color:var(--color-text-3);font-size:14px;line-height:1;transform:translate(-50%);cursor:pointer}.arco-slider-track-vertical .arco-slider-marks{top:0;left:15px;height:100%}.arco-slider-track-vertical .arco-slider-marks .arco-slider-mark{transform:translateY(50%)}.arco-slider-dots{height:100%}.arco-slider-dots .arco-slider-dot-wrapper{position:absolute;top:50%;font-size:12px;transform:translate(-50%,-50%)}.arco-slider-track-vertical .arco-slider-dots .arco-slider-dot-wrapper{top:unset;left:50%;transform:translate(-50%,50%)}.arco-slider-dots .arco-slider-dot-wrapper .arco-slider-dot{box-sizing:border-box;width:8px;height:8px;background-color:var(--color-bg-2);border:2px solid var(--color-fill-3);border-radius:50%}.arco-slider-dots .arco-slider-dot-wrapper .arco-slider-dot-active{border-color:rgb(var(--primary-6))}.arco-slider-ticks .arco-slider-tick{position:absolute;top:50%;width:1px;height:3px;margin-top:-1px;background:var(--color-fill-3);transform:translate(-50%,-100%)}.arco-slider-ticks .arco-slider-tick-active{background:rgb(var(--primary-6))}.arco-slider-vertical .arco-slider-ticks .arco-slider-tick{top:unset;left:50%;width:3px;height:1px;margin-top:unset;transform:translate(1px,50%)}.arco-slider-input{display:flex;align-items:center;margin-left:20px}.arco-slider-vertical .arco-slider-input{margin-left:0}.arco-slider-input>.arco-input-number{width:60px;height:32px;overflow:visible;line-height:normal}.arco-slider-input>.arco-input-number input{text-align:center}.arco-slider-input-hyphens{margin:0 6px;width:8px;height:2px;background:rgb(var(--gray-6))}.arco-space{display:inline-flex}.arco-space-horizontal 
.arco-space-item{display:flex;align-items:center}.arco-space-vertical{flex-direction:column}.arco-space-align-baseline{align-items:baseline}.arco-space-align-start{align-items:flex-start}.arco-space-align-end{align-items:flex-end}.arco-space-align-center{align-items:center}.arco-space-wrap{flex-wrap:wrap}.arco-space-fill{display:flex}.arco-dot-loading{position:relative;display:inline-block;width:56px;height:8px;transform-style:preserve-3d;perspective:200px}.arco-dot-loading-item{position:absolute;top:0;left:50%;width:8px;height:8px;background-color:rgb(var(--primary-6));border-radius:var(--border-radius-circle);transform:translate(-50%) scale(0);animation:arco-dot-loading 2s cubic-bezier(0,0,1,1) infinite forwards}.arco-dot-loading-item:nth-child(2){background-color:rgb(var(--primary-5));animation-delay:.4s}.arco-dot-loading-item:nth-child(3){background-color:rgb(var(--primary-4));animation-delay:.8s}.arco-dot-loading-item:nth-child(4){background-color:rgb(var(--primary-4));animation-delay:1.2s}.arco-dot-loading-item:nth-child(5){background-color:rgb(var(--primary-2));animation-delay:1.6s}@keyframes arco-dot-loading{0%{transform:translate3D(-48.621%,0,-.985px) scale(.511)}2.778%{transform:translate3D(-95.766%,0,-.94px) scale(.545)}5.556%{transform:translate3D(-140%,0,-.866px) scale(.6)}8.333%{transform:translate3D(-179.981%,0,-.766px) scale(.675)}11.111%{transform:translate3D(-214.492%,0,-.643px) scale(.768)}13.889%{transform:translate3D(-242.487%,0,-.5px) scale(.875)}16.667%{transform:translate3D(-263.114%,0,-.342px) scale(.993)}19.444%{transform:translate3D(-275.746%,0,-.174px) scale(1.12)}22.222%{transform:translate3D(-280%,0,0) scale(1.25)}25%{transform:translate3D(-275.746%,0,.174px) scale(1.38)}27.778%{transform:translate3D(-263.114%,0,.342px) scale(1.507)}30.556%{transform:translate3D(-242.487%,0,.5px) scale(1.625)}33.333%{transform:translate3D(-214.492%,0,.643px) scale(1.732)}36.111%{transform:translate3D(-179.981%,0,.766px) scale(1.825)}38.889%{transform:translate3D(-140%,0,.866px) scale(1.9)}41.667%{transform:translate3D(-95.766%,0,.94px) scale(1.955)}44.444%{transform:translate3D(-48.621%,0,.985px) scale(1.989)}47.222%{transform:translateZ(1px) scale(2)}50%{transform:translate3D(48.621%,0,.985px) scale(1.989)}52.778%{transform:translate3D(95.766%,0,.94px) scale(1.955)}55.556%{transform:translate3D(140%,0,.866px) scale(1.9)}58.333%{transform:translate3D(179.981%,0,.766px) scale(1.825)}61.111%{transform:translate3D(214.492%,0,.643px) scale(1.732)}63.889%{transform:translate3D(242.487%,0,.5px) scale(1.625)}66.667%{transform:translate3D(263.114%,0,.342px) scale(1.507)}69.444%{transform:translate3D(275.746%,0,.174px) scale(1.38)}72.222%{transform:translate3D(280%,0,0) scale(1.25)}75%{transform:translate3D(275.746%,0,-.174px) scale(1.12)}77.778%{transform:translate3D(263.114%,0,-.342px) scale(.993)}80.556%{transform:translate3D(242.487%,0,-.5px) scale(.875)}83.333%{transform:translate3D(214.492%,0,-.643px) scale(.768)}86.111%{transform:translate3D(179.981%,0,-.766px) scale(.675)}88.889%{transform:translate3D(140%,0,-.866px) scale(.6)}91.667%{transform:translate3D(95.766%,0,-.94px) scale(.545)}94.444%{transform:translate3D(48.621%,0,-.985px) scale(.511)}97.222%{transform:translateZ(-1px) 
scale(.5)}}.arco-spin{display:inline-block}.arco-spin-with-tip{text-align:center}.arco-spin-icon{color:rgb(var(--primary-6));font-size:20px}.arco-spin-tip{margin-top:6px;color:rgb(var(--primary-6));font-weight:500;font-size:14px}.arco-spin-mask{position:absolute;top:0;right:0;bottom:0;left:0;z-index:11;text-align:center;background-color:var(--color-spin-layer-bg);transition:opacity .1s cubic-bezier(0,0,1,1);user-select:none}.arco-spin-loading{position:relative;user-select:none}.arco-spin-loading .arco-spin-mask-icon{position:absolute;top:50%;left:50%;z-index:12;transform:translate(-50%,-50%)}.arco-spin-loading .arco-spin-children:after{opacity:1;pointer-events:auto}.arco-split{display:flex}.arco-split-pane{overflow:auto}.arco-split-pane-second{flex:1}.arco-split-horizontal{flex-direction:row}.arco-split-vertical{flex-direction:column}.arco-split-trigger-icon-wrapper{display:flex;align-items:center;justify-content:center;height:100%;color:var(--color-text-1);font-size:12px;line-height:1;background-color:var(--color-neutral-3)}.arco-split-trigger-icon{display:inline-block;margin:-3px}.arco-split-trigger-vertical{height:100%;cursor:col-resize}.arco-split-trigger-horizontal{width:100%;cursor:row-resize}.arco-statistic{display:inline-block;color:var(--color-text-2);line-height:1.5715}.arco-statistic-title{margin-bottom:8px;font-size:14px;color:var(--color-text-2)}.arco-statistic-content .arco-statistic-value{color:var(--color-text-1);font-weight:500;font-size:26px;white-space:nowrap}.arco-statistic-content .arco-statistic-value-integer{font-size:26px;white-space:nowrap}.arco-statistic-content .arco-statistic-value-decimal{display:inline-block;font-size:26px}.arco-statistic-prefix,.arco-statistic-suffix{font-size:14px}.arco-statistic-extra{margin-top:8px;color:var(--color-text-2)}.arco-steps-item{position:relative;flex:1;margin-right:12px;overflow:hidden;white-space:nowrap;text-align:left}.arco-steps-item:last-child{flex:none;margin-right:0}.arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-item-node{display:inline-block;margin-right:12px;font-weight:500;font-size:16px;vertical-align:top}.arco-steps-icon{box-sizing:border-box;width:28px;height:28px;line-height:26px;text-align:center;border-radius:var(--border-radius-circle);font-size:16px}.arco-steps-item-wait .arco-steps-icon{color:var(--color-text-2);background-color:var(--color-fill-2);border:1px solid transparent}.arco-steps-item-process .arco-steps-icon{color:var(--color-white);background-color:rgb(var(--primary-6));border:1px solid transparent}.arco-steps-item-finish .arco-steps-icon{color:rgb(var(--primary-6));background-color:var(--color-primary-light-1);border:1px solid transparent}.arco-steps-item-error .arco-steps-icon{color:var(--color-white);background-color:rgb(var(--danger-6));border:1px solid transparent}.arco-steps-item-title{position:relative;display:inline-block;padding-right:12px;color:var(--color-text-2);font-size:16px;line-height:28px;white-space:nowrap}.arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-item-process .arco-steps-item-title,.arco-steps-item-finish .arco-steps-item-title,.arco-steps-item-error .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-item-content{display:inline-block}.arco-steps-item-description{max-width:140px;margin-top:2px;color:var(--color-text-3);font-size:12px;white-space:normal}.arco-steps-item-wait .arco-steps-item-description,.arco-steps-item-process .arco-steps-item-description,.arco-steps-item-finish 
.arco-steps-item-description,.arco-steps-item-error .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{position:absolute;top:13.5px;left:100%;display:block;box-sizing:border-box;width:5000px;height:1px;background-color:var(--color-neutral-3);content:""}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-process .arco-steps-item-title:after{background-color:var(--color-neutral-3)}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-finish .arco-steps-item-title:after{background-color:rgb(var(--primary-6))}.arco-steps-label-horizontal .arco-steps-item.arco-steps-item-next-error .arco-steps-item-title:after{background-color:rgb(var(--danger-6))}.arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:13.5px;box-sizing:border-box;width:100%;height:1px}.arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:0;left:13.5px;box-sizing:border-box;width:1px;height:100%;padding:34px 0 6px}.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-size-small.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{left:11.5px;padding:30px 0 6px}.arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail:after{background-color:rgb(var(--primary-6))}.arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail:after{background-color:rgb(var(--danger-6))}.arco-steps-size-small:not(.arco-steps-vertical) .arco-steps-item:not(:last-child) .arco-steps-item-tail{top:11.5px}.arco-steps-size-small .arco-steps-item-node{font-size:14px}.arco-steps-size-small .arco-steps-item-title{font-size:14px;line-height:24px}.arco-steps-size-small .arco-steps-item-description{font-size:12px}.arco-steps-size-small .arco-steps-icon{width:24px;height:24px;font-size:14px;line-height:22px}.arco-steps-size-small.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{top:11.5px}.arco-steps-label-vertical .arco-steps-item{overflow:visible}.arco-steps-label-vertical .arco-steps-item-title{margin-top:2px;padding-right:0}.arco-steps-label-vertical .arco-steps-item-node{margin-left:56px}.arco-steps-label-vertical .arco-steps-item-tail{left:96px;padding-right:40px}.arco-steps-label-vertical.arco-steps-size-small .arco-steps-item-node{margin-left:58px}.arco-steps-label-vertical.arco-steps-size-small .arco-steps-item-tail{left:94px;padding-right:36px}.arco-steps-mode-dot .arco-steps-item{position:relative;flex:1;margin-right:16px;overflow:visible;white-space:nowrap;text-align:left}.arco-steps-mode-dot .arco-steps-item:last-child{flex:none;margin-right:0}.arco-steps-mode-dot .arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-mode-dot .arco-steps-item-node{display:inline-block;box-sizing:border-box;width:8px;height:8px;vertical-align:top;border-radius:var(--border-radius-circle)}.arco-steps-mode-dot .arco-steps-item-active .arco-steps-item-node{width:10px;height:10px}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-node{background-color:var(--color-fill-4);border-color:var(--color-fill-4)}.arco-steps-mode-dot .arco-steps-item-process 
.arco-steps-item-node,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-node{background-color:rgb(var(--primary-6));border-color:rgb(var(--primary-6))}.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-node{background-color:rgb(var(--danger-6));border-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-horizontal .arco-steps-item-node{margin-left:66px}.arco-steps-mode-dot.arco-steps-horizontal .arco-steps-item-active .arco-steps-item-node{margin-top:-1px;margin-left:65px}.arco-steps-mode-dot .arco-steps-item-content{display:inline-block}.arco-steps-mode-dot .arco-steps-item-title{position:relative;display:inline-block;margin-top:4px;font-size:16px}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-mode-dot .arco-steps-item-process .arco-steps-item-title,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-title,.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-mode-dot .arco-steps-item-description{margin-top:4px;font-size:12px;white-space:normal}.arco-steps-mode-dot .arco-steps-item-wait .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-process .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-finish .arco-steps-item-description,.arco-steps-mode-dot .arco-steps-item-error .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;top:3.5px;left:78px;box-sizing:border-box;width:100%;height:1px;background-color:var(--color-neutral-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-process .arco-steps-item-tail{background-color:var(--color-neutral-3)}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail{background-color:rgb(var(--primary-6))}.arco-steps-mode-dot .arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail{background-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-node{margin-right:16px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-content{overflow:hidden}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-title{margin-top:-2px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-description{margin-top:4px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail{position:absolute;bottom:0;left:4px;box-sizing:border-box;width:1px;height:100%;padding-top:16px;padding-bottom:2px;background-color:transparent;transform:translate(-50%)}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child) .arco-steps-item-tail:after{display:block;width:100%;height:100%;background-color:var(--color-neutral-3);content:""}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-process .arco-steps-item-tail:after{background-color:var(--color-neutral-3)}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-finish .arco-steps-item-tail:after{background-color:rgb(var(--primary-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item:not(:last-child).arco-steps-item-next-error .arco-steps-item-tail:after{background-color:rgb(var(--danger-6))}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item .arco-steps-item-node{margin-top:8px}.arco-steps-mode-dot.arco-steps-vertical .arco-steps-item-active 
.arco-steps-item-node{margin-top:6px;margin-left:-1px}.arco-steps-mode-arrow .arco-steps-item{position:relative;display:flex;flex:1;align-items:center;height:72px;overflow:visible;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item:not(:last-child){margin-right:4px}.arco-steps-mode-arrow .arco-steps-item-wait{background-color:var(--color-fill-1)}.arco-steps-mode-arrow .arco-steps-item-process{background-color:rgb(var(--primary-6))}.arco-steps-mode-arrow .arco-steps-item-finish{background-color:var(--color-primary-light-1)}.arco-steps-mode-arrow .arco-steps-item-error{background-color:rgb(var(--danger-6))}.arco-steps-mode-arrow .arco-steps-item-content{display:inline-block;box-sizing:border-box}.arco-steps-mode-arrow .arco-steps-item:first-child .arco-steps-item-content{padding-left:16px}.arco-steps-mode-arrow .arco-steps-item:not(:first-child) .arco-steps-item-content{padding-left:52px}.arco-steps-mode-arrow .arco-steps-item-title{position:relative;display:inline-block;font-size:16px;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item-title:after{display:none!important}.arco-steps-mode-arrow .arco-steps-item-wait .arco-steps-item-title{color:var(--color-text-2)}.arco-steps-mode-arrow .arco-steps-item-process .arco-steps-item-title{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-finish .arco-steps-item-title{color:var(--color-text-1)}.arco-steps-mode-arrow .arco-steps-item-error .arco-steps-item-title{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-active .arco-steps-item-title{font-weight:500}.arco-steps-mode-arrow .arco-steps-item-description{max-width:none;margin-top:0;font-size:12px;white-space:nowrap}.arco-steps-mode-arrow .arco-steps-item-wait .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-arrow .arco-steps-item-process .arco-steps-item-description{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item-finish .arco-steps-item-description{color:var(--color-text-3)}.arco-steps-mode-arrow .arco-steps-item-error .arco-steps-item-description{color:var(--color-white)}.arco-steps-mode-arrow .arco-steps-item:not(:first-child):before{position:absolute;top:0;left:0;z-index:1;display:block;width:0;height:0;border-top:36px solid transparent;border-bottom:36px solid transparent;border-left:36px solid var(--color-bg-2);content:""}.arco-steps-mode-arrow .arco-steps-item:not(:last-child):after{position:absolute;top:0;right:-36px;z-index:2;display:block;clear:both;width:0;height:0;border-top:36px solid transparent;border-bottom:36px solid transparent;content:""}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-wait:after{border-left:36px solid var(--color-fill-1)}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-process:after{border-left:36px solid rgb(var(--primary-6))}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-error:after{border-left:36px solid rgb(var(--danger-6))}.arco-steps-mode-arrow .arco-steps-item:not(:last-child).arco-steps-item-finish:after{border-left:36px solid var(--color-primary-light-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item{height:40px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-title{font-size:14px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-description{display:none}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:first-child):before{border-top:20px solid transparent;border-bottom:20px solid transparent;border-left:20px solid 
var(--color-bg-2)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child):after{right:-20px;border-top:20px solid transparent;border-bottom:20px solid transparent;border-left:20px solid var(--color-fill-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:first-child .arco-steps-item-content{padding-left:20px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:first-child) .arco-steps-item-content{padding-left:40px}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item-error:not(:last-child):after{border-left:20px solid rgb(var(--danger-6))}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-wait:after{border-left:20px solid var(--color-fill-1)}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-process:after{border-left:20px solid rgb(var(--primary-6))}.arco-steps-mode-arrow.arco-steps-size-small .arco-steps-item:not(:last-child).arco-steps-item-finish:after{border-left:20px solid var(--color-primary-light-1)}.arco-steps-mode-navigation.arco-steps-label-horizontal .arco-steps-item:not(:last-child) .arco-steps-item-title:after{display:none}.arco-steps-mode-navigation .arco-steps-item{padding-left:20px;padding-right:10px;margin-right:32px}.arco-steps-mode-navigation .arco-steps-item:last-child{flex:1}.arco-steps-mode-navigation .arco-steps-item-content{margin-bottom:20px}.arco-steps-mode-navigation .arco-steps-item-description{padding-right:20px}.arco-steps-mode-navigation .arco-steps-item-active:after{content:"";position:absolute;display:block;height:2px;left:0;right:30px;bottom:0;background-color:rgb(var(--primary-6))}.arco-steps-mode-navigation .arco-steps-item-active:last-child:after{width:100%}.arco-steps-mode-navigation .arco-steps-item:not(:last-child) .arco-steps-item-content:after{position:absolute;top:10px;right:30px;display:inline-block;width:6px;height:6px;background-color:var(--color-bg-2);border:2px solid var(--color-text-4);border-bottom:none;border-left:none;-webkit-transform:rotate(45deg);transform:rotate(45deg);content:""}.arco-steps{display:flex}.arco-steps-changeable .arco-steps-item-title,.arco-steps-changeable .arco-steps-item-description{transition:all .1s cubic-bezier(0,0,1,1)}.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled){cursor:pointer}.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled):hover .arco-steps-item-content .arco-steps-item-title,.arco-steps-changeable .arco-steps-item:not(.arco-steps-item-active):not(.arco-steps-item-disabled):hover .arco-steps-item-content .arco-steps-item-description{color:rgb(var(--primary-6))}.arco-steps-line-less .arco-steps-item-title:after{display:none!important}.arco-steps-vertical{flex-direction:column}.arco-steps-vertical .arco-steps-item:not(:last-child){min-height:90px}.arco-steps-vertical .arco-steps-item-title:after{display:none!important}.arco-steps-vertical .arco-steps-item-description{max-width:none}.arco-steps-label-vertical .arco-steps-item-content{display:block;width:140px;text-align:center}.arco-steps-label-vertical .arco-steps-item-description{max-width:none}.switch-slide-text-enter-from{left:-100%!important}.switch-slide-text-enter-to{left:8px!important}.switch-slide-text-enter-active{transition:left .2s cubic-bezier(.34,.69,.1,1)}.switch-slide-text-leave-from{left:100%!important}.switch-slide-text-leave-to{left:26px!important}.switch-slide-text-leave-active{transition:left 
.2s cubic-bezier(.34,.69,.1,1)}.arco-switch{position:relative;box-sizing:border-box;min-width:40px;height:24px;padding:0;overflow:hidden;line-height:24px;vertical-align:middle;background-color:var(--color-fill-4);border:none;border-radius:12px;outline:none;cursor:pointer;transition:background-color .2s cubic-bezier(.34,.69,.1,1)}.arco-switch-handle{position:absolute;top:4px;left:4px;display:flex;align-items:center;justify-content:center;width:16px;height:16px;color:var(--color-neutral-3);font-size:12px;background-color:var(--color-bg-white);border-radius:50%;transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-switch-checked{background-color:rgb(var(--primary-6))}.arco-switch-checked .arco-switch-handle{left:calc(100% - 20px);color:rgb(var(--primary-6))}.arco-switch[disabled] .arco-switch-handle{color:var(--color-fill-2)}.arco-switch[disabled].arco-switch-checked .arco-switch-handle{color:var(--color-primary-light-3)}.arco-switch-text-holder{margin:0 8px 0 26px;font-size:12px;opacity:0}.arco-switch-text{position:absolute;top:0;left:26px;color:var(--color-white);font-size:12px}.arco-switch-checked .arco-switch-text-holder{margin:0 26px 0 8px}.arco-switch-checked .arco-switch-text{left:8px;color:var(--color-white)}.arco-switch[disabled]{background-color:var(--color-fill-2);cursor:not-allowed}.arco-switch[disabled] .arco-switch-text{color:var(--color-white)}.arco-switch[disabled].arco-switch-checked{background-color:var(--color-primary-light-3)}.arco-switch[disabled].arco-switch-checked .arco-switch-text{color:var(--color-white)}.arco-switch-loading{background-color:var(--color-fill-2)}.arco-switch-loading .arco-switch-handle{color:var(--color-neutral-3)}.arco-switch-loading .arco-switch-text{color:var(--color-white)}.arco-switch-loading.arco-switch-checked{background-color:var(--color-primary-light-3)}.arco-switch-loading.arco-switch-checked .arco-switch-handle{color:var(--color-primary-light-3)}.arco-switch-loading.arco-switch-checked .arco-switch-text{color:var(--color-primary-light-1)}.arco-switch-small{min-width:28px;height:16px;line-height:16px}.arco-switch-small.arco-switch-checked{padding-left:-2px}.arco-switch-small .arco-switch-handle{top:2px;left:2px;width:12px;height:12px;border-radius:8px}.arco-switch-small .arco-switch-handle-icon{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%) scale(.66667)}.arco-switch-small.arco-switch-checked .arco-switch-handle{left:calc(100% - 14px)}.arco-switch-type-round{min-width:40px;border-radius:var(--border-radius-small)}.arco-switch-type-round .arco-switch-handle{border-radius:2px}.arco-switch-type-round.arco-switch-small{min-width:28px;height:16px;line-height:16px;border-radius:2px}.arco-switch-type-round.arco-switch-small .arco-switch-handle{border-radius:1px}.arco-switch-type-line{min-width:36px;overflow:unset;background-color:transparent}.arco-switch-type-line:after{display:block;width:100%;height:6px;background-color:var(--color-fill-4);border-radius:3px;transition:background-color .2s cubic-bezier(.34,.69,.1,1);content:""}.arco-switch-type-line .arco-switch-handle{top:2px;left:0;width:20px;height:20px;background-color:var(--color-bg-white);border-radius:10px;box-shadow:0 1px 3px var(--color-neutral-6)}.arco-switch-type-line.arco-switch-checked{background-color:transparent}.arco-switch-type-line.arco-switch-checked:after{background-color:rgb(var(--primary-6))}.arco-switch-type-line.arco-switch-custom-color{--custom-color: 
var(--color-fill-4)}.arco-switch-type-line.arco-switch-custom-color:after{background-color:var(--custom-color)}.arco-switch-type-line.arco-switch-custom-color.arco-switch-checked{--custom-color: rgb(var(--primary-6))}.arco-switch-type-line.arco-switch-checked .arco-switch-handle{left:calc(100% - 20px)}.arco-switch-type-line[disabled]{background-color:transparent;cursor:not-allowed}.arco-switch-type-line[disabled]:after{background-color:var(--color-fill-2)}.arco-switch-type-line[disabled].arco-switch-checked{background-color:transparent}.arco-switch-type-line[disabled].arco-switch-checked:after{background-color:var(--color-primary-light-3)}.arco-switch-type-line.arco-switch-loading{background-color:transparent}.arco-switch-type-line.arco-switch-loading:after{background-color:var(--color-fill-2)}.arco-switch-type-line.arco-switch-loading.arco-switch-checked{background-color:transparent}.arco-switch-type-line.arco-switch-loading.arco-switch-checked:after{background-color:var(--color-primary-light-3)}.arco-switch-type-line.arco-switch-small{min-width:28px;height:16px;line-height:16px}.arco-switch-type-line.arco-switch-small.arco-switch-checked{padding-left:0}.arco-switch-type-line.arco-switch-small .arco-switch-handle{top:0px;width:16px;height:16px;border-radius:8px}.arco-switch-type-line.arco-switch-small .arco-switch-handle-icon{transform:translate(-50%,-50%) scale(1)}.arco-switch-type-line.arco-switch-small.arco-switch-checked .arco-switch-handle{left:calc(100% - 16px)}.arco-table-filters-content{box-sizing:border-box;min-width:100px;background:var(--color-bg-5);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-table-filters-list{max-height:200px;padding:4px 0;overflow-y:auto}.arco-table-filters-item{height:32px;padding:0 12px;font-size:14px;line-height:32px}.arco-table-filters-text{width:100%;max-width:160px;height:34px;margin-right:0;padding-left:10px;overflow:hidden;line-height:32px;white-space:nowrap;text-overflow:ellipsis;cursor:pointer}.arco-table-filters-bottom{box-sizing:border-box;height:38px;padding:0 12px;overflow:hidden;line-height:38px;border-top:1px solid var(--color-neutral-3)}.arco-table-filters-bottom>*:not(*:last-child){margin-right:8px}.arco-table{position:relative}.arco-table-column-handle{position:absolute;top:0;right:-4px;z-index:1;width:8px;height:100%;cursor:col-resize}.arco-table .arco-spin{display:flex;flex-direction:column;height:100%}.arco-table>.arco-spin>.arco-spin-children:after{z-index:2}.arco-table-footer{border-radius:0 0 var(--border-radius-medium) var(--border-radius-medium)}.arco-table-scroll-position-right .arco-table-col-fixed-left-last:after,.arco-table-scroll-position-middle .arco-table-col-fixed-left-last:after{box-shadow:inset 6px 0 8px -3px #00000026}.arco-table-scroll-position-left .arco-table-col-fixed-right-first:after,.arco-table-scroll-position-middle .arco-table-col-fixed-right-first:after{box-shadow:inset -6px 0 8px -3px #00000026}.arco-table-layout-fixed .arco-table-element{table-layout:fixed}.arco-table .arco-table-element{width:100%;min-width:100%;margin:0;border-collapse:separate;border-spacing:0}.arco-table-th{position:relative;box-sizing:border-box;color:rgb(var(--gray-10));font-weight:500;line-height:1.5715;text-align:left;background-color:var(--color-neutral-2)}.arco-table-th[colspan]{text-align:center}.arco-table-th-align-right{text-align:right}.arco-table-th-align-right 
.arco-table-cell-with-sorter{justify-content:flex-end}.arco-table-th-align-center{text-align:center}.arco-table-th-align-center .arco-table-cell-with-sorter{justify-content:center}.arco-table-td{box-sizing:border-box;color:rgb(var(--gray-10));line-height:1.5715;text-align:left;word-break:break-all;background-color:var(--color-bg-2);border-bottom:1px solid var(--color-neutral-3)}.arco-table-td-align-right{text-align:right}.arco-table-td-align-center{text-align:center}.arco-table-td.arco-table-drag-handle{cursor:move}.arco-table-cell{display:flex;align-items:center}.arco-table-cell-align-right{justify-content:flex-end;text-align:right}.arco-table-cell-align-center{justify-content:center;text-align:center}.arco-table-text-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-table-td-content{display:block;width:100%}.arco-table-th.arco-table-col-sorted{background-color:var(--color-neutral-3)}.arco-table-td.arco-table-col-sorted{background-color:var(--color-fill-1)}.arco-table-col-fixed-left,.arco-table-col-fixed-right{position:sticky;z-index:10}.arco-table-col-fixed-left-last:after,.arco-table-col-fixed-right-first:after{position:absolute;top:0;bottom:-1px;left:0;width:10px;box-shadow:none;transform:translate(-100%);transition:box-shadow .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-table-col-fixed-left-last:after{right:0;left:unset;transform:translate(100%)}.arco-table-cell-text-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-table-editable-row .arco-table-cell-wrap-value{border:1px solid var(--color-white);border-radius:var(--border-radius-medium);cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-editable-row:hover .arco-table-cell-wrap-value{border:1px solid var(--color-neutral-3)}.arco-table .arco-table-expand-btn{display:inline-flex;align-items:center;justify-content:center;width:14px;height:14px;padding:0;color:var(--color-text-2);font-size:12px;line-height:14px;background-color:var(--color-neutral-3);border:1px solid transparent;border-radius:2px;outline:none;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-table .arco-table-expand-btn:hover{color:var(--color-text-1);background-color:var(--color-neutral-4);border-color:transparent}.arco-table-cell-expand-icon{display:flex;align-items:center}.arco-table-cell-expand-icon .arco-table-cell-inline-icon{display:inline-flex;margin-right:4px}.arco-table-cell-expand-icon .arco-table-cell-inline-icon .arco-icon-loading{color:rgb(var(--primary-6))}.arco-table-cell-expand-icon-hidden{display:inline-block;width:14px;height:14px;margin-right:4px}.arco-table-tr-expand .arco-table-td{background-color:var(--color-fill-1)}.arco-table-cell-fixed-expand{position:sticky;left:0;box-sizing:border-box}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-container{border:none}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-th{border-bottom:1px solid var(--color-neutral-3)}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-th,.arco-table-tr-expand .arco-table-td .arco-table .arco-table-td{background-color:transparent}.arco-table-tr-expand .arco-table-td .arco-table .arco-table-pagination{margin-bottom:12px}.arco-table-th.arco-table-operation,.arco-table-td.arco-table-operation{text-align:center}.arco-table-th.arco-table-operation .arco-table-cell,.arco-table-td.arco-table-operation .arco-table-cell{display:flex;justify-content:center;padding:0}.arco-table-radio,.arco-table-checkbox{justify-content:center}.arco-table-checkbox 
.arco-checkbox,.arco-table-radio .arco-radio{padding-left:0}.arco-table-selection-checkbox-col,.arco-table-selection-radio-col,.arco-table-expand-col,.arco-table-drag-handle-col{width:40px;min-width:40px;max-width:40px}.arco-table-th{transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-table-cell-with-sorter{display:flex;align-items:center;cursor:pointer}.arco-table-cell-with-sorter:hover{background-color:rgba(var(--gray-4),.5)}.arco-table-cell-with-filter{display:flex;align-items:center}.arco-table-cell-next-ascend .arco-table-sorter-icon .arco-icon-caret-up,.arco-table-cell-next-descend .arco-table-sorter-icon .arco-icon-caret-down{color:var(--color-neutral-6)}.arco-table-sorter{display:inline-block;margin-left:8px;vertical-align:-3px}.arco-table-sorter.arco-table-sorter-direction-one{vertical-align:0}.arco-table-sorter-icon{position:relative;width:14px;height:8px;overflow:hidden;line-height:8px}.arco-table-sorter-icon .arco-icon-caret-up,.arco-table-sorter-icon .arco-icon-caret-down{position:absolute;top:50%;color:var(--color-neutral-5);font-size:12px;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-sorter-icon .arco-icon-caret-up{top:-2px;left:1px}.arco-table-sorter-icon .arco-icon-caret-down{top:-3px;left:1px}.arco-table-sorter-icon.arco-table-sorter-icon-active svg{color:rgb(var(--primary-6))}.arco-table-filters{position:absolute;top:0;right:0;display:flex;align-items:center;justify-content:center;width:24px;height:100%;line-height:1;vertical-align:0;background-color:transparent;cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-filters:hover,.arco-table-filters-open{background-color:var(--color-neutral-4)}.arco-table-filters svg{color:var(--color-text-2);font-size:16px;transition:all .1s cubic-bezier(0,0,1,1)}.arco-table-filters-active svg{color:rgb(var(--primary-6))}.arco-table-filters-align-left{position:relative;width:auto;margin-left:8px}.arco-table-filters-align-left svg{font-size:12px}.arco-table-filters-align-left:hover,.arco-table-filters-align-left-open{background:none}.arco-table-filters-align-left:hover:before,.arco-table-filters-align-left.arco-table-filters-open:before{background:var(--color-fill-4)}.arco-table-container{position:relative;border-radius:var(--border-radius-medium) var(--border-radius-medium) 0 0}.arco-table-header{flex-shrink:0;border-radius:var(--border-radius-medium) var(--border-radius-medium) 0 0}.arco-table-container{box-sizing:border-box;width:100%;min-height:0}.arco-table-container .arco-table-content{display:flex;flex-direction:column;width:auto;height:100%}.arco-table-container .arco-table-content-scroll-x{overflow-x:auto;overflow-y:hidden}.arco-table-container:before,.arco-table-container:after{position:absolute;z-index:1;width:10px;height:100%;box-shadow:none;transition:box-shadow .1s cubic-bezier(0,0,1,1);content:"";pointer-events:none}.arco-table-container:before{top:0;left:0;border-top-left-radius:var(--border-radius-medium)}.arco-table-container:after{top:0;right:0;border-top-right-radius:var(--border-radius-medium)}.arco-table-container:not(.arco-table-has-fixed-col-left).arco-table-scroll-position-right:before,.arco-table-container:not(.arco-table-has-fixed-col-left).arco-table-scroll-position-middle:before{box-shadow:inset 6px 0 8px -3px #00000026}.arco-table-container:not(.arco-table-has-fixed-col-right).arco-table-scroll-position-left:after,.arco-table-container:not(.arco-table-has-fixed-col-right).arco-table-scroll-position-middle:after{box-shadow:inset -6px 0 8px -3px 
#00000026}.arco-table-header{overflow-x:hidden;overflow-y:hidden;background-color:var(--color-neutral-2);scrollbar-color:transparent transparent}.arco-table-header-sticky{position:sticky;top:0;z-index:100}.arco-table:not(.arco-table-empty) .arco-table-header::-webkit-scrollbar{height:0;background-color:transparent}.arco-table.arco-table-empty .arco-table-header{overflow-x:auto}.arco-table-body{position:relative;width:100%;min-height:40px;overflow:auto;background-color:var(--color-bg-2)}.arco-table-border .arco-table-container{border-top:1px solid var(--color-neutral-3);border-left:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-scroll-y{border-bottom:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td{border-bottom:none}.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-left-last:after,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-left-last:after,.arco-table-border .arco-table-scroll-y .arco-table-body .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-right-first:after,.arco-table-border .arco-table-scroll-y tfoot .arco-table-tr:last-of-type .arco-table-td.arco-table-col-fixed-right-first:after{bottom:0}.arco-table-border .arco-table-tr .arco-table-th{border-bottom:1px solid var(--color-neutral-3)}.arco-table-border .arco-table-footer{border:1px solid var(--color-neutral-3);border-top:0}.arco-table-border:not(.arco-table-border-cell) .arco-table-container{border-right:1px solid var(--color-neutral-3)}.arco-table-border-cell .arco-table-th,.arco-table-border-cell .arco-table-td:not(.arco-table-tr-expand){border-right:1px solid var(--color-neutral-3)}.arco-table-border-cell .arco-table-th-resizing,.arco-table-border-cell .arco-table-td-resizing:not(.arco-table-tr-expand){border-right-color:rgb(var(--primary-6))}.arco-table-border-header-cell .arco-table-th{border-right:1px solid var(--color-neutral-3);border-bottom:1px solid var(--color-neutral-3)}.arco-table-border.arco-table-border-header-cell thead .arco-table-tr:first-child .arco-table-th:last-child{border-right:0}.arco-table-border-body-cell .arco-table-td:not(:last-child):not(.arco-table-tr-expand){border-right:1px solid var(--color-neutral-3)}.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right),.arco-table-stripe .arco-table-tr-drag .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td.arco-table-col-fixed-left:before,.arco-table-stripe .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-left:before,.arco-table-stripe:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):nth-child(even) .arco-table-td.arco-table-col-fixed-right:before,.arco-table-stripe .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-right:before{position:absolute;top:0;left:0;z-index:-1;width:100%;height:100%;background-color:var(--color-fill-1);content:""}.arco-table 
.arco-table-tr-draggable{cursor:move}.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right),.arco-table-hover .arco-table-tr-drag .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td.arco-table-col-fixed-left:before,.arco-table-hover .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-left:before,.arco-table-hover:not(.arco-table-dragging) .arco-table-tr:not(.arco-table-tr-empty):not(.arco-table-tr-summary):hover .arco-table-td.arco-table-col-fixed-right:before,.arco-table-hover .arco-table-tr-drag .arco-table-td.arco-table-col-fixed-right:before{position:absolute;top:0;left:0;z-index:-1;width:100%;height:100%;background-color:var(--color-fill-1);content:""}.arco-table-hover .arco-table-tr-expand:not(.arco-table-tr-empty):hover .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:var(--color-fill-1)}.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td:not(.arco-table-col-fixed-left):not(.arco-table-col-fixed-right){background-color:transparent}.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td.arco-table-col-fixed-left:before,.arco-table-tr-expand .arco-table-td .arco-table-hover .arco-table-tr:not(.arco-table-tr-empty) .arco-table-td.arco-table-col-fixed-right:before{background-color:transparent}.arco-table-tfoot{position:relative;z-index:1;flex-shrink:0;width:100%;overflow-x:auto;background-color:var(--color-neutral-2);box-shadow:0 -1px 0 var(--color-neutral-3);scrollbar-color:transparent transparent}.arco-table-tfoot::-webkit-scrollbar{height:0;background-color:transparent}.arco-table tfoot .arco-table-td{background-color:var(--color-neutral-2)}.arco-table-tr-checked .arco-table-td{background-color:var(--color-fill-1)}.arco-table .arco-table-cell{padding:9px 16px}.arco-table .arco-table-th,.arco-table .arco-table-td{font-size:14px}.arco-table .arco-table-footer{padding:9px 16px}.arco-table .arco-table-tr-expand .arco-table-td .arco-table{margin:-9px -16px -10px}.arco-table .arco-table-editable-row .arco-table-cell-wrap-value{padding:9px 16px}.arco-table-size-medium .arco-table-cell{padding:7px 16px}.arco-table-size-medium .arco-table-th,.arco-table-size-medium .arco-table-td{font-size:14px}.arco-table-size-medium .arco-table-footer{padding:7px 16px}.arco-table-size-medium .arco-table-tr-expand .arco-table-td .arco-table{margin:-7px -16px -8px}.arco-table-size-medium .arco-table-editable-row .arco-table-cell-wrap-value{padding:7px 16px}.arco-table-size-small .arco-table-cell{padding:5px 16px}.arco-table-size-small .arco-table-th,.arco-table-size-small .arco-table-td{font-size:14px}.arco-table-size-small .arco-table-footer{padding:5px 16px}.arco-table-size-small .arco-table-tr-expand .arco-table-td .arco-table{margin:-5px -16px -6px}.arco-table-size-small .arco-table-editable-row .arco-table-cell-wrap-value{padding:5px 16px}.arco-table-size-mini .arco-table-cell{padding:2px 16px}.arco-table-size-mini .arco-table-th,.arco-table-size-mini .arco-table-td{font-size:12px}.arco-table-size-mini .arco-table-footer{padding:2px 16px}.arco-table-size-mini .arco-table-tr-expand .arco-table-td 
.arco-table{margin:-2px -16px -3px}.arco-table-size-mini .arco-table-editable-row .arco-table-cell-wrap-value{padding:2px 16px}.arco-table-virtualized .arco-table-element{table-layout:fixed}.arco-table-virtualized div.arco-table-body div.arco-table-tr{display:flex}.arco-table-virtualized div.arco-table-body div.arco-table-td{display:flex;flex:1;align-items:center}.arco-table-pagination{display:flex;align-items:center;justify-content:flex-end;margin-top:12px}.arco-table-pagination-left{justify-content:flex-start}.arco-table-pagination-center{justify-content:center}.arco-table-pagination-top{margin-top:0;margin-bottom:12px}.arco-icon-hover.arco-tabs-icon-hover:before{width:16px;height:16px}.arco-tabs .arco-tabs-icon-hover{color:var(--color-text-2);font-size:12px;user-select:none}.arco-tabs-dropdown-icon{margin-left:6px;font-size:12px;user-select:none}.arco-tabs-tab-close-btn{margin-left:8px;user-select:none}.arco-tabs-nav-add-btn{display:inline-flex;align-items:center;justify-content:center;padding:0 8px;font-size:12px;user-select:none}.arco-tabs-add{position:relative}.arco-tabs-nav-button-left{margin-right:6px;margin-left:10px}.arco-tabs-nav-button-right{margin-right:10px;margin-left:6px}.arco-tabs-nav-button-up{margin-bottom:10px}.arco-tabs-nav-button-down{margin-top:10px}.arco-tabs-nav-button-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-tabs{position:relative;overflow:hidden}.arco-tabs-nav{position:relative;flex-shrink:0}.arco-tabs-nav:before{position:absolute;right:0;bottom:0;left:0;display:block;clear:both;height:1px;background-color:var(--color-neutral-3);content:""}.arco-tabs-nav-tab{display:flex;flex:1;overflow:hidden}.arco-tabs-nav-tab-list{position:relative;display:inline-block;white-space:nowrap;transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-extra{display:flex;align-items:center;width:auto;line-height:32px}.arco-tabs-nav-extra .arco-tabs-nav-add-btn{padding-left:0}.arco-tabs-tab{display:inline-flex;align-items:center;box-sizing:border-box;padding:4px 0;color:var(--color-text-2);font-size:14px;line-height:1.5715;outline:none;cursor:pointer;transition:color .2s cubic-bezier(0,0,1,1)}.arco-tabs-tab-title{display:inline-block}.arco-tabs-tab:hover{color:var(--color-text-2);font-weight:400}.arco-tabs-tab-disabled,.arco-tabs-tab-disabled:hover{color:var(--color-text-4);cursor:not-allowed}.arco-tabs-tab-active,.arco-tabs-tab-active:hover{color:rgb(var(--primary-6));font-weight:500}.arco-tabs-tab-active.arco-tabs-tab-disabled,.arco-tabs-tab-active:hover.arco-tabs-tab-disabled{color:var(--color-primary-light-3)}.arco-tabs-nav-ink{position:absolute;top:initial;right:initial;bottom:0;height:2px;background-color:rgb(var(--primary-6));transition:left .2s cubic-bezier(.34,.69,.1,1),width .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-ink.arco-tabs-header-ink-no-animation{transition:none}.arco-tabs-nav-ink-disabled{background-color:var(--color-primary-light-3)}.arco-tabs-nav-type-line .arco-tabs-nav-extra{line-height:40px}.arco-tabs-nav-type-line .arco-tabs-tab{margin:0 16px;padding:8px 0;line-height:1.5715}.arco-tabs-nav-type-line .arco-tabs-tab-title{position:relative;display:inline-block;padding:1px 0}.arco-tabs-nav-type-line .arco-tabs-tab-title:before{position:absolute;top:0;right:-8px;bottom:0;left:-8px;z-index:-1;background-color:transparent;border-radius:var(--border-radius-small);opacity:1;transition:background-color,opacity .2s cubic-bezier(0,0,1,1);content:""}.arco-tabs-nav-type-line .arco-tabs-tab:hover 
.arco-tabs-tab-title:before{background-color:var(--color-fill-2)}.arco-tabs-nav-type-line .arco-tabs-tab-active .arco-tabs-tab-title:before,.arco-tabs-nav-type-line .arco-tabs-tab-active:hover .arco-tabs-tab-title:before{background-color:transparent}.arco-tabs-nav-type-line .arco-tabs-tab-disabled .arco-tabs-tab-title:before,.arco-tabs-nav-type-line .arco-tabs-tab-disabled:hover .arco-tabs-tab-title:before{opacity:0}.arco-tabs-nav-type-line .arco-tabs-tab:focus-visible .arco-tabs-tab-title:before{border:2px solid rgb(var(--primary-6))}.arco-tabs-nav-type-line.arco-tabs-nav-horizontal>.arco-tabs-tab:first-of-type{margin-left:16px}.arco-tabs-nav-type-line.arco-tabs-nav-horizontal .arco-tabs-nav-tab-list-no-padding>.arco-tabs-tab:first-of-type,.arco-tabs-nav-text.arco-tabs-nav-horizontal .arco-tabs-nav-tab-list-no-padding>.arco-tabs-tab:first-of-type{margin-left:0}.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-type-card-gutter .arco-tabs-tab{position:relative;padding:4px 16px;font-size:14px;border:1px solid var(--color-neutral-3);transition:padding .2s cubic-bezier(0,0,1,1),color .2s cubic-bezier(0,0,1,1)}.arco-tabs-nav-type-card .arco-tabs-tab-closable,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-closable{padding-right:12px}.arco-tabs-nav-type-card .arco-tabs-tab-closable:not(.arco-tabs-tab-active):hover .arco-icon-hover:hover:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-closable:not(.arco-tabs-tab-active):hover .arco-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-tabs-nav-type-card .arco-tabs-tab:focus-visible:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab:focus-visible:before{position:absolute;top:-1px;right:0;bottom:-1px;left:-1px;border:2px solid rgb(var(--primary-6));content:""}.arco-tabs-nav-type-card .arco-tabs-tab:last-child:focus-visible:before,.arco-tabs-nav-type-card-gutter .arco-tabs-tab:last-child:focus-visible:before{right:-1px}.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:32px}.arco-tabs-nav-type-card .arco-tabs-tab{background-color:transparent;border-right:none}.arco-tabs-nav-type-card .arco-tabs-tab:last-child{border-right:1px solid var(--color-neutral-3);border-top-right-radius:var(--border-radius-small)}.arco-tabs-nav-type-card .arco-tabs-tab:first-child{border-top-left-radius:var(--border-radius-small)}.arco-tabs-nav-type-card .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tabs-nav-type-card .arco-tabs-tab-disabled,.arco-tabs-nav-type-card .arco-tabs-tab-disabled:hover{background-color:transparent}.arco-tabs-nav-type-card .arco-tabs-tab-active,.arco-tabs-nav-type-card .arco-tabs-tab-active:hover{background-color:transparent;border-bottom-color:var(--color-bg-2)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab{margin-left:4px;background-color:var(--color-fill-1);border-right:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small) var(--border-radius-small) 0 0}.arco-tabs-nav-type-card-gutter .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab-disabled,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-disabled:hover{background-color:var(--color-fill-1)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active,.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active:hover{background-color:transparent;border-bottom-color:var(--color-bg-2)}.arco-tabs-nav-type-card-gutter .arco-tabs-tab:first-child{margin-left:0}.arco-tabs-nav-type-text:before{display:none}.arco-tabs-nav-type-text 
.arco-tabs-tab{position:relative;margin:0 9px;padding:5px 0;font-size:14px;line-height:1.5715}.arco-tabs-nav-type-text .arco-tabs-tab:not(:first-of-type):before{position:absolute;top:50%;left:-9px;display:block;width:2px;height:12px;background-color:var(--color-fill-3);transform:translateY(-50%);content:""}.arco-tabs-nav-type-text .arco-tabs-tab-title{padding-right:8px;padding-left:8px;background-color:transparent}.arco-tabs-nav-type-text .arco-tabs-tab-title:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-text .arco-tabs-tab-active .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-active .arco-tabs-tab-title:hover,.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title:hover{background-color:transparent}.arco-tabs-nav-type-text .arco-tabs-tab-active.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title,.arco-tabs-nav-type-text .arco-tabs-tab-active.arco-tabs-nav-type-text .arco-tabs-tab-disabled .arco-tabs-tab-title:hover{background-color:var(--color-primary-light-3)}.arco-tabs-nav-type-text .arco-tabs-tab:focus-visible .arco-tabs-tab-title{margin:-2px;border:2px solid rgb(var(--primary-6))}.arco-tabs-nav-type-rounded:before{display:none}.arco-tabs-nav-type-rounded .arco-tabs-tab{margin:0 6px;padding:5px 16px;font-size:14px;background-color:transparent;border-radius:32px}.arco-tabs-nav-type-rounded .arco-tabs-tab:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-rounded .arco-tabs-tab-disabled:hover{background-color:transparent}.arco-tabs-nav-type-rounded .arco-tabs-tab-active,.arco-tabs-nav-type-rounded .arco-tabs-tab-active:hover{background-color:var(--color-fill-2)}.arco-tabs-nav-type-rounded .arco-tabs-tab:focus-visible{border-color:rgb(var(--primary-6))}.arco-tabs-nav-type-capsule:before{display:none}.arco-tabs-nav-type-capsule .arco-tabs-nav-tab:not(.arco-tabs-nav-tab-scroll){justify-content:flex-end}.arco-tabs-nav-type-capsule .arco-tabs-nav-tab-list{padding:3px;line-height:1;background-color:var(--color-fill-2);border-radius:var(--border-radius-small)}.arco-tabs-nav-type-capsule .arco-tabs-tab{position:relative;padding:0 10px;font-size:14px;line-height:26px;background-color:transparent}.arco-tabs-nav-type-capsule .arco-tabs-tab:hover{background-color:var(--color-bg-2)}.arco-tabs-nav-type-capsule .arco-tabs-tab-disabled:hover{background-color:unset}.arco-tabs-nav-type-capsule .arco-tabs-tab-active,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover{background-color:var(--color-bg-2)}.arco-tabs-nav-type-capsule .arco-tabs-tab-active:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active+.arco-tabs-tab:before,.arco-tabs-nav-type-capsule .arco-tabs-tab-active:hover+.arco-tabs-tab:before{opacity:0}.arco-tabs-nav-type-capsule .arco-tabs-tab:focus-visible{border-color:rgb(var(--primary-6))}.arco-tabs-nav-type-capsule.arco-tabs-nav-horizontal .arco-tabs-tab:not(:first-of-type){margin-left:3px}.arco-tabs-nav-type-capsule.arco-tabs-nav-horizontal .arco-tabs-tab:not(:first-of-type):before{position:absolute;top:50%;left:-4px;display:block;width:1px;height:14px;background-color:var(--color-fill-3);transform:translateY(-50%);transition:all .2s 
cubic-bezier(0,0,1,1);content:""}.arco-tabs-nav{position:relative;display:flex;align-items:center;overflow:hidden}.arco-tabs-content{box-sizing:border-box;width:100%;padding-top:16px;overflow:hidden}.arco-tabs-content-hide{display:none}.arco-tabs-content .arco-tabs-content-list{display:flex;width:100%}.arco-tabs-content .arco-tabs-content-item{flex-shrink:0;width:100%;height:0;overflow:hidden}.arco-tabs-content .arco-tabs-content-item.arco-tabs-content-item-active{height:auto}.arco-tabs-type-card>.arco-tabs-content,.arco-tabs-type-card-gutter>.arco-tabs-content{border:1px solid var(--color-neutral-3);border-top:none}.arco-tabs-content-animation{transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-horizontal.arco-tabs-justify{display:flex;flex-direction:column;height:100%}.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-content,.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-content-list,.arco-tabs-horizontal.arco-tabs-justify .arco-tabs-pane{height:100%}.arco-tabs-nav-size-mini.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:6px;padding-bottom:6px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-line .arco-tabs-nav-extra{font-size:12px;line-height:32px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:1px;padding-bottom:1px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-mini.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:12px;line-height:18px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:3px;padding-bottom:3px;font-size:12px}.arco-tabs-nav-size-mini.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:12px;line-height:24px}.arco-tabs-nav-size-small.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:6px;padding-bottom:6px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-line .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:1px;padding-bottom:1px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-small.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:14px;line-height:22px}.arco-tabs-nav-size-small.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-small.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:3px;padding-bottom:3px;font-size:14px}.arco-tabs-nav-size-small.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:14px;line-height:28px}.arco-tabs-nav-size-large.arco-tabs-nav-type-line .arco-tabs-tab{padding-top:10px;padding-bottom:10px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-line 
.arco-tabs-nav-extra{font-size:14px;line-height:44px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-tab,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-tab{padding-top:5px;padding-bottom:5px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-nav-extra,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-card .arco-tabs-nav-add-btn,.arco-tabs-nav-size-large.arco-tabs-nav-type-card-gutter .arco-tabs-nav-add-btn{height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-capsule .arco-tabs-tab{font-size:14px;line-height:30px}.arco-tabs-nav-size-large.arco-tabs-nav-type-capsule .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-size-large.arco-tabs-nav-type-rounded .arco-tabs-tab{padding-top:7px;padding-bottom:7px;font-size:14px}.arco-tabs-nav-size-large.arco-tabs-nav-type-rounded .arco-tabs-nav-extra{font-size:14px;line-height:36px}.arco-tabs-nav-vertical{float:left;height:100%}.arco-tabs-nav-vertical:before{position:absolute;top:0;right:0;bottom:0;left:initial;clear:both;width:1px;height:100%}.arco-tabs-nav-vertical .arco-tabs-nav-add-btn{height:auto;margin-top:8px;margin-left:0;padding:0 16px}.arco-tabs-nav-right{float:right}.arco-tabs-nav-vertical{flex-direction:column}.arco-tabs-nav-vertical .arco-tabs-nav-tab{flex-direction:column;height:100%}.arco-tabs-nav-vertical .arco-tabs-nav-ink{position:absolute;right:0;bottom:initial;left:initial;width:2px;transition:top .2s cubic-bezier(.34,.69,.1,1),height .2s cubic-bezier(.34,.69,.1,1)}.arco-tabs-nav-vertical .arco-tabs-nav-tab-list{height:auto}.arco-tabs-nav-vertical .arco-tabs-nav-tab-list-overflow-scroll{padding:6px 0}.arco-tabs-nav-vertical .arco-tabs-tab{display:block;margin:12px 0 0;white-space:nowrap}.arco-tabs-nav-vertical .arco-tabs-tab:first-of-type{margin-top:0}.arco-tabs-nav-right:before{right:unset;left:0}.arco-tabs-nav-right .arco-tabs-nav-ink{right:unset;left:0}.arco-tabs-nav-vertical{position:relative;box-sizing:border-box;height:100%}.arco-tabs-nav-vertical.arco-tabs-nav-type-line .arco-tabs-tab{padding:0 20px}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab{position:relative;margin:0;border:1px solid var(--color-neutral-3);border-bottom-color:transparent}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab:first-child{border-top-left-radius:var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab-active,.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab-active:hover{border-right-color:var(--color-bg-2);border-bottom-color:transparent}.arco-tabs-nav-vertical.arco-tabs-nav-type-card .arco-tabs-tab:last-child{border-bottom:1px solid var(--color-neutral-3);border-bottom-left-radius:var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab{position:relative;margin-left:0;border-radius:var(--border-radius-small) 0 0 var(--border-radius-small)}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab:not(:first-of-type){margin-top:4px}.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active,.arco-tabs-nav-vertical.arco-tabs-nav-type-card-gutter .arco-tabs-tab-active:hover{border-right-color:var(--color-bg-2);border-bottom-color:var(--color-neutral-3)}.arco-tabs-vertical .arco-tabs-content{width:auto;height:100%;padding:0}.arco-tabs-right.arco-tabs-vertical 
.arco-tabs-content{padding-right:16px}.arco-tabs-left.arco-tabs-vertical .arco-tabs-content{padding-left:16px}.arco-tabs-vertical.arco-tabs-type-card>.arco-tabs-content,.arco-tabs-vertical.arco-tabs-type-card-gutter>.arco-tabs-content{border:1px solid var(--color-neutral-3);border-left:none}body[arco-theme=dark] .arco-tabs-nav-type-capsule .arco-tabs-tab-active,body[arco-theme=dark] .arco-tabs-nav-type-capsule .arco-tabs-tab:hover{background-color:var(--color-fill-3)}.arco-tag{display:inline-flex;align-items:center;box-sizing:border-box;height:24px;padding:0 8px;color:var(--color-text-1);font-weight:500;font-size:12px;line-height:22px;vertical-align:middle;border:1px solid transparent;border-radius:var(--border-radius-small);overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-tag .arco-icon-hover.arco-tag-icon-hover:before{width:16px;height:16px}.arco-tag .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-tag-checkable{cursor:pointer;transition:all .1s cubic-bezier(0,0,1,1)}.arco-tag-checkable:hover{background-color:var(--color-fill-2)}.arco-tag-checked{background-color:var(--color-fill-2);border-color:transparent}.arco-tag-checkable.arco-tag-checked:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-tag-bordered,.arco-tag-checkable.arco-tag-checked.arco-tag-bordered:hover{border-color:var(--color-border-2)}.arco-tag-size-small{height:20px;font-size:12px;line-height:18px}.arco-tag-size-medium{height:24px;font-size:12px;line-height:22px}.arco-tag-size-large{height:32px;font-size:14px;line-height:30px}.arco-tag-hide{display:none}.arco-tag-loading{cursor:default;opacity:.8}.arco-tag-icon{margin-right:4px;color:var(--color-text-2)}.arco-tag.arco-tag-checked.arco-tag-red{color:rgb(var(--red-6));background-color:rgb(var(--red-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-red .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--red-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-red.arco-tag:hover{background-color:rgb(var(--red-2));border-color:transparent}.arco-tag-checked.arco-tag-red.arco-tag-bordered,.arco-tag-checked.arco-tag-red.arco-tag-bordered:hover{border-color:rgb(var(--red-6))}.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-red .arco-tag-loading-icon{color:rgb(var(--red-6))}.arco-tag.arco-tag-checked.arco-tag-orangered{color:rgb(var(--orangered-6));background-color:rgb(var(--orangered-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-orangered .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--orangered-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-orangered.arco-tag:hover{background-color:rgb(var(--orangered-2));border-color:transparent}.arco-tag-checked.arco-tag-orangered.arco-tag-bordered,.arco-tag-checked.arco-tag-orangered.arco-tag-bordered:hover{border-color:rgb(var(--orangered-6))}.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-orangered .arco-tag-loading-icon{color:rgb(var(--orangered-6))}.arco-tag.arco-tag-checked.arco-tag-orange{color:rgb(var(--orange-6));background-color:rgb(var(--orange-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-orange 
.arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--orange-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-orange.arco-tag:hover{background-color:rgb(var(--orange-2));border-color:transparent}.arco-tag-checked.arco-tag-orange.arco-tag-bordered,.arco-tag-checked.arco-tag-orange.arco-tag-bordered:hover{border-color:rgb(var(--orange-6))}.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-orange .arco-tag-loading-icon{color:rgb(var(--orange-6))}.arco-tag.arco-tag-checked.arco-tag-gold{color:rgb(var(--gold-6));background-color:rgb(var(--gold-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-gold .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--gold-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-gold.arco-tag:hover{background-color:rgb(var(--gold-3));border-color:transparent}.arco-tag-checked.arco-tag-gold.arco-tag-bordered,.arco-tag-checked.arco-tag-gold.arco-tag-bordered:hover{border-color:rgb(var(--gold-6))}.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-gold .arco-tag-loading-icon{color:rgb(var(--gold-6))}.arco-tag.arco-tag-checked.arco-tag-lime{color:rgb(var(--lime-6));background-color:rgb(var(--lime-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-lime .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--lime-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-lime.arco-tag:hover{background-color:rgb(var(--lime-2));border-color:transparent}.arco-tag-checked.arco-tag-lime.arco-tag-bordered,.arco-tag-checked.arco-tag-lime.arco-tag-bordered:hover{border-color:rgb(var(--lime-6))}.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-lime .arco-tag-loading-icon{color:rgb(var(--lime-6))}.arco-tag.arco-tag-checked.arco-tag-green{color:rgb(var(--green-6));background-color:rgb(var(--green-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-green .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--green-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-green.arco-tag:hover{background-color:rgb(var(--green-2));border-color:transparent}.arco-tag-checked.arco-tag-green.arco-tag-bordered,.arco-tag-checked.arco-tag-green.arco-tag-bordered:hover{border-color:rgb(var(--green-6))}.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-green .arco-tag-loading-icon{color:rgb(var(--green-6))}.arco-tag.arco-tag-checked.arco-tag-cyan{color:rgb(var(--cyan-6));background-color:rgb(var(--cyan-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-cyan .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--cyan-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-cyan.arco-tag:hover{background-color:rgb(var(--cyan-2));border-color:transparent}.arco-tag-checked.arco-tag-cyan.arco-tag-bordered,.arco-tag-checked.arco-tag-cyan.arco-tag-bordered:hover{border-color:rgb(var(--cyan-6))}.arco-tag.arco-tag-checked.arco-tag-cyan .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-cyan .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-cyan 
.arco-tag-loading-icon{color:rgb(var(--cyan-6))}.arco-tag.arco-tag-checked.arco-tag-blue{color:rgb(var(--blue-6));background-color:rgb(var(--blue-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-blue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--blue-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-blue.arco-tag:hover{background-color:rgb(var(--blue-2));border-color:transparent}.arco-tag-checked.arco-tag-blue.arco-tag-bordered,.arco-tag-checked.arco-tag-blue.arco-tag-bordered:hover{border-color:rgb(var(--blue-6))}.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-blue .arco-tag-loading-icon{color:rgb(var(--blue-6))}.arco-tag.arco-tag-checked.arco-tag-arcoblue{color:rgb(var(--arcoblue-6));background-color:rgb(var(--arcoblue-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--arcoblue-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-arcoblue.arco-tag:hover{background-color:rgb(var(--arcoblue-2));border-color:transparent}.arco-tag-checked.arco-tag-arcoblue.arco-tag-bordered,.arco-tag-checked.arco-tag-arcoblue.arco-tag-bordered:hover{border-color:rgb(var(--arcoblue-6))}.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-arcoblue .arco-tag-loading-icon{color:rgb(var(--arcoblue-6))}.arco-tag.arco-tag-checked.arco-tag-purple{color:rgb(var(--purple-6));background-color:rgb(var(--purple-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-purple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--purple-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-purple.arco-tag:hover{background-color:rgb(var(--purple-2));border-color:transparent}.arco-tag-checked.arco-tag-purple.arco-tag-bordered,.arco-tag-checked.arco-tag-purple.arco-tag-bordered:hover{border-color:rgb(var(--purple-6))}.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-purple .arco-tag-loading-icon{color:rgb(var(--purple-6))}.arco-tag.arco-tag-checked.arco-tag-pinkpurple{color:rgb(var(--pinkpurple-6));background-color:rgb(var(--pinkpurple-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--pinkpurple-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-pinkpurple.arco-tag:hover{background-color:rgb(var(--pinkpurple-2));border-color:transparent}.arco-tag-checked.arco-tag-pinkpurple.arco-tag-bordered,.arco-tag-checked.arco-tag-pinkpurple.arco-tag-bordered:hover{border-color:rgb(var(--pinkpurple-6))}.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-pinkpurple .arco-tag-loading-icon{color:rgb(var(--pinkpurple-6))}.arco-tag.arco-tag-checked.arco-tag-magenta{color:rgb(var(--magenta-6));background-color:rgb(var(--magenta-1));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-magenta 
.arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--magenta-2))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-magenta.arco-tag:hover{background-color:rgb(var(--magenta-2));border-color:transparent}.arco-tag-checked.arco-tag-magenta.arco-tag-bordered,.arco-tag-checked.arco-tag-magenta.arco-tag-bordered:hover{border-color:rgb(var(--magenta-6))}.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-magenta .arco-tag-loading-icon{color:rgb(var(--magenta-6))}.arco-tag.arco-tag-checked.arco-tag-gray{color:rgb(var(--gray-6));background-color:rgb(var(--gray-2));border:1px solid transparent}.arco-tag.arco-tag-checked.arco-tag-gray .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgb(var(--gray-3))}.arco-tag.arco-tag-checkable.arco-tag-checked.arco-tag-gray.arco-tag:hover{background-color:rgb(var(--gray-3));border-color:transparent}.arco-tag-checked.arco-tag-gray.arco-tag-bordered,.arco-tag-checked.arco-tag-gray.arco-tag-bordered:hover{border-color:rgb(var(--gray-6))}.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-icon,.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-close-btn,.arco-tag.arco-tag-checked.arco-tag-gray .arco-tag-loading-icon{color:rgb(var(--gray-6))}.arco-tag.arco-tag-custom-color{color:var(--color-white)}.arco-tag.arco-tag-custom-color .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:#fff3}.arco-tag .arco-tag-close-btn{margin-left:4px;font-size:12px}.arco-tag .arco-tag-close-btn>svg{position:relative}.arco-tag .arco-tag-loading-icon{margin-left:4px;font-size:12px}body[arco-theme=dark] .arco-tag-checked{color:#ffffffe6}body[arco-theme=dark] .arco-tag-checked.arco-tag-red{background-color:rgba(var(--red-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-red .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--red-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-red:hover{background-color:rgba(var(--red-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orangered{background-color:rgba(var(--orangered-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orangered .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--orangered-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-orangered:hover{background-color:rgba(var(--orangered-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orange{background-color:rgba(var(--orange-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-orange .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--orange-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-orange:hover{background-color:rgba(var(--orange-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gold{background-color:rgba(var(--gold-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gold .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--gold-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-gold:hover{background-color:rgba(var(--gold-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-lime{background-color:rgba(var(--lime-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-lime .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--lime-6),.35)}body[arco-theme=dark] 
.arco-tag-checkable.arco-tag-checked.arco-tag-lime:hover{background-color:rgba(var(--lime-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-green{background-color:rgba(var(--green-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-green .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--green-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-green:hover{background-color:rgba(var(--green-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-cyan{background-color:rgba(var(--cyan-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-cyan .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--cyan-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-cyan:hover{background-color:rgba(var(--cyan-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-blue{background-color:rgba(var(--blue-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-blue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--blue-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-blue:hover{background-color:rgba(var(--blue-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-arcoblue{background-color:rgba(var(--arcoblue-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-arcoblue .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--arcoblue-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-arcoblue:hover{background-color:rgba(var(--arcoblue-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-purple{background-color:rgba(var(--purple-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-purple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--purple-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-purple:hover{background-color:rgba(var(--purple-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-pinkpurple{background-color:rgba(var(--pinkpurple-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-pinkpurple .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--pinkpurple-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-pinkpurple:hover{background-color:rgba(var(--pinkpurple-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-magenta{background-color:rgba(var(--magenta-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-magenta .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--magenta-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-magenta:hover{background-color:rgba(var(--magenta-6),.35)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gray{background-color:rgba(var(--gray-6),.2)}body[arco-theme=dark] .arco-tag-checked.arco-tag-gray .arco-icon-hover.arco-tag-icon-hover:hover:before{background-color:rgba(var(--gray-6),.35)}body[arco-theme=dark] .arco-tag-checkable.arco-tag-checked.arco-tag-gray:hover{background-color:rgba(var(--gray-6),.35)}.arco-textarea-wrapper{display:inline-flex;box-sizing:border-box;color:var(--color-text-1);font-size:14px;background-color:var(--color-fill-2);border:1px solid transparent;border-radius:var(--border-radius-small);cursor:text;transition:color .1s cubic-bezier(0,0,1,1),border-color .1s cubic-bezier(0,0,1,1),background-color .1s 
cubic-bezier(0,0,1,1);position:relative;display:inline-block;width:100%;padding-right:0;padding-left:0;overflow:hidden}.arco-textarea-wrapper:hover{background-color:var(--color-fill-3);border-color:transparent}.arco-textarea-wrapper:focus-within,.arco-textarea-wrapper.arco-textarea-focus{background-color:var(--color-bg-2);border-color:rgb(var(--primary-6));box-shadow:0 0 0 0 var(--color-primary-light-2)}.arco-textarea-wrapper.arco-textarea-disabled{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent;cursor:not-allowed}.arco-textarea-wrapper.arco-textarea-disabled:hover{color:var(--color-text-4);background-color:var(--color-fill-2);border-color:transparent}.arco-textarea-wrapper.arco-textarea-disabled .arco-textarea-prefix,.arco-textarea-wrapper.arco-textarea-disabled .arco-textarea-suffix{color:inherit}.arco-textarea-wrapper.arco-textarea-error{background-color:var(--color-danger-light-1);border-color:transparent}.arco-textarea-wrapper.arco-textarea-error:hover{background-color:var(--color-danger-light-2);border-color:transparent}.arco-textarea-wrapper.arco-textarea-error:focus-within,.arco-textarea-wrapper.arco-textarea-error.arco-textarea-wrapper-focus{background-color:var(--color-bg-2);border-color:rgb(var(--danger-6));box-shadow:0 0 0 0 var(--color-danger-light-2)}.arco-textarea-wrapper .arco-textarea-prefix,.arco-textarea-wrapper .arco-textarea-suffix{display:inline-flex;flex-shrink:0;align-items:center;white-space:nowrap;user-select:none}.arco-textarea-wrapper .arco-textarea-prefix>svg,.arco-textarea-wrapper .arco-textarea-suffix>svg{font-size:14px}.arco-textarea-wrapper .arco-textarea-prefix{padding-right:12px;color:var(--color-text-2)}.arco-textarea-wrapper .arco-textarea-suffix{padding-left:12px;color:var(--color-text-2)}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon{display:inline-flex}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-validating{color:rgb(var(--primary-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-success{color:rgb(var(--success-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-warning{color:rgb(var(--warning-6))}.arco-textarea-wrapper .arco-textarea-suffix .arco-feedback-icon-status-error{color:rgb(var(--danger-6))}.arco-textarea-wrapper .arco-textarea-clear-btn{align-self:center;color:var(--color-text-2);font-size:12px;visibility:hidden;cursor:pointer}.arco-textarea-wrapper .arco-textarea-clear-btn>svg{position:relative;transition:color .1s cubic-bezier(0,0,1,1)}.arco-textarea-wrapper:hover .arco-textarea-clear-btn{visibility:visible}.arco-textarea-wrapper:not(.arco-textarea-focus) .arco-textarea-icon-hover:hover:before{background-color:var(--color-fill-4)}.arco-textarea-wrapper .arco-textarea-word-limit{position:absolute;right:10px;bottom:6px;color:var(--color-text-3);font-size:12px;user-select:none}.arco-textarea-wrapper.arco-textarea-scroll .arco-textarea-word-limit{right:25px}.arco-textarea-wrapper .arco-textarea-clear-btn{position:absolute;top:50%;right:10px;transform:translateY(-50%)}.arco-textarea-wrapper.arco-textarea-scroll .arco-textarea-clear-btn{right:25px}.arco-textarea-wrapper:hover .arco-textarea-clear-btn{display:block}.arco-textarea-wrapper 
.arco-textarea-mirror{position:absolute;visibility:hidden}.arco-textarea{width:100%;color:inherit;background:none;border:none;border-radius:0;outline:none;cursor:inherit;-webkit-appearance:none;-webkit-tap-highlight-color:rgba(0,0,0,0);display:block;box-sizing:border-box;height:100%;min-height:32px;padding:4px 12px;font-size:14px;line-height:1.5715;vertical-align:top;resize:vertical}.arco-textarea::placeholder{color:var(--color-text-3)}.arco-textarea[disabled]::placeholder{color:var(--color-text-4)}.arco-textarea[disabled]{-webkit-text-fill-color:var(--color-text-4)}.arco-timepicker{position:relative;display:flex;box-sizing:border-box;padding:0}.arco-timepicker-container{overflow:hidden;background-color:var(--color-bg-popup);border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-medium);box-shadow:0 2px 5px #0000001a}.arco-timepicker-column{box-sizing:border-box;width:64px;height:224px;overflow:hidden}.arco-timepicker-column:not(:last-child){border-right:1px solid var(--color-neutral-3)}.arco-timepicker-column:hover{overflow-y:auto}.arco-timepicker-column ul{box-sizing:border-box;margin:0;padding:0;list-style:none}.arco-timepicker-column ul:after{display:block;width:100%;height:192px;content:""}.arco-timepicker-cell{padding:4px 0;color:var(--color-text-1);font-weight:500;cursor:pointer}.arco-timepicker-cell-inner{height:24px;padding-left:24px;font-size:14px;line-height:24px}.arco-timepicker-cell:not(.arco-timepicker-cell-selected):not(.arco-timepicker-cell-disabled):hover .arco-timepicker-cell-inner{background-color:var(--color-fill-2)}.arco-timepicker-cell-selected .arco-timepicker-cell-inner{font-weight:500;background-color:var(--color-fill-2)}.arco-timepicker-cell-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-timepicker-footer-extra-wrapper{padding:8px;color:var(--color-text-1);font-size:12px;border-top:1px solid var(--color-neutral-3)}.arco-timepicker-footer-btn-wrapper{display:flex;justify-content:space-between;padding:8px;border-top:1px solid var(--color-neutral-3)}.arco-timepicker-footer-btn-wrapper :only-child{margin-left:auto}.arco-timeline{display:flex;flex-direction:column}.arco-timeline-item{position:relative;min-height:78px;padding-left:6px;color:var(--color-text-1);font-size:14px}.arco-timeline-item-label{color:var(--color-text-3);font-size:12px;line-height:1.667}.arco-timeline-item-content{margin-bottom:4px;color:var(--color-text-1);font-size:14px;line-height:1.5715}.arco-timeline-item-content-wrapper{position:relative;margin-left:16px}.arco-timeline-item.arco-timeline-item-last>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-line{display:none}.arco-timeline-item-dot-wrapper{position:absolute;left:0;height:100%;text-align:center}.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content{position:relative;width:6px;height:22.001px;line-height:22.001px}.arco-timeline-item-dot{position:relative;top:50%;box-sizing:border-box;width:6px;height:6px;margin-top:-50%;color:rgb(var(--primary-6));border-radius:var(--border-radius-circle)}.arco-timeline-item-dot-solid{background-color:rgb(var(--primary-6))}.arco-timeline-item-dot-hollow{background-color:var(--color-bg-2);border:2px solid rgb(var(--primary-6))}.arco-timeline-item-dot-custom{position:absolute;top:50%;left:50%;display:inline-flex;box-sizing:border-box;color:rgb(var(--primary-6));background-color:var(--color-bg-2);transform:translate(-50%) translateY(-50%);transform-origin:center}.arco-timeline-item-dot-custom 
svg{color:inherit}.arco-timeline-item-dot-line{position:absolute;top:18.0005px;bottom:-4.0005px;left:50%;box-sizing:border-box;width:1px;border-color:var(--color-neutral-3);border-left-width:1px;transform:translate(-50%)}.arco-timeline-is-reverse{flex-direction:column-reverse}.arco-timeline-alternate{overflow:hidden}.arco-timeline-alternate .arco-timeline-item-vertical-left{padding-left:0}.arco-timeline-alternate .arco-timeline-item-vertical-left>.arco-timeline-item-dot-wrapper{left:50%}.arco-timeline-alternate .arco-timeline-item-vertical-left>.arco-timeline-item-content-wrapper{left:50%;width:50%;margin-left:22px;padding-right:22px}.arco-timeline-alternate .arco-timeline-item-vertical-right{padding-right:0}.arco-timeline-alternate .arco-timeline-item-vertical-right>.arco-timeline-item-dot-wrapper{left:50%}.arco-timeline-alternate .arco-timeline-item-vertical-right>.arco-timeline-item-content-wrapper{left:0;width:50%;margin-right:0;margin-left:-16px;padding-right:16px;text-align:right}.arco-timeline-right .arco-timeline-item-vertical-right{padding-right:6px}.arco-timeline-right .arco-timeline-item-vertical-right>.arco-timeline-item-dot-wrapper{right:0;left:unset}.arco-timeline-right .arco-timeline-item-vertical-right>.arco-timeline-item-content-wrapper{margin-right:16px;margin-left:0;text-align:right}.arco-timeline-item-label-relative>.arco-timeline-item-label{position:absolute;top:0;box-sizing:border-box;max-width:100px}.arco-timeline-item-vertical-left.arco-timeline-item-label-relative{margin-left:100px}.arco-timeline-item-vertical-left.arco-timeline-item-label-relative>.arco-timeline-item-label{left:0;padding-right:16px;text-align:right;transform:translate(-100%)}.arco-timeline-item-vertical-right.arco-timeline-item-label-relative{margin-right:100px}.arco-timeline-item-vertical-right.arco-timeline-item-label-relative>.arco-timeline-item-label{right:0;padding-left:16px;text-align:left;transform:translate(100%)}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative{margin-top:50px}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative>.arco-timeline-item-label{padding-bottom:16px;transform:translateY(-100%)}.arco-timeline-item-horizontal-top.arco-timeline-item-label-relative>.arco-timeline-item-content{margin-bottom:0}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative{margin-bottom:50px}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative>.arco-timeline-item-content{margin-bottom:0}.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative>.arco-timeline-item-label{top:unset;bottom:0;padding-top:16px;text-align:left;transform:translateY(100%)}.arco-timeline-alternate .arco-timeline-item-vertical-left.arco-timeline-item-label-relative{margin-left:0}.arco-timeline-alternate .arco-timeline-item-vertical-left.arco-timeline-item-label-relative>.arco-timeline-item-label{left:0;width:50%;max-width:unset;transform:none}.arco-timeline-alternate .arco-timeline-item-vertical-right.arco-timeline-item-label-relative{margin-right:0}.arco-timeline-alternate .arco-timeline-item-vertical-right.arco-timeline-item-label-relative>.arco-timeline-item-label{right:0;width:50%;max-width:unset;transform:none}.arco-timeline-alternate .arco-timeline-item-horizontal-top.arco-timeline-item-label-relative{margin-top:0}.arco-timeline-alternate 
.arco-timeline-item-horizontal-bottom.arco-timeline-item-label-relative{margin-bottom:0}.arco-timeline-direction-horizontal{display:flex;flex-direction:row}.arco-timeline-direction-horizontal.arco-timeline-is-reverse{flex-direction:row-reverse}.arco-timeline-item-dot-line-is-horizontal{top:50%;right:4px;left:12px;width:unset;height:1px;border-top-width:1px;border-left:none;transform:translateY(-50%)}.arco-timeline-item-horizontal-bottom,.arco-timeline-item-horizontal-top{flex:1;min-height:unset;padding-right:0;padding-left:0}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper{top:0;width:100%;height:auto}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot{top:unset;margin-top:unset}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content,.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper .arco-timeline-item-dot-content{height:6px;line-height:6px}.arco-timeline-item-horizontal-top{padding-top:6px}.arco-timeline-item-horizontal-top>.arco-timeline-item-dot-wrapper{top:0;bottom:unset}.arco-timeline-item-horizontal-top>.arco-timeline-item-content-wrapper{margin-top:16px;margin-left:0}.arco-timeline-item-horizontal-bottom{padding-bottom:6px}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-dot-wrapper{top:unset;bottom:0}.arco-timeline-item-horizontal-bottom>.arco-timeline-item-content-wrapper{margin-bottom:16px;margin-left:0}.arco-timeline-alternate.arco-timeline-direction-horizontal{align-items:center;min-height:200px;overflow:visible}.arco-timeline-alternate.arco-timeline-direction-horizontal .arco-timeline-item-horizontal-bottom{margin-top:6px;transform:translateY(-50%)}.arco-timeline-alternate.arco-timeline-direction-horizontal .arco-timeline-item-horizontal-top{margin-top:-6px;transform:translateY(50%)}.arco-tooltip-content{max-width:350px;padding:8px 12px;color:#fff;font-size:14px;line-height:1.5715;text-align:left;word-wrap:break-word;background-color:var(--color-tooltip-bg);border-radius:var(--border-radius-small)}.arco-tooltip-mini{padding:4px 12px;font-size:14px}.arco-tooltip-popup-arrow{background-color:var(--color-tooltip-bg)}.arco-transfer{display:flex;align-items:center}.arco-transfer-view{display:flex;flex-direction:column;box-sizing:border-box;width:200px;height:224px;border:1px solid var(--color-neutral-3);border-radius:var(--border-radius-small)}.arco-transfer-view-search{padding:8px 12px 4px}.arco-transfer-view-list{flex:1}.arco-transfer-view-custom-list{flex:1;overflow:auto}.arco-transfer-view-header{display:flex;align-items:center;padding:0 10px}.arco-transfer-view-header>*:first-child{flex:1;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-view-header>*:first-child:not(:last-child){margin-right:8px}.arco-transfer-view-header{height:40px;color:var(--color-text-1);font-weight:500;font-size:14px;line-height:40px;background-color:var(--color-fill-1)}.arco-transfer-view-header-title{display:flex;align-items:center}.arco-transfer-view-header-title .arco-checkbox{overflow:hidden;white-space:nowrap;text-overflow:ellipsis;font-size:inherit}.arco-transfer-view-header-title 
.arco-checkbox-text{color:inherit}.arco-transfer-view-header-clear-btn{color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-transfer-view-header-clear-btn:hover:before{background-color:var(--color-fill-3)}.arco-transfer-view-header-count{margin-right:2px;color:var(--color-text-3);font-weight:400;font-size:12px}.arco-transfer-view-body{flex:1 1 auto;overflow:hidden}.arco-transfer-view-body .arco-transfer-view-empty{display:flex;flex-direction:column;align-items:center;justify-content:center;height:100%}.arco-transfer-view .arco-scrollbar{height:100%}.arco-transfer-view .arco-scrollbar-container{height:100%;overflow:auto}.arco-transfer-view .arco-list{border-radius:0}.arco-transfer-view .arco-list-footer{position:relative;display:flex;align-items:center;box-sizing:border-box;height:40px;padding:0 8px}.arco-transfer-view .arco-list .arco-pagination{position:absolute;top:50%;right:8px;margin:0;transform:translateY(-50%)}.arco-transfer-view .arco-list .arco-pagination-jumper-input{width:24px}.arco-transfer-view .arco-list .arco-pagination-jumper-separator{padding:0 8px}.arco-transfer-view .arco-checkbox{padding-left:6px}.arco-transfer-view .arco-checkbox-wrapper{display:inline}.arco-transfer-view .arco-checkbox .arco-icon-hover:hover:before{background-color:var(--color-fill-3)}.arco-transfer-list-item{position:relative;display:flex;align-items:center;height:36px;padding:0 10px;color:var(--color-text-1);line-height:36px;list-style:none;background-color:transparent;cursor:default}.arco-transfer-list-item-content{font-size:14px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-list-item-checkbox .arco-checkbox-label{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.arco-transfer-list-item-disabled{color:var(--color-text-4);background-color:transparent;cursor:not-allowed}.arco-transfer-list-item:not(.arco-transfer-list-item-disabled):hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-transfer-list-item .arco-checkbox{width:100%}.arco-transfer-list-item .arco-checkbox-text{color:inherit}.arco-transfer-list-item-remove-btn{margin-left:auto;color:var(--color-text-2);font-size:12px;cursor:pointer}.arco-transfer-list-item-remove-btn:hover:before{background-color:var(--color-fill-3)}.arco-transfer-list-item-draggable:before{position:absolute;right:0;left:0;display:block;height:2px;border-radius:1px;content:""}.arco-transfer-list-item-gap-bottom:before{bottom:-2px;background-color:rgb(var(--primary-6))}.arco-transfer-list-item-gap-top:before{top:-2px;background-color:rgb(var(--primary-6))}.arco-transfer-list-item-dragging{color:var(--color-text-4)!important;background-color:var(--color-fill-1)!important}.arco-transfer-list-item-dragged{animation:arco-transfer-drag-item-blink .4s;animation-timing-function:cubic-bezier(0,0,1,1)}.arco-transfer-operations{padding:0 20px}.arco-transfer-operations .arco-btn{display:block}.arco-transfer-operations .arco-btn:last-child{margin-top:12px}.arco-transfer-operations-words .arco-btn{width:100%;padding:0 12px;text-align:left}.arco-transfer-simple .arco-transfer-view-source{border-right:none;border-top-right-radius:0;border-bottom-right-radius:0}.arco-transfer-simple .arco-transfer-view-target{border-top-left-radius:0;border-bottom-left-radius:0}.arco-transfer-disabled .arco-transfer-view-header{color:var(--color-text-4)}@keyframes arco-transfer-drag-item-blink{0%{background-color:var(--color-primary-light-1)}to{background-color:transparent}}.arco-tree-select-popup{box-sizing:border-box;padding:4px 
0;background-color:var(--color-bg-popup);border:1px solid var(--color-fill-3);border-radius:var(--border-radius-medium);box-shadow:0 4px 10px #0000001a}.arco-tree-select-popup .arco-tree-select-tree-wrapper{height:100%;max-height:200px;padding-right:4px;padding-left:10px;overflow:auto}.arco-tree-select-popup .arco-tree-node{padding-left:0}.arco-tree-select-highlight{font-weight:500}.arco-icon-hover.arco-tree-node-icon-hover:before{width:16px;height:16px}.arco-tree-node-switcher{position:relative;display:flex;flex-shrink:0;align-items:center;width:12px;height:32px;margin-right:10px;color:var(--color-text-2);font-size:12px;cursor:pointer;user-select:none}.arco-tree-node-switcher-icon{position:relative;margin:0 auto}.arco-tree-node-switcher-icon svg{position:relative;transform:rotate(-90deg);transition:transform .2s cubic-bezier(.34,.69,.1,1)}.arco-tree-node-expanded .arco-tree-node-switcher-icon svg,.arco-tree-node-is-leaf .arco-tree-node-switcher-icon svg{transform:rotate(0)}.arco-tree-node-drag-icon{margin-left:120px;color:rgb(var(--primary-6));opacity:0}.arco-tree-node-custom-icon{margin-right:10px;font-size:inherit;line-height:1;cursor:pointer;user-select:none}.arco-tree-node .arco-icon-loading{color:rgb(var(--primary-6))}.arco-tree-node-minus-icon,.arco-tree-node-plus-icon{position:relative;display:block;width:14px;height:14px;background:var(--color-fill-2);border-radius:var(--border-radius-small);cursor:pointer}.arco-tree-node-minus-icon:after,.arco-tree-node-plus-icon:after{position:absolute;top:50%;left:50%;display:block;width:6px;height:2px;margin-top:-1px;margin-left:-3px;color:var(--color-text-2);background-color:var(--color-text-2);border-radius:.5px;content:""}.arco-tree-node-plus-icon:before{position:absolute;top:50%;left:50%;display:block;width:2px;height:6px;margin-top:-3px;margin-left:-1px;color:var(--color-text-2);background-color:var(--color-text-2);border-radius:.5px;content:""}.arco-tree{color:var(--color-text-1)}.arco-tree .arco-checkbox{margin-right:10px;padding-left:0;line-height:32px}.arco-tree-node{position:relative;display:flex;flex-wrap:nowrap;align-items:center;padding-left:2px;color:var(--color-text-1);line-height:1.5715;cursor:pointer}.arco-tree-node-selected .arco-tree-node-title,.arco-tree-node-selected .arco-tree-node-title:hover{color:rgb(var(--primary-6));transition:color .2s cubic-bezier(0,0,1,1)}.arco-tree-node-disabled-selectable .arco-tree-node-title,.arco-tree-node-disabled .arco-tree-node-title,.arco-tree-node-disabled-selectable .arco-tree-node-title:hover,.arco-tree-node-disabled .arco-tree-node-title:hover{color:var(--color-text-4);background:none;cursor:not-allowed}.arco-tree-node-disabled.arco-tree-node-selected .arco-tree-node-title{color:var(--color-primary-light-3)}.arco-tree-node-title-block{flex:1;box-sizing:content-box}.arco-tree-node-title-block .arco-tree-node-drag-icon{position:absolute;right:12px}.arco-tree-node-indent{position:relative;flex-shrink:0;align-self:stretch}.arco-tree-node-indent-block{position:relative;display:inline-block;width:12px;height:100%;margin-right:10px}.arco-tree-node-draggable{margin-top:2px}.arco-tree-node-title{position:relative;display:flex;align-items:center;margin-left:-4px;padding:5px 4px;font-size:14px;border-radius:var(--border-radius-small)}.arco-tree-node-title:hover{color:var(--color-text-1);background-color:var(--color-fill-2)}.arco-tree-node-title:hover 
.arco-tree-node-drag-icon{opacity:1}.arco-tree-node-title-draggable:before{position:absolute;top:-2px;right:0;left:0;display:block;height:2px;border-radius:1px;content:""}.arco-tree-node-title-gap-bottom:before{top:unset;bottom:-2px;background-color:rgb(var(--primary-6))}.arco-tree-node-title-gap-top:before{background-color:rgb(var(--primary-6))}.arco-tree-node-title-highlight{color:var(--color-text-1);background-color:var(--color-primary-light-1)}.arco-tree-node-title-dragging,.arco-tree-node-title-dragging:hover{color:var(--color-text-4);background-color:var(--color-fill-1)}.arco-tree-show-line{padding-left:1px}.arco-tree-show-line .arco-tree-node-switcher{width:14px;text-align:center}.arco-tree-show-line .arco-tree-node-switcher .arco-tree-node-icon-hover{width:100%}.arco-tree-show-line .arco-tree-node-indent-block{width:14px}.arco-tree-show-line .arco-tree-node-indent-block:before{position:absolute;left:50%;box-sizing:border-box;width:1px;border-left:1px solid var(--color-neutral-3);transform:translate(-50%);content:"";top:-5px;bottom:-5px}.arco-tree-show-line .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:after{position:absolute;right:-7px;box-sizing:border-box;width:1px;border-left:1px solid var(--color-neutral-3);transform:translate(50%);content:"";top:27px;bottom:-5px}.arco-tree-show-line .arco-tree-node-indent-block-lineless:before{display:none}.arco-tree-size-mini .arco-tree-node-switcher{height:24px}.arco-tree-size-mini .arco-checkbox{line-height:24px}.arco-tree-size-mini .arco-tree-node-title{padding-top:2px;padding-bottom:2px;font-size:12px;line-height:1.667}.arco-tree-size-mini .arco-tree-node-indent-block:after{top:23px;bottom:-1px}.arco-tree-size-mini .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-1px;bottom:-1px}.arco-tree-size-small .arco-tree-node-switcher{height:28px}.arco-tree-size-small .arco-checkbox{line-height:28px}.arco-tree-size-small .arco-tree-node-title{padding-top:3px;padding-bottom:3px;font-size:14px}.arco-tree-size-small .arco-tree-node-indent-block:after{top:25px;bottom:-3px}.arco-tree-size-small .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-3px;bottom:-3px}.arco-tree-size-large .arco-tree-node-switcher{height:36px}.arco-tree-size-large .arco-checkbox{line-height:36px}.arco-tree-size-large .arco-tree-node-title{padding-top:7px;padding-bottom:7px;font-size:14px}.arco-tree-size-large .arco-tree-node-indent-block:after{top:29px;bottom:-7px}.arco-tree-size-large .arco-tree-node-is-leaf:not(.arco-tree-node-is-tail) .arco-tree-node-indent:before{top:-7px;bottom:-7px}.arco-tree-node-list{overflow:hidden;transition:height .2s 
cubic-bezier(.34,.69,.1,1)}.arco-typography{color:var(--color-text-1);line-height:1.5715}h1.arco-typography,h2.arco-typography,h3.arco-typography,h4.arco-typography,h5.arco-typography,h6.arco-typography{margin-top:1em;margin-bottom:.5em;font-weight:500}h1.arco-typography{font-size:36px;line-height:1.23}h2.arco-typography{font-size:32px;line-height:1.25}h3.arco-typography{font-size:28px;line-height:1.29}h4.arco-typography{font-size:24px;line-height:1.33}h5.arco-typography{font-size:20px;line-height:1.4}h6.arco-typography{font-size:16px;line-height:1.5}div.arco-typography,p.arco-typography{margin-top:0;margin-bottom:1em}.arco-typography-primary{color:rgb(var(--primary-6))}.arco-typography-secondary{color:var(--color-text-2)}.arco-typography-success{color:rgb(var(--success-6))}.arco-typography-warning{color:rgb(var(--warning-6))}.arco-typography-danger{color:rgb(var(--danger-6))}.arco-typography-disabled{color:var(--color-text-4);cursor:not-allowed}.arco-typography mark{background-color:rgb(var(--yellow-4))}.arco-typography u{text-decoration:underline}.arco-typography del{text-decoration:line-through}.arco-typography b{font-weight:500}.arco-typography code{margin:0 2px;padding:2px 8px;color:var(--color-text-2);font-size:85%;background-color:var(--color-neutral-2);border:1px solid var(--color-neutral-3);border-radius:2px}.arco-typography blockquote{margin:0 0 1em;padding-left:8px;background-color:var(--color-bg-2);border-left:2px solid var(--color-neutral-6)}.arco-typography ol,.arco-typography ul{margin:0;padding:0}.arco-typography ul li,.arco-typography ol li{margin-left:20px}.arco-typography ul{list-style:circle}.arco-typography-spacing-close{line-height:1.3}.arco-typography-operation-copy,.arco-typography-operation-copied{margin-left:2px;padding:2px}.arco-typography-operation-copy{color:var(--color-text-2);background-color:transparent;border-radius:2px;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-typography-operation-copy:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-typography-operation-copied{color:rgb(var(--success-6))}.arco-typography-operation-edit{margin-left:2px;padding:2px;color:var(--color-text-2);background-color:transparent;border-radius:2px;cursor:pointer;transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-typography-operation-edit:hover{color:var(--color-text-2);background-color:var(--color-fill-2)}.arco-typography-operation-expand{margin:0 4px;color:rgb(var(--primary-6));cursor:pointer}.arco-typography-operation-expand:hover{color:rgb(var(--primary-5))}.arco-typography-edit-content{position:relative;left:-13px;margin-top:-5px;margin-right:-13px;margin-bottom:calc(1em - 5px)}.arco-typography-css-operation{margin-top:-1em;margin-bottom:1em;text-align:right}.arco-upload{display:inline-block;max-width:100%;cursor:pointer}.arco-upload.arco-upload-draggable{width:100%}.arco-upload-tip{margin-top:4px;overflow:hidden;color:var(--color-text-3);font-size:12px;line-height:1.5;white-space:nowrap;text-overflow:ellipsis}.arco-upload-picture-card{display:flex;flex-direction:column;justify-content:center;min-width:80px;height:80px;margin-bottom:0;color:var(--color-text-2);text-align:center;background:var(--color-fill-2);border:1px dashed var(--color-neutral-3);border-radius:var(--border-radius-small);transition:all .1s cubic-bezier(0,0,1,1)}.arco-upload-picture-card:hover{color:var(--color-text-2);background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-upload-drag{width:100%;padding:50px 
0;color:var(--color-text-1);text-align:center;background-color:var(--color-fill-1);border:1px dashed var(--color-neutral-3);border-radius:var(--border-radius-small);transition:all .2s ease}.arco-upload-drag .arco-icon-plus{margin-bottom:24px;color:var(--color-text-2);font-size:14px}.arco-upload-drag:hover{background-color:var(--color-fill-3);border-color:var(--color-neutral-4)}.arco-upload-drag:hover .arco-upload-drag-text{color:var(--color-text-1)}.arco-upload-drag:hover .arco-icon-plus{color:var(--color-text-2)}.arco-upload-drag-active{color:var(--color-text-1);background-color:var(--color-primary-light-1);border-color:rgb(var(--primary-6))}.arco-upload-drag-active .arco-upload-drag-text{color:var(--color-text-1)}.arco-upload-drag-active .arco-icon-plus{color:rgb(var(--primary-6))}.arco-upload-drag .arco-upload-tip{margin-top:0}.arco-upload-drag-text{color:var(--color-text-1);font-size:14px;line-height:1.5}.arco-upload-wrapper{width:100%}.arco-upload-wrapper.arco-upload-wrapper-type-picture-card{display:flex;justify-content:flex-start}.arco-upload-drag{width:100%}.arco-upload-hide{display:none}.arco-upload-disabled .arco-upload-picture-card,.arco-upload-disabled .arco-upload-picture-card:hover{color:var(--color-text-4);background-color:var(--color-fill-1);border-color:var(--color-neutral-4);cursor:not-allowed}.arco-upload-disabled .arco-upload-drag,.arco-upload-disabled .arco-upload-drag:hover{background-color:var(--color-fill-1);border-color:var(--color-text-4);cursor:not-allowed}.arco-upload-disabled .arco-upload-drag .arco-icon-plus,.arco-upload-disabled .arco-upload-drag:hover .arco-icon-plus,.arco-upload-disabled .arco-upload-drag .arco-upload-drag-text,.arco-upload-disabled .arco-upload-drag:hover .arco-upload-drag-text,.arco-upload-disabled .arco-upload-tip{color:var(--color-text-4)}.arco-upload-icon{cursor:pointer}.arco-upload-icon-error{margin-left:4px;color:rgb(var(--danger-6))}.arco-upload-icon-success{color:rgb(var(--success-6));font-size:14px;line-height:14px}.arco-upload-icon-remove{position:relative;font-size:14px}.arco-upload-icon-start,.arco-upload-icon-cancel{position:absolute;top:50%;left:50%;color:var(--color-white);font-size:12px;transform:translate(-50%) translateY(-50%)}.arco-upload-icon-upload{color:rgb(var(--primary-6));font-size:14px;cursor:pointer;transition:all .2s ease}.arco-upload-icon-upload:active,.arco-upload-icon-upload:hover{color:rgb(var(--primary-7))}.arco-upload-list{margin:0;padding:0;list-style:none}.arco-upload-list.arco-upload-list-type-text,.arco-upload-list.arco-upload-list-type-picture{width:100%}.arco-upload-list.arco-upload-list-type-text .arco-upload-list-item:first-of-type,.arco-upload-list.arco-upload-list-type-picture .arco-upload-list-item:first-of-type{margin-top:24px}.arco-upload-list-item-done .arco-upload-list-item-file-icon{color:rgb(var(--primary-6))}.arco-upload-list-item{position:relative;display:flex;align-items:center;box-sizing:border-box;margin-top:12px}.arco-upload-list-item-content{display:flex;flex:1;flex-wrap:nowrap;align-items:center;box-sizing:border-box;width:100%;padding:8px 10px 8px 12px;overflow:hidden;font-size:14px;background-color:var(--color-fill-1);border-radius:var(--border-radius-small);transition:background-color .1s cubic-bezier(0,0,1,1)}.arco-upload-list-item-file-icon{margin-right:12px;color:rgb(var(--primary-6));font-size:16px;line-height:16px}.arco-upload-list-item-thumbnail{flex-shrink:0;width:40px;height:40px;margin-right:12px}.arco-upload-list-item-thumbnail 
img{width:100%;height:100%}.arco-upload-list-item-name{display:flex;flex:1;align-items:center;margin-right:10px;overflow:hidden;color:var(--color-text-1);font-size:14px;line-height:1.4286;white-space:nowrap;text-overflow:ellipsis}.arco-upload-list-item-name-link{overflow:hidden;color:rgb(var(--link-6));text-decoration:none;text-overflow:ellipsis;cursor:pointer}.arco-upload-list-item-name-text{overflow:hidden;text-overflow:ellipsis;cursor:pointer}.arco-upload-list-item .arco-upload-progress{position:relative;margin-left:auto;line-height:12px}.arco-upload-list-item .arco-upload-progress:hover .arco-progress-circle-bg{stroke:rgba(var(--gray-10),.2)}.arco-upload-list-item .arco-upload-progress:hover .arco-progress-circle-bar{stroke:rgb(var(--primary-7))}.arco-upload-list-item-operation{margin-left:12px;color:var(--color-text-2);font-size:12px}.arco-upload-list-item-operation .arco-upload-icon-remove{font-size:inherit}.arco-upload-list-item-error .arco-upload-list-status,.arco-upload-list-item-done .arco-upload-list-status{display:none}.arco-upload-list-type-text .arco-upload-list-item-error .arco-upload-list-item-name-link,.arco-upload-list-type-text .arco-upload-list-item-error .arco-upload-list-item-name{color:rgb(var(--danger-6))}.arco-upload-list.arco-upload-list-type-picture-card{display:flex;flex-wrap:wrap;vertical-align:top}.arco-upload-list.arco-upload-list-type-picture-card .arco-upload-list-status{top:50%;margin-left:0;transform:translateY(-50%)}.arco-upload-list-picture{display:inline-block;margin-top:0;margin-right:8px;margin-bottom:8px;padding-right:0;overflow:hidden;vertical-align:top;transition:all .2s cubic-bezier(.34,.69,.1,1)}.arco-upload-list-picture-status-error .arco-upload-list-picture-mask{opacity:1}.arco-upload-list-picture{position:relative;box-sizing:border-box;width:80px;height:80px;overflow:hidden;line-height:80px;text-align:center;vertical-align:top;border-radius:var(--border-radius-small)}.arco-upload-list-picture img{width:100%;height:100%}.arco-upload-list-picture-mask{position:absolute;top:0;right:0;bottom:0;left:0;color:var(--color-white);font-size:16px;line-height:80px;text-align:center;background:rgba(0,0,0,.5);cursor:pointer;opacity:0;transition:opacity .1s cubic-bezier(0,0,1,1)}.arco-upload-list-picture-operation{display:none;font-size:14px}.arco-upload-list-picture-operation .arco-upload-icon-retry{color:var(--color-white)}.arco-upload-list-picture-error-tip .arco-upload-icon-error{color:var(--color-white);font-size:26px}.arco-upload-list-picture-mask:hover{opacity:1}.arco-upload-list-picture-mask:hover .arco-upload-list-picture-operation{display:flex;justify-content:space-evenly}.arco-upload-list-picture-mask:hover .arco-upload-list-picture-error-tip{display:none}.arco-upload-list-type-picture .arco-upload-list-item-content{padding-top:8px;padding-bottom:8px}.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-content{background-color:var(--color-danger-light-1)}.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-name-link,.arco-upload-list-type-picture .arco-upload-list-item-error .arco-upload-list-item-name{color:rgb(var(--danger-6))}.arco-upload-hide+.arco-upload-list .arco-upload-list-item:first-of-type{margin-top:0}.arco-upload-slide-up-enter{opacity:0}.arco-upload-slide-up-enter-active{opacity:1;transition:opacity .2s cubic-bezier(.34,.69,.1,1)}.arco-upload-slide-up-exit{opacity:1}.arco-upload-slide-up-exit-active{margin:0;overflow:hidden;opacity:0;transition:opacity .1s 
cubic-bezier(0,0,1,1),height .3s cubic-bezier(.34,.69,.1,1) .1s,margin .3s cubic-bezier(.34,.69,.1,1) .1s}.arco-upload-list-item.arco-upload-slide-inline-enter{opacity:0}.arco-upload-list-item.arco-upload-slide-inline-enter-active{opacity:1;transition:opacity .2s cubic-bezier(0,0,1,1)}.arco-upload-list-item.arco-upload-slide-inline-exit{opacity:1}.arco-upload-list-item.arco-upload-slide-inline-exit-active{margin:0;overflow:hidden;opacity:0;transition:opacity .1s cubic-bezier(0,0,1,1),width .3s cubic-bezier(.34,.69,.1,1) .1s,margin .3s cubic-bezier(.34,.69,.1,1) .1s}body{font-family:Nunito Sans-SemiBold,Nunito Sans}html,body,#app{height:100%}.arco-table-td-content{color:#4e5969;font-size:12px;font-weight:400;padding:8px 0}.arco-table-th-title{color:#1d2129;font-size:12px;font-weight:500}.loadingDirectiveElement{position:absolute;left:0;right:0;top:0;bottom:0;z-index:10;display:flex;justify-content:center;align-items:center;text-align:center;background-color:#fff9;transition:opacity .1s cubic-bezier(0,0,1,1);user-select:none}.loadingDirectiveElement.fullScreen{position:fixed;z-index:1000}.posRelative{position:relative}.spaceBTW{justify-content:space-between}.headerInner{height:100%;padding:0 16px}.headerInner .title{color:#1d2129;font-size:14px;font-weight:500}.v-binder-follower-content{max-width:300px}.typing-pre>.code_container:last-child pre code:after{display:block;color:#fff;content:"▋";margin-left:4px;animation:blink 1s steps(5,start) infinite}.typing-text>*:last-child:after{content:"▋";margin-left:4px;vertical-align:baseline;animation:blink 1s steps(5,start) infinite}@keyframes blink{to{visibility:hidden}}.rotate{animation:rotate 1.5s infinite linear}@keyframes rotate{0%{transform:rotate(0)}to{transform:rotate(360deg)}}.avatarWrap{height:40px;width:40px;border-radius:20px;box-sizing:border-box;overflow:hidden}.avatarWrap img{width:100%;height:100%;object-fit:cover}::-webkit-scrollbar{height:16px;width:8px}::-webkit-scrollbar:horizontal{height:8px;width:16px}::-webkit-scrollbar-track{background-color:transparent;border-radius:9999px}::-webkit-scrollbar-thumb{background-color:#d9d9e3cc;border-color:#fff;border-radius:9999px;border-width:1px}::-webkit-scrollbar-thumb:hover{background-color:#ececf1}.hide-scrollbar{-ms-overflow-style:none;scrollbar-width:none}.hide-scrollbar ::-webkit-scrollbar{display:none}.login{height:100%;display:flex}.login .loginbg{flex:2;background-size:cover;position:relative}.login .loginbg .logiWhite{position:absolute;top:20px;left:20px}.login .loginbg .title{color:var(--fill-color-bg-white, #fff);font-family:PingFang SC;font-size:34px;font-style:normal;font-weight:600;line-height:normal;margin-left:20%;margin-top:40%}.login .loginform{flex:3;display:flex;justify-content:center;align-items:center}.login .loginform .formTitle{color:#2d2a2a;font-size:24px;font-weight:500}.login .loginform .toolBox{line-height:50px}.login .loginform .toolBox .toolBoxBtn{cursor:pointer;font-weight:400}.login .loginform .desc{font-size:12px;font-weight:400;color:#86909c}.login .loginform .desc .arco-link{font-size:12px}.IconCommon{fill:currentColor;outline:none;width:1em;height:1em}.IconCommon.iconDisabled{filter:opacity(.5);cursor:not-allowed!important}dialog[data-v-6fddb6c7]:not([open]){opacity:0;visibility:hidden;display:block}.customDialog[data-v-6fddb6c7]{opacity:1;padding:20px;box-sizing:border-box;border:none;border-radius:20px;filter:drop-shadow(0px 0px 40px rgba(168,168,168,.25));transition:opacity .3s ease;display:flex;flex-direction:column;outline:none}.customDialog 
.header[data-v-6fddb6c7]{width:100%;display:flex;justify-content:flex-end}.customDialog .content[data-v-6fddb6c7]{flex:1}.wechatModal[data-v-e442bd8c]{height:407px;padding:0 20px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;justify-content:flex-start;gap:12px}.wechatModal .title[data-v-e442bd8c]{display:flex;align-items:center;justify-content:center;gap:10px}.wechatModal .title .titleText[data-v-e442bd8c]{color:#000;font-family:Helvetica Neue;font-size:24px;font-style:normal;font-weight:500;line-height:normal}.wechatModal .desc[data-v-e442bd8c]{color:var(--light-text-color-text-2, #4e5969);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:150%}.wechatModal .qrCode[data-v-e442bd8c]{width:242px;height:263.868px;flex-shrink:0;border-radius:20px;background:#fff;box-shadow:0 4px 40px 10px #0000000d;padding:20px;box-sizing:border-box;margin-top:4px}.wechatModal .qrCode .scanText[data-v-e442bd8c]{display:flex;flex-direction:row;align-items:center;gap:10px}.wechatModal .qrCode .scanText span[data-v-e442bd8c]{color:var(--light-text-color-text-1, #1d2129);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:normal}.baseFont[data-v-01535cfb],.heroWrapper .links .link .linkName[data-v-01535cfb],.heroWrapper .affiliations[data-v-01535cfb],.heroWrapper .affiliationIndex[data-v-01535cfb],.heroWrapper .contributor[data-v-01535cfb],.heroWrapper h1[data-v-01535cfb]{font-size:20px;font-weight:400;line-height:32px;letter-spacing:0em}.heroWrapper[data-v-01535cfb]{width:100%;display:flex;align-items:center;flex-direction:column;color:#1d2129;font-family:Helvetica Neue;padding-bottom:50px;overflow:hidden}.heroWrapper h1[data-v-01535cfb]{font-size:56px;font-weight:700;line-height:70px;text-align:center;color:#1d2129;max-width:1350px;margin:72px 0 0}.heroWrapper .contributors[data-v-01535cfb]{max-width:1350px;text-align:center;margin-top:24px}.heroWrapper .contributor[data-v-01535cfb]{text-align:center;color:#4080ff}.heroWrapper .affiliationIndex[data-v-01535cfb]{text-align:center;font-family:PingFang SC}.heroWrapper .affiliations[data-v-01535cfb]{text-align:center;color:#1d2129;margin-top:10px}.heroWrapper .links[data-v-01535cfb]{display:flex;flex-direction:row;gap:16px;margin-top:40px;z-index:1;flex-wrap:wrap;justify-content:center}.heroWrapper .links .link[data-v-01535cfb]{height:42px;padding:8px 16px;border-radius:50px;box-sizing:border-box;background:linear-gradient(90deg,#e8f3ff -1.99%,#e2e8ff 100%);color:#1d2129;display:flex;align-items:center;justify-content:center;gap:8px;user-select:none;cursor:not-allowed;transition:all .3s}.heroWrapper .links .link .linkName[data-v-01535cfb]{line-height:24px;color:#1d2129}.heroWrapper .links .enabled[data-v-01535cfb]{cursor:pointer}.heroWrapper .links .enabled[data-v-01535cfb]:hover{transform:scale(1.1)}.heroWrapper .bigTex[data-v-01535cfb]{margin-top:50px;width:100%;max-width:1251px;height:356px;flex-shrink:0;border-radius:20px;background:linear-gradient(129deg,#1d1c48 15.07%,#252436 76.51%);padding-top:19.49px;box-sizing:border-box;overflow:hidden;position:relative}.heroWrapper .bigTex .bigTexContent[data-v-01535cfb]{width:100%;height:301px;box-sizing:border-box;padding:29px 43px;z-index:999;position:absolute;margin-top:46px;color:var(--light-text-color-white, #fff);font-family:Helvetica Neue;font-size:20px;font-style:normal;font-weight:400;line-height:160%}.heroWrapper .bigTex 
.header[data-v-01535cfb]{position:absolute;top:46px;left:64px;z-index:999;color:#fff;font-size:24px;font-weight:700;line-height:0%}.heroWrapper .bigTex .copyBtn[data-v-01535cfb]{position:absolute;top:24px;right:22px;color:red;z-index:999;color:#fff;font-size:24px;font-weight:700;line-height:0%;cursor:pointer}.heroWrapper .bigTex .copyBtn[data-v-01535cfb]:active{scale:.9}.heroWrapper .galance[data-v-01535cfb]{margin-top:50px;max-width:1440px;position:relative;overflow:hidden;user-select:none;pointer-events:none}@media screen and (max-width: 1440px){.heroWrapper .galance[data-v-01535cfb]{margin:12px}}.wechatModal[data-v-d5d425dc]{height:407px;padding:0 20px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;justify-content:flex-start;gap:12px;font-family:Helvetica Neue}.wechatModal .title[data-v-d5d425dc]{display:flex;align-items:center;justify-content:center;gap:10px}.wechatModal .title .titleText[data-v-d5d425dc]{color:#000;font-size:24px;font-style:normal;font-weight:500;line-height:normal}.wechatModal .desc[data-v-d5d425dc]{color:var(--light-text-color-text-2, #4e5969);font-size:16px;font-style:normal;font-weight:400;line-height:150%}.wechatModal .links[data-v-d5d425dc]{width:100%;padding-top:8px;display:flex;gap:8px;flex-direction:column;color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:400;line-height:normal;border-top:1px dashed #e5e6eb}.wechatModal .links .link[data-v-d5d425dc]{color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:400;line-height:normal;cursor:pointer}.wechatModal .links .link[data-v-d5d425dc]:hover{text-decoration:underline}.wechatModal .viwer[data-v-d5d425dc]{width:548px;height:304.385px;flex-shrink:0;border-radius:20px;margin-top:8px}.wechatModal .button[data-v-d5d425dc]{display:flex;height:42px;padding:18px 24px;box-sizing:border-box;justify-content:center;align-items:center;gap:10px;border-radius:10px;background:linear-gradient(270deg,#5772ff 0%,#165dff 89.78%);color:#fff;margin-top:8px;cursor:pointer;transition:transform .3s}.wechatModal .button[data-v-d5d425dc]:hover{transform:scale(1.1)}.wechatModal .welcomText[data-v-d5d425dc]{color:var(--light-text-color-text-1, #1d2129);font-size:16px;font-style:normal;font-weight:500;line-height:normal;margin-top:10px}.wechatModal .contributor[data-v-d5d425dc]{color:var(--light-text-color-text-2, #4e5969);font-size:16px;font-style:normal;font-weight:400;line-height:normal;display:flex;align-items:center;gap:4px;margin-top:6px}.wechatModal .contributor .count[data-v-d5d425dc]{display:flex;padding:2px 5px;align-items:center;gap:4px;border-radius:40px;background:var(--color-fill-2, #f2f3f5)}.roleListWrapper[data-v-d4dce6b2]{width:429px;height:100%;background-image:linear-gradient(170.11deg,#e9e9ff 1.21%,#ffffff 10.31%,#ffffff 98.31%);font-family:Helvetica Neue;display:flex;flex-direction:column;overflow:hidden}.roleListWrapper[data-v-d4dce6b2] .arco-select-view-size-large{height:48px;width:369px;margin:20px 30px 0;border-radius:10px}.roleListWrapper .title[data-v-d4dce6b2]{font-size:24px;font-weight:700;line-height:29px;letter-spacing:0em;text-align:left;display:flex;align-items:center;gap:10px;padding:16px 32px;box-sizing:border-box}.roleListWrapper .keyFill[data-v-d4dce6b2]{margin:0 30px;height:78px;padding:0 28.75px;box-sizing:border-box;border-radius:10px;border:1px;text-align:left;border:1px solid #e5e6eb;box-shadow:0 2px 10px 
#0000001a;background:linear-gradient(0deg,#f7f8fa,#f7f8fa),linear-gradient(0deg,#e5e6eb,#e5e6eb);display:flex;align-items:center}.roleListWrapper .keyFill input[data-v-d4dce6b2]{width:100%;height:19px;resize:none;outline:none;border:none;background:none;color:var(--light-text-color-text-2, #4e5969);font-family:Helvetica Neue;font-size:16px;font-style:normal;font-weight:400;line-height:normal}.roleListWrapper .keyFill .placeholder[data-v-d4dce6b2]{color:#86909c;font-size:16px;font-weight:400;line-height:19px;letter-spacing:0em}.roleListWrapper .keyFill .showPassword[data-v-d4dce6b2]{width:50px;color:#86909c;font-size:16px;font-weight:400;line-height:19px;letter-spacing:0em;cursor:pointer;display:flex;justify-content:flex-end}.roleListWrapper .keyFilled[data-v-d4dce6b2]{border-radius:10px;border:1px solid var(--light-line-color-border-2, #e5e6eb);background:linear-gradient(90deg,#e8f3ff 0%,#e2e8ff 100%)}.roleListWrapper .shake[data-v-d4dce6b2]{animation:shake-d4dce6b2 .5s 1}@keyframes shake-d4dce6b2{0%,to{transform:translate(0)}10%,30%,50%,70%,90%{transform:translate(-10px)}20%,40%,60%,80%{transform:translate(10px)}}.roleListWrapper .roleList[data-v-d4dce6b2]{width:100%;overflow:hidden;display:flex;flex-direction:column;gap:14px;padding:3px 32px 32px;box-sizing:border-box}.roleListWrapper .roleList .role[data-v-d4dce6b2]{width:100%;height:92px;padding:0 0 16px;box-sizing:border-box;border-radius:4.8px;gap:12px;display:flex;flex-direction:row}.roleListWrapper .roleList .role .avatar[data-v-d4dce6b2]{width:54px;height:54px;border-radius:50%;border:3px solid #c9cdd4;position:relative}.roleListWrapper .roleList .role .avatar .innerPie[data-v-d4dce6b2]{margin:3px;border-radius:50%;position:absolute;width:calc(100% - 6px);height:calc(100% - 6px);background:linear-gradient(0deg,#e5e6eb,#e5e6eb),linear-gradient(0deg,#ffffff,#ffffff)}.roleListWrapper .roleList .role .avatar .rightPoint[data-v-d4dce6b2]{position:absolute;content:"";width:10px;height:10px;top:40px;left:40px;border:2px;border-radius:50%;background:linear-gradient(0deg,#c9cdd4,#c9cdd4),linear-gradient(0deg,#ffffff,#ffffff);border:2px solid #ffffff}.roleListWrapper .roleList .role .avatar .pointActive[data-v-d4dce6b2]{background:#0fd267}.roleListWrapper .roleList .role .avatar img[data-v-d4dce6b2]{width:32px;margin:8px 12px;position:absolute}.roleListWrapper .roleList .role .infomation[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:column;justify-content:space-between;overflow:hidden}.roleListWrapper .roleList .role .infomation .job[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:row;justify-content:space-between;overflow:hidden}.roleListWrapper .roleList .role .infomation .job .jobName[data-v-d4dce6b2]{font-size:16px;font-weight:500;line-height:20px;letter-spacing:.01em;text-align:left;color:#1d2129;margin-top:5px}.roleListWrapper .roleList .role .infomation .job .jobStatus[data-v-d4dce6b2]{font-size:16px;font-weight:400;letter-spacing:0em;text-align:right;color:#86909c}.roleListWrapper .roleList .role .infomation .tags[data-v-d4dce6b2]{flex:1;display:flex;flex-direction:row;justify-content:space-between}.roleListWrapper .roleList .role .infomation .tags .tagItem[data-v-d4dce6b2]{width:auto;height:21px;padding:2px 10px;box-sizing:border-box;border-radius:5px;gap:4px;background:#f2f3f5;font-family:Helvetica Neue;font-size:14px;font-weight:500;line-height:17px;letter-spacing:0em;text-align:left}.roleListWrapper .roleList .role .infomation .tags .action[data-v-d4dce6b2]{font-family:Helvetica 
Neue;font-size:16px;font-weight:500;line-height:20px;letter-spacing:0em;text-align:left;color:#165dff;cursor:pointer;user-select:none}.roleListWrapper .roleList .role .infomation .tags .action[data-v-d4dce6b2]:hover{text-decoration:underline}.loading_wrap[data-v-491f84be]{display:inline-flex;align-items:center}.loading[data-v-491f84be],.loading>span[data-v-491f84be]{position:relative;box-sizing:border-box}.loading[data-v-491f84be]{display:inline-block;font-size:0;color:inherit}.loading>span[data-v-491f84be]{display:inline-block;float:none;background-color:currentColor;border:0 solid inherit}.loading[data-v-491f84be]{width:27px;height:9px}.loading>span[data-v-491f84be]{width:5px;height:5px;margin:2px;border-radius:100%;animation:ball-beat-491f84be .7s -.15s infinite linear}.loading>span[data-v-491f84be]:nth-child(2n-1){animation-delay:-.5s}@keyframes ball-beat-491f84be{50%{opacity:.2;transform:scale(.75)}to{opacity:1;transform:scale(1)}}.message_info[data-v-de77c762]{display:flex;padding:16px;gap:16px}.message_info .avatar[data-v-de77c762]{width:40px;height:40px;position:relative;flex-shrink:0}.message_info .avatar[data-v-de77c762]:after{content:"";position:absolute;width:8px;height:8px;border-radius:50%;background-color:currentColor;right:6px;bottom:0}.message_info[data-v-de77c762] .avatar .arco-avatar-text{color:#fff}.message_info .info_box[data-v-de77c762]{display:flex;flex-direction:column;gap:8px;width:100%;overflow:hidden}.message_info .info_box .item_info[data-v-de77c762]{display:flex;gap:16px;font-size:14px;font-weight:400;color:#4e5969}.message_info .info_box .item_info .name[data-v-de77c762]{font-weight:500;color:var(--light-text-color-text-1, #1d2129)}.message_info .info_box .item_info .time[data-v-de77c762]{color:#86909c;font-size:14px;font-weight:400}.message_info .info_box .item_info .responseSwitcher[data-v-de77c762]{display:flex;align-items:center;column-gap:4px;color:#4e5969;user-select:none;font-size:14px;font-weight:400;margin-left:-8px;margin-right:-8px}.message_info .info_box .item_info .responseSwitcher>svg[data-v-de77c762]{cursor:pointer}.message_info .info_box .item_info .responseSwitcher .disabled[data-v-de77c762]{cursor:not-allowed;color:#c9cdd4}.message_info .info_box .item_info .rate_wrap[data-v-de77c762]{position:relative;display:none;align-items:center;height:22px;gap:8px}.message_info .info_box .item_info .rate_wrap[data-v-de77c762] .rate_box{background-color:#fff;border-radius:4px;padding:8px 16px;height:32px;font-size:12px;box-sizing:border-box}.message_info .info_box .item_info .rate_wrap[data-v-de77c762] .rate_box .arco-rate{font-size:16px;min-height:16px}.message_info .info_box:hover .rate_wrap[data-v-de77c762]{display:flex}.message_info .info_box .message_wrap[data-v-de77c762]{width:100%}.message_info .info_box .answer_feedback[data-v-de77c762]{position:relative;min-width:440px;max-width:min(700px,70%);display:flex;flex-direction:row;justify-content:flex-start;align-items:center;column-gap:32px;padding:12px 16px;box-sizing:border-box;color:var(--color-text-1);background-color:#fff;font-size:14px;font-weight:400;line-height:22px}.message_info .info_box .answer_feedback .icon_close[data-v-de77c762]{position:absolute;top:5px;right:5px;font-size:12px;font-weight:300}.message_info .info_box .answer_feedback .feedback[data-v-de77c762]{display:flex;align-items:center;column-gap:6px;cursor:pointer}.message_info .info_box .answer_feedback .feedback.active[data-v-de77c762],.message_info .info_box .answer_feedback 
.feedback[data-v-de77c762]:hover{color:#165dff}.message_info .right_pos[data-v-de77c762]{justify-content:flex-end;align-items:flex-end}.message_container[data-v-6f899d6f]{width:100%;display:flex;flex-direction:column;align-items:flex-end}.message_container .user_message[data-v-6f899d6f]{min-width:440px;max-width:min(700px,70%);position:relative;background:#eaf3ff;padding:16px 64px 16px 16px;border-radius:4px;box-sizing:border-box;display:flex;flex-direction:column;row-gap:30px}.message_container .user_message .msg_wrap[data-v-6f899d6f]{display:flex;align-items:center}.message_container .user_message[data-v-6f899d6f] .msg_wrap .arco-textarea-wrapper{background-color:transparent;border-color:transparent;box-shadow:none;padding:0}.message_container .user_message[data-v-6f899d6f] .msg_wrap .arco-textarea-wrapper .arco-textarea{padding:0}.message_container .user_message .icon_more_wrap[data-v-6f899d6f]{position:absolute;right:16px;top:16px;cursor:pointer}.message_container .user_message .icon_more_wrap .icon_more[data-v-6f899d6f]{display:none}.message_container .user_message .btn_group[data-v-6f899d6f]{align-self:flex-end;display:flex;column-gap:8px;margin-right:-48px}.message_container:hover .icon_more_wrap .icon_more[data-v-6f899d6f]{display:block}.step_skill[data-v-17bf8a16]{display:flex;align-items:center;margin-left:54px;font-weight:400;color:#1d2129;font-size:14px;line-height:22px;column-gap:8px}.step_skill .trigger[data-v-17bf8a16]{color:#4e5969;margin-right:16px;display:flex;align-items:center;column-gap:8px}.step_skill .link_group[data-v-17bf8a16]{margin-left:16px;display:flex;column-gap:8px}.step_skill .link_group>a[data-v-17bf8a16]{display:flex;align-items:center;column-gap:4px}.step_item[data-v-690b1166]{width:100%;height:100%}.step_item+.step_item[data-v-690b1166]{margin-top:16px}.step_item .step[data-v-690b1166]{width:100%;display:flex;align-items:center;min-height:initial!important}.step_item .step .step_title_wrap[data-v-690b1166]{width:100%;display:flex;flex-direction:row;align-items:center;font-weight:400}.step_item .step .step_title_wrap .title[data-v-690b1166]{color:#1d2129;font-size:16px;line-height:24px}.step_item .step .step_title_wrap .icon_loading[data-v-690b1166]{display:inline-flex;align-items:center;margin:0 8px}.step_item .step .step_title_wrap .description[data-v-690b1166]{margin-left:auto;color:#4e5969;font-size:14px;line-height:22px;text-wrap:wrap;max-width:500px}.step_item[data-v-690b1166] .step .arco-steps-item-content{flex:1}.step_item[data-v-690b1166] .step .arco-steps-item-content .arco-steps-item-title{width:100%;height:100%}.step_item .step_info[data-v-690b1166]{height:100%;margin-top:4px;display:flex;flex-direction:column;row-gap:8px}.step_item .step_info .step_content_wrap[data-v-690b1166]{display:flex;column-gap:28px;min-height:50px}.step_item .step_info .step_content_wrap .divider[data-v-690b1166]{flex-shrink:0;height:inherit;background:#165dff}.step_item .step_info .step_content_wrap .divider.active[data-v-690b1166]{background:#e5e6eb}.step_item .step_info .step_content_wrap .step_content[data-v-690b1166]{width:calc(100% - 54px);padding-top:3.5px;box-sizing:border-box}pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#abb2bf;background:#282c34}.hljs-comment,.hljs-quote{color:#5c6370;font-style:italic}.hljs-doctag,.hljs-formula,.hljs-keyword{color:#c678dd}.hljs-deletion,.hljs-name,.hljs-section,.hljs-selector-tag,.hljs-subst{color:#e06c75}.hljs-literal{color:#56b6c2}.hljs-addition,.hljs-attribute,.hljs-meta 
.hljs-string,.hljs-regexp,.hljs-string{color:#98c379}.hljs-attr,.hljs-number,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-pseudo,.hljs-template-variable,.hljs-type,.hljs-variable{color:#d19a66}.hljs-bullet,.hljs-link,.hljs-meta,.hljs-selector-id,.hljs-symbol,.hljs-title{color:#61aeee}.hljs-built_in,.hljs-class .hljs-title,.hljs-title.class_{color:#e6c07b}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:700}.hljs-link{text-decoration:underline}.operation_wrap[data-v-ea9cc5f9]{width:100%;position:relative}.operation_wrap .operate_icon[data-v-ea9cc5f9]{display:inline-block;position:absolute;left:calc(100% + 8px);top:0px;width:32px;height:32px;text-align:center;line-height:32px;box-sizing:border-box;border-radius:4px;background-color:#fff;color:#4e5969;cursor:pointer;visibility:hidden;transition:all .2s ease}.operation_wrap .operate_icon[data-v-ea9cc5f9]:hover{background-color:var(--color-fill-2)}.operation_wrap .operate_icon:hover svg[data-v-ea9cc5f9]{transform:scale(1.1)}.operation_wrap:hover .operate_icon[data-v-ea9cc5f9]{visibility:visible}.code_container[data-v-4f00a864]{display:flex;flex-direction:column;border-radius:6px;overflow:hidden}.code_container .tool_wrap[data-v-4f00a864]{font-size:10px;line-height:24px;display:flex;color:#d9d9e3;background:rgb(52,53,65);padding:8px 16px}.code_container .tool_wrap .copy_btn[data-v-4f00a864]{font-size:12px;color:#d9d9e3;background-color:transparent;margin-left:auto;cursor:pointer;outline:none;border:none;display:flex;padding:0;gap:6px;align-items:center}.code_container .tool_wrap .copy_btn .copy_icon[data-v-4f00a864]{width:16px;height:16px;display:block}.markdown_wrap{padding:16px;border-radius:4px;box-sizing:border-box;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;background-color:#fff}.markdown_wrap>p:first-child{margin-top:0}.markdown_wrap>p:last-child{margin-bottom:0}.markdown_wrap pre{margin:0;padding:0}.markdown_wrap .hljs_code{width:100%;box-sizing:border-box;padding:15px;overflow-x:auto}.chatHistoryImageItem{background-color:#fff;display:inline-flex;flex-wrap:wrap;max-width:324px;padding:8px;gap:4px}.chatHistoryImageItem .imageItem{width:160px;height:160px;position:relative;display:flex;justify-content:center;align-items:center}.chatHistoryImageItem .imageItem .n-image{height:100%;width:100%}.chatHistoryImageItem .imageItem img{width:100%;height:100%}.chatHistoryImageItem .imageItem .maxCover{height:100%;width:100%;position:absolute;left:0;top:0;background:rgba(0,0,0,.4);display:flex;justify-content:center;align-items:center}.chatHistoryAudioItem{width:574px;display:inline-block;padding:4px 16px;background-color:#fff}.chatHistoryAudioItem .audio{display:flex;align-items:center;gap:16px;color:#4e5969}.chatHistoryAudioItem .audio .control{font-size:32px;color:#165dff;cursor:pointer}.chatHistoryAudioItem audio{display:none}.error_msg[data-v-84a7773a]{border:1px solid #f53f3f;background-color:#f53f3f1a;padding:16px;border-radius:4px;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;box-sizing:border-box;white-space:normal;overflow-wrap:break-word}.agent_message_wrap[data-v-898355de]{display:flex;flex-direction:column;row-gap:8px}.steps_container[data-v-77479ba1]{width:100%;margin-top:12px}.steps_container .steps_wrap[data-v-77479ba1]{min-width:440px;max-width:min(700px,70%)}.steps_container .steps_wrap[data-v-77479ba1] .arco-steps-icon{background-color:#fff0!important}.status_btn[data-v-240aae5d]{position:absolute;top:0;right:0;transform:translateY(calc(-100% - 16px))}.status_btn 
.error_msg[data-v-240aae5d]{font-size:14px;font-weight:400;line-height:22px;color:#86909c}.chatRoomWrapper[data-v-ae197aef]{flex:1;height:100%;background-color:#f8faff;padding:40px 100px;box-sizing:border-box;display:flex;flex-direction:column;align-items:center;font-family:Helvetica Neue;overflow:hidden}.chatRoomWrapper .visionWrapper[data-v-ae197aef]{width:100%;flex:1;display:flex;overflow:hidden}.chatRoomWrapper .visionWrapper .emptyWrapper[data-v-ae197aef]{padding:53px 0;box-sizing:border-box;display:flex;flex-direction:column;align-items:center}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper[data-v-ae197aef]{flex:1;width:100%;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:16px}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .title[data-v-ae197aef]{font-size:40px;font-weight:500;line-height:49px;letter-spacing:0em;text-align:center;color:#86909c}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc[data-v-ae197aef]{text-align:center;font-size:20px;line-height:32px}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc .text2[data-v-ae197aef]{color:#86909c;font-weight:500}.chatRoomWrapper .visionWrapper .emptyWrapper .descWrapper .desc .text3[data-v-ae197aef]{font-weight:400;color:#c9cdd4}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper[data-v-ae197aef]{width:900px;height:136px;display:flex;flex-direction:row;flex-wrap:wrap;gap:20px;align-items:center;justify-content:center}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]{width:440px;height:58px;padding:18px 24px;box-sizing:border-box;border-radius:10px;gap:10px;background:linear-gradient(180deg,#ffffff 0%,#f4f4f4 100%);border:1.32px solid #e5e6eb;cursor:pointer;transition:all .3s;display:flex;align-items:center;justify-content:center}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button span[data-v-ae197aef]{color:#1d2129;font-size:16px;font-style:normal;font-weight:500;line-height:normal;word-wrap:normal;word-break:keep-all;white-space:nowrap}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]:hover{border-radius:10px;background:linear-gradient(0deg,#2c67f7 0%,#5486ff 100%)!important}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button:hover span[data-v-ae197aef]{color:#fff!important}.chatRoomWrapper .visionWrapper .emptyWrapper .actionWrapper .button[data-v-ae197aef]:active{transform:scale(.98)}.chatRoomWrapper .visionWrapper .chatWrapper[data-v-ae197aef]{width:100%;flex:1;display:flex;overflow:hidden;position:relative}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area[data-v-ae197aef]{flex:1;display:flex;flex-direction:column;overflow:scroll;position:relative;padding-top:10px}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .scroll_wrap[data-v-ae197aef]{width:100%;height:100%;overflow-y:auto}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .msg_text[data-v-ae197aef]{min-width:440px;max-width:min(700px,70%);padding:16px;border-radius:4px;background-color:#fff;font-size:14px;font-weight:400;color:#1d2129;line-height:22px;box-sizing:border-box;white-space:normal;overflow-wrap:break-word}.chatRoomWrapper .visionWrapper .chatWrapper .msg_history_area .bottom_trigger[data-v-ae197aef]{width:200px;position:absolute;bottom:0px;left:calc(50% - 150px)}.chatRoomWrapper .inputWrapper[data-v-ae197aef]{width:100%;height:59px}.chatRoomWrapper .inputWrapper 
.inputInner[data-v-ae197aef]{display:flex;width:100%;height:100%;align-items:center;gap:10px;box-sizing:border-box;padding:12px;border-radius:10px;border:2px solid #e1e3e8;background:#fff;box-shadow:2.6px 2.6px 8px #00000014 inset}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]{flex:1;height:100%;color:#1d2129;font-size:16px;font-style:normal;font-weight:400;line-height:0%;border:none;outline:none}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]:is(:disabled){background:none;cursor:not-allowed}.chatRoomWrapper .inputWrapper .inputInner input[data-v-ae197aef]:is(:disabled)::placeholder{color:#86909c}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]{width:42px;height:42px;flex-shrink:0;background:#165dff;border-radius:8px;margin-right:-4px;display:flex;align-items:center;justify-content:center;user-select:none;cursor:pointer;transition:transform .3s;border:none}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]:is(:not(:disabled)):hover{transform:scale(1.1)}.chatRoomWrapper .inputWrapper .inputInner .sendBtn[data-v-ae197aef]:is(:not(:disabled)):active{transform:scale(.98)}.chatRoomWrapper .emptyWrapper[data-v-ae197aef]{width:100%;height:100%;display:flex}.chatWrapper[data-v-7d0d8d24]{width:100%;height:100vh;display:flex;flex-direction:row;overflow:hidden}.hfHomeWrapper[data-v-7444cb54]{width:100%;height:100vh} diff --git a/spaces/whitphx/gradio-static-test/dist/assets/Button-0391b19a.js b/spaces/whitphx/gradio-static-test/dist/assets/Button-0391b19a.js deleted file mode 100644 index a6f1197f3b9fc250035d926552ee352f5d3d4ca6..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/Button-0391b19a.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as C,i as E,s as K,q as z,t as k,V,W as I,y as W,H as L,X as q,N as m,E as y,h as O,Y as S,Z as Y,$ as D,j as X,r as H,D as c,K as Z,z as F}from"../lite.js";const G=(l,e)=>N[e](l[e]);function j(l,e){const t=e.reduce((i,n)=>(l[n]===void 0||!N[n]?i[n]=" ":i[n]=` ${G(l,n)} `,i),{});return t.styles=` ${Object.values(t).join(" ").replace(/\s+/g," ").trim()} `,t}const N={container(l){return l?"":"padding: 0; margin: 0; border-width: 0; box-shadow: none; overflow: visible; background: transparent;"},label_container(l){return l?"":"border-width: 0; box-shadow: none; overflow: visible; background: transparent;"},grid_cols(l){let e=["","sm-","md-","lg-","xl-","2xl-"],t=Array.isArray(l)?l:[l];return[0,0,0,0,0,0].map((i,n)=>`--${e[n]}grid-cols: var(--grid-${t?.[n]||t?.[t?.length-1]});`).join(" ")},grid_rows(l){let e=["","sm-","md-","lg-","xl-","2xl-"],t=Array.isArray(l)?l:[l];return[0,0,0,0,0,0].map((i,n)=>`--${e[n]}grid-rows: var(--grid-${t?.[n]||t?.[t?.length-1]});`).join(" ")},height(l){return l==="auto"?"height: auto;":""},full_width(l){return l?"width: var(--size-full); flex-grow: 1;":"flex-grow: 0; width: fit-content;"},equal_height(l){return l?"align-items: stretch;":"align-items: flex-start;"},visible(l){return l?"":"display:hidden;"},item_container(l){return l?"":"border-width:0;"},object_fit(l){return`--object-fit: ${l};`}};function J(l){let e,t,i,n;const o=l[16].default,f=I(o,l,l[15],null);let _=[{"data-testid":l[5]},{id:l[0]},{class:t="block "+l[1].join(" ")+" svelte-mppz8v"},{style:i=l[9]+" "+(l[8]||null)}],a={};for(let 
s=0;s<_.length;s+=1)a=W(a,_[s]);return{c(){e=L(l[10]),f&&f.c(),q(l[10])(e,a),m(e,"hidden",l[6]===!1),m(e,"padded",l[4]),m(e,"border_focus",l[3]==="focus"),y(e,"border-style",l[2]),y(e,"overflow",l[7]?"visible":"hidden")},m(s,u){O(s,e,u),f&&f.m(e,null),n=!0},p(s,u){f&&f.p&&(!n||u&32768)&&S(f,o,s,s[15],n?D(o,s[15],u,null):Y(s[15]),null),q(s[10])(e,a=X(_,[(!n||u&32)&&{"data-testid":s[5]},(!n||u&1)&&{id:s[0]},(!n||u&2&&t!==(t="block "+s[1].join(" ")+" svelte-mppz8v"))&&{class:t},(!n||u&768&&i!==(i=s[9]+" "+(s[8]||null)))&&{style:i}])),m(e,"hidden",s[6]===!1),m(e,"padded",s[4]),m(e,"border_focus",s[3]==="focus"),u&4&&y(e,"border-style",s[2]),u&128&&y(e,"overflow",s[7]?"visible":"hidden")},i(s){n||(z(f,s),n=!0)},o(s){k(f,s),n=!1},d(s){s&&H(e),f&&f.d(s)}}}function M(l){let e,t=l[10]&&J(l);return{c(){t&&t.c()},m(i,n){t&&t.m(i,n),e=!0},p(i,[n]){i[10]&&t.p(i,n)},i(i){e||(z(t),e=!0)},o(i){k(t),e=!1},d(i){t&&t.d(i)}}}function P(l,e,t){let i,n,{$$slots:o={},$$scope:f}=e,{style:_={}}=e,{elem_id:a=""}=e,{elem_classes:s=[]}=e,{variant:u="solid"}=e,{border_mode:b="base"}=e,{padding:g=!0}=e,{type:v="normal"}=e,{test_id:r=void 0}=e,{disable:h=!1}=e,{explicit_call:w=!1}=e,{visible:B=!0}=e,{allow_overflow:A=!0}=e,T=v==="fieldset"?"fieldset":"div";return V("BLOCK_KEY"),l.$$set=d=>{"style"in d&&t(11,_=d.style),"elem_id"in d&&t(0,a=d.elem_id),"elem_classes"in d&&t(1,s=d.elem_classes),"variant"in d&&t(2,u=d.variant),"border_mode"in d&&t(3,b=d.border_mode),"padding"in d&&t(4,g=d.padding),"type"in d&&t(12,v=d.type),"test_id"in d&&t(5,r=d.test_id),"disable"in d&&t(13,h=d.disable),"explicit_call"in d&&t(14,w=d.explicit_call),"visible"in d&&t(6,B=d.visible),"allow_overflow"in d&&t(7,A=d.allow_overflow),"$$scope"in d&&t(15,f=d.$$scope)},l.$$.update=()=>{l.$$.dirty&26624&&t(9,{styles:i}=w?j(_,[]):h?j({container:!1},["container"]):{styles:""},i),l.$$.dirty&2048&&t(8,n=(typeof _.height=="number"?`height: ${_.height}px; `:"")+(typeof _.width=="number"?`width: ${_.width}px;`:""))},[a,s,u,b,g,r,B,A,n,i,T,_,v,h,w,f,o]}class x extends C{constructor(e){super(),E(this,e,P,M,K,{style:11,elem_id:0,elem_classes:1,variant:2,border_mode:3,padding:4,type:12,test_id:5,disable:13,explicit_call:14,visible:6,allow_overflow:7})}}function Q(l){let e,t,i,n,o;const f=l[9].default,_=I(f,l,l[8],null);return{c(){e=L("button"),_&&_.c(),c(e,"class",t=l[4]+" "+l[3]+" "+l[1].join(" ")+" svelte-1ipelgc"),c(e,"style",l[6]),c(e,"id",l[0]),e.disabled=l[5],m(e,"hide",!l[2])},m(a,s){O(a,e,s),_&&_.m(e,null),i=!0,n||(o=Z(e,"click",l[10]),n=!0)},p(a,[s]){_&&_.p&&(!i||s&256)&&S(_,f,a,a[8],i?D(f,a[8],s,null):Y(a[8]),null),(!i||s&26&&t!==(t=a[4]+" "+a[3]+" "+a[1].join(" ")+" svelte-1ipelgc"))&&c(e,"class",t),(!i||s&64)&&c(e,"style",a[6]),(!i||s&1)&&c(e,"id",a[0]),(!i||s&32)&&(e.disabled=a[5]),(!i||s&30)&&m(e,"hide",!a[2])},i(a){i||(z(_,a),i=!0)},o(a){k(_,a),i=!1},d(a){a&&H(e),_&&_.d(a),n=!1,o()}}}function R(l,e,t){let i,{$$slots:n={},$$scope:o}=e,{style:f={}}=e,{elem_id:_=""}=e,{elem_classes:a=[]}=e,{visible:s=!0}=e,{variant:u="secondary"}=e,{size:b=f.size||"lg"}=e,{disabled:g=!1}=e;function v(r){F.call(this,l,r)}return l.$$set=r=>{"style"in r&&t(7,f=r.style),"elem_id"in r&&t(0,_=r.elem_id),"elem_classes"in r&&t(1,a=r.elem_classes),"visible"in r&&t(2,s=r.visible),"variant"in r&&t(3,u=r.variant),"size"in r&&t(4,b=r.size),"disabled"in r&&t(5,g=r.disabled),"$$scope"in r&&t(8,o=r.$$scope)},l.$$.update=()=>{l.$$.dirty&128&&t(6,{styles:i}=j(f,["full_width"]),i)},[_,a,s,u,b,g,i,f,o,n,v]}class p extends 
C{constructor(e){super(),E(this,e,R,Q,K,{style:7,elem_id:0,elem_classes:1,visible:2,variant:3,size:4,disabled:5})}}export{x as B,p as a,j as g}; -//# sourceMappingURL=Button-0391b19a.js.map diff --git a/spaces/wpeebles/DiT/diffusion/__init__.py b/spaces/wpeebles/DiT/diffusion/__init__.py deleted file mode 100644 index 8c536a98da92c4d051458803737661e5ecf974c2..0000000000000000000000000000000000000000 --- a/spaces/wpeebles/DiT/diffusion/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -# Modified from OpenAI's diffusion repos -# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py -# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion -# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py - -from . import gaussian_diffusion as gd -from .respace import SpacedDiffusion, space_timesteps - - -def create_diffusion( - timestep_respacing, - noise_schedule="linear", - use_kl=False, - sigma_small=False, - predict_xstart=False, - learn_sigma=True, - rescale_learned_sigmas=False, - diffusion_steps=1000 -): - betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps) - if use_kl: - loss_type = gd.LossType.RESCALED_KL - elif rescale_learned_sigmas: - loss_type = gd.LossType.RESCALED_MSE - else: - loss_type = gd.LossType.MSE - if timestep_respacing is None or timestep_respacing == "": - timestep_respacing = [diffusion_steps] - return SpacedDiffusion( - use_timesteps=space_timesteps(diffusion_steps, timestep_respacing), - betas=betas, - model_mean_type=( - gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X - ), - model_var_type=( - ( - gd.ModelVarType.FIXED_LARGE - if not sigma_small - else gd.ModelVarType.FIXED_SMALL - ) - if not learn_sigma - else gd.ModelVarType.LEARNED_RANGE - ), - loss_type=loss_type - # rescale_timesteps=rescale_timesteps, - ) diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/model_complexity.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/model_complexity.py deleted file mode 100644 index 7d1dc1ed13ba747b510030a5690b81cd0d570822..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/model_complexity.py +++ /dev/null @@ -1,363 +0,0 @@ -from __future__ import division, print_function, absolute_import -import math -import numpy as np -from itertools import repeat -from collections import namedtuple, defaultdict -import torch - -__all__ = ['compute_model_complexity'] -""" -Utility -""" - - -def _ntuple(n): - - def parse(x): - if isinstance(x, int): - return tuple(repeat(x, n)) - return x - - return parse - - -_single = _ntuple(1) -_pair = _ntuple(2) -_triple = _ntuple(3) -""" -Convolution -""" - - -def hook_convNd(m, x, y): - k = torch.prod(torch.Tensor(m.kernel_size)).item() - cin = m.in_channels - flops_per_ele = k * cin # + (k*cin-1) - if m.bias is not None: - flops_per_ele += 1 - flops = flops_per_ele * y.numel() / m.groups - return int(flops) - - -""" -Pooling -""" - - -def hook_maxpool1d(m, x, y): - flops_per_ele = m.kernel_size - 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_maxpool2d(m, x, y): - k = _pair(m.kernel_size) - k = torch.prod(torch.Tensor(k)).item() - # ops: compare - flops_per_ele = k - 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_maxpool3d(m, x, y): - k = _triple(m.kernel_size) - k = torch.prod(torch.Tensor(k)).item() - flops_per_ele = k 
- 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_avgpool1d(m, x, y): - flops_per_ele = m.kernel_size - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_avgpool2d(m, x, y): - k = _pair(m.kernel_size) - k = torch.prod(torch.Tensor(k)).item() - flops_per_ele = k - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_avgpool3d(m, x, y): - k = _triple(m.kernel_size) - k = torch.prod(torch.Tensor(k)).item() - flops_per_ele = k - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapmaxpool1d(m, x, y): - x = x[0] - out_size = m.output_size - k = math.ceil(x.size(2) / out_size) - flops_per_ele = k - 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapmaxpool2d(m, x, y): - x = x[0] - out_size = _pair(m.output_size) - k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size) - k = torch.prod(torch.ceil(k)).item() - flops_per_ele = k - 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapmaxpool3d(m, x, y): - x = x[0] - out_size = _triple(m.output_size) - k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size) - k = torch.prod(torch.ceil(k)).item() - flops_per_ele = k - 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapavgpool1d(m, x, y): - x = x[0] - out_size = m.output_size - k = math.ceil(x.size(2) / out_size) - flops_per_ele = k - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapavgpool2d(m, x, y): - x = x[0] - out_size = _pair(m.output_size) - k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size) - k = torch.prod(torch.ceil(k)).item() - flops_per_ele = k - flops = flops_per_ele * y.numel() - return int(flops) - - -def hook_adapavgpool3d(m, x, y): - x = x[0] - out_size = _triple(m.output_size) - k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size) - k = torch.prod(torch.ceil(k)).item() - flops_per_ele = k - flops = flops_per_ele * y.numel() - return int(flops) - - -""" -Non-linear activations -""" - - -def hook_relu(m, x, y): - # eq: max(0, x) - num_ele = y.numel() - return int(num_ele) - - -def hook_leakyrelu(m, x, y): - # eq: max(0, x) + negative_slope*min(0, x) - num_ele = y.numel() - flops = 3 * num_ele - return int(flops) - - -""" -Normalization -""" - - -def hook_batchnormNd(m, x, y): - num_ele = y.numel() - flops = 2 * num_ele # mean and std - if m.affine: - flops += 2 * num_ele # gamma and beta - return int(flops) - - -def hook_instancenormNd(m, x, y): - return hook_batchnormNd(m, x, y) - - -def hook_groupnorm(m, x, y): - return hook_batchnormNd(m, x, y) - - -def hook_layernorm(m, x, y): - num_ele = y.numel() - flops = 2 * num_ele # mean and std - if m.elementwise_affine: - flops += 2 * num_ele # gamma and beta - return int(flops) - - -""" -Linear -""" - - -def hook_linear(m, x, y): - flops_per_ele = m.in_features # + (m.in_features-1) - if m.bias is not None: - flops_per_ele += 1 - flops = flops_per_ele * y.numel() - return int(flops) - - -__generic_flops_counter = { - # Convolution - 'Conv1d': hook_convNd, - 'Conv2d': hook_convNd, - 'Conv3d': hook_convNd, - # Pooling - 'MaxPool1d': hook_maxpool1d, - 'MaxPool2d': hook_maxpool2d, - 'MaxPool3d': hook_maxpool3d, - 'AvgPool1d': hook_avgpool1d, - 'AvgPool2d': hook_avgpool2d, - 'AvgPool3d': hook_avgpool3d, - 'AdaptiveMaxPool1d': hook_adapmaxpool1d, - 'AdaptiveMaxPool2d': hook_adapmaxpool2d, - 'AdaptiveMaxPool3d': hook_adapmaxpool3d, - 'AdaptiveAvgPool1d': hook_adapavgpool1d, - 'AdaptiveAvgPool2d': hook_adapavgpool2d, - 'AdaptiveAvgPool3d': 
hook_adapavgpool3d, - # Non-linear activations - 'ReLU': hook_relu, - 'ReLU6': hook_relu, - 'LeakyReLU': hook_leakyrelu, - # Normalization - 'BatchNorm1d': hook_batchnormNd, - 'BatchNorm2d': hook_batchnormNd, - 'BatchNorm3d': hook_batchnormNd, - 'InstanceNorm1d': hook_instancenormNd, - 'InstanceNorm2d': hook_instancenormNd, - 'InstanceNorm3d': hook_instancenormNd, - 'GroupNorm': hook_groupnorm, - 'LayerNorm': hook_layernorm, - # Linear - 'Linear': hook_linear, -} - -__conv_linear_flops_counter = { - # Convolution - 'Conv1d': hook_convNd, - 'Conv2d': hook_convNd, - 'Conv3d': hook_convNd, - # Linear - 'Linear': hook_linear, -} - - -def _get_flops_counter(only_conv_linear): - if only_conv_linear: - return __conv_linear_flops_counter - return __generic_flops_counter - - -def compute_model_complexity( - model, input_size, verbose=False, only_conv_linear=True -): - """Returns number of parameters and FLOPs. - - .. note:: - (1) this function only provides an estimate of the theoretical time complexity - rather than the actual running time which depends on implementations and hardware, - and (2) the FLOPs is only counted for layers that are used at test time. This means - that redundant layers such as person ID classification layer will be ignored as it - is discarded when doing feature extraction. Note that the inference graph depends on - how you construct the computations in ``forward()``. - - Args: - model (nn.Module): network model. - input_size (tuple): input size, e.g. (1, 3, 256, 128). - verbose (bool, optional): shows detailed complexity of - each module. Default is False. - only_conv_linear (bool, optional): only considers convolution - and linear layers when counting flops. Default is True. - If set to False, flops of all layers will be counted. 
- - Examples:: - >>> from torchreid import models, utils - >>> model = models.build_model(name='resnet50', num_classes=1000) - >>> num_params, flops = utils.compute_model_complexity(model, (1, 3, 256, 128), verbose=True) - """ - registered_handles = [] - layer_list = [] - layer = namedtuple('layer', ['class_name', 'params', 'flops']) - - def _add_hooks(m): - - def _has_submodule(m): - return len(list(m.children())) > 0 - - def _hook(m, x, y): - params = sum(p.numel() for p in m.parameters()) - class_name = str(m.__class__.__name__) - flops_counter = _get_flops_counter(only_conv_linear) - if class_name in flops_counter: - flops = flops_counter[class_name](m, x, y) - else: - flops = 0 - layer_list.append( - layer(class_name=class_name, params=params, flops=flops) - ) - - # only consider the very basic nn layer - if _has_submodule(m): - return - - handle = m.register_forward_hook(_hook) - registered_handles.append(handle) - - default_train_mode = model.training - - model.eval().apply(_add_hooks) - input = torch.rand(input_size) - if next(model.parameters()).is_cuda: - input = input.cuda() - model(input) # forward - - for handle in registered_handles: - handle.remove() - - model.train(default_train_mode) - - if verbose: - per_module_params = defaultdict(list) - per_module_flops = defaultdict(list) - - total_params, total_flops = 0, 0 - - for layer in layer_list: - total_params += layer.params - total_flops += layer.flops - if verbose: - per_module_params[layer.class_name].append(layer.params) - per_module_flops[layer.class_name].append(layer.flops) - - if verbose: - num_udscore = 55 - print(' {}'.format('-' * num_udscore)) - print(' Model complexity with input size {}'.format(input_size)) - print(' {}'.format('-' * num_udscore)) - for class_name in per_module_params: - params = int(np.sum(per_module_params[class_name])) - flops = int(np.sum(per_module_flops[class_name])) - print( - ' {} (params={:,}, flops={:,})'.format( - class_name, params, flops - ) - ) - print(' {}'.format('-' * num_udscore)) - print( - ' Total (params={:,}, flops={:,})'.format( - total_params, total_flops - ) - ) - print(' {}'.format('-' * num_udscore)) - - return total_params, total_flops diff --git a/spaces/xuetao/bingo3/src/pages/api/blob.ts b/spaces/xuetao/bingo3/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/xuetao/bingo3/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/xwsm/gpt/crazy_functions/test_project/cpp/libJPG/jpge.h 
b/spaces/xwsm/gpt/crazy_functions/test_project/cpp/libJPG/jpge.h deleted file mode 100644 index a46c805ab80aab491f7f9508b3a008b149866bee..0000000000000000000000000000000000000000 --- a/spaces/xwsm/gpt/crazy_functions/test_project/cpp/libJPG/jpge.h +++ /dev/null @@ -1,172 +0,0 @@ - -// jpge.h - C++ class for JPEG compression. -// Public domain, Rich Geldreich -// Alex Evans: Added RGBA support, linear memory allocator. -#ifndef JPEG_ENCODER_H -#define JPEG_ENCODER_H - -#include - -namespace jpge -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef signed int int32; - typedef unsigned short uint16; - typedef unsigned int uint32; - typedef unsigned int uint; - - // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common. - enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 }; - - // JPEG compression parameters structure. - struct params - { - inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { } - - inline bool check_valid() const - { - if ((m_quality < 1) || (m_quality > 100)) return false; - if ((uint)m_subsampling > (uint)H2V2) return false; - return true; - } - - // Quality: 1-100, higher is better. Typical values are around 50-95. - int m_quality; - - // m_subsampling: - // 0 = Y (grayscale) only - // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU) - // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU) - // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common) - subsampling_t m_subsampling; - - // Disables CbCr discrimination - only intended for testing. - // If true, the Y quantization table is also used for the CbCr channels. - bool m_no_chroma_discrim_flag; - - bool m_two_pass_flag; - }; - - // Writes JPEG image to a file. - // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels. - bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Writes JPEG image to memory buffer. - // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes. - // If return value is true, buf_size will be set to the size of the compressed data. - bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Output stream abstract class - used by the jpeg_encoder class to write to the output stream. - // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts. - class output_stream - { - public: - virtual ~output_stream() { }; - virtual bool put_buf(const void* Pbuf, int64_t len) = 0; - template inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); } - }; - - // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions. - class jpeg_encoder - { - public: - jpeg_encoder(); - ~jpeg_encoder(); - - // Initializes the compressor. - // pStream: The stream object to use for writing compressed data. - // params - Compression parameters structure, defined above. - // width, height - Image dimensions. - // channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data. - // Returns false on out of memory or if a stream write fails. 
- bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params()); - - const params &get_params() const { return m_params; } - - // Deinitializes the compressor, freeing any allocated memory. May be called at any time. - void deinit(); - - uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; } - inline uint get_cur_pass() { return m_pass_num; } - - // Call this method with each source scanline. - // width * src_channels bytes per scanline is expected (RGB or Y format). - // You must call with NULL after all scanlines are processed to finish compression. - // Returns false on out of memory or if a stream write fails. - bool process_scanline(const void* pScanline); - - private: - jpeg_encoder(const jpeg_encoder &); - jpeg_encoder &operator =(const jpeg_encoder &); - - typedef int32 sample_array_t; - - output_stream *m_pStream; - params m_params; - uint8 m_num_components; - uint8 m_comp_h_samp[3], m_comp_v_samp[3]; - int m_image_x, m_image_y, m_image_bpp, m_image_bpl; - int m_image_x_mcu, m_image_y_mcu; - int m_image_bpl_xlt, m_image_bpl_mcu; - int m_mcus_per_row; - int m_mcu_x, m_mcu_y; - uint8 *m_mcu_lines[16]; - uint8 m_mcu_y_ofs; - sample_array_t m_sample_array[64]; - int16 m_coefficient_array[64]; - int32 m_quantization_tables[2][64]; - uint m_huff_codes[4][256]; - uint8 m_huff_code_sizes[4][256]; - uint8 m_huff_bits[4][17]; - uint8 m_huff_val[4][256]; - uint32 m_huff_count[4][256]; - int m_last_dc_val[3]; - enum { JPGE_OUT_BUF_SIZE = 2048 }; - uint8 m_out_buf[JPGE_OUT_BUF_SIZE]; - uint8 *m_pOut_buf; - uint m_out_buf_left; - uint32 m_bit_buffer; - uint m_bits_in; - uint8 m_pass_num; - bool m_all_stream_writes_succeeded; - - void optimize_huffman_table(int table_num, int table_len); - void emit_byte(uint8 i); - void emit_word(uint i); - void emit_marker(int marker); - void emit_jfif_app0(); - void emit_dqt(); - void emit_sof(); - void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag); - void emit_dhts(); - void emit_sos(); - void emit_markers(); - void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val); - void compute_quant_table(int32 *dst, int16 *src); - void adjust_quant_table(int32 *dst, int32 *src); - void first_pass_init(); - bool second_pass_init(); - bool jpg_open(int p_x_res, int p_y_res, int src_channels); - void load_block_8_8_grey(int x); - void load_block_8_8(int x, int y, int c); - void load_block_16_8(int x, int c); - void load_block_16_8_8(int x, int c); - void load_quantized_coefficients(int component_num); - void flush_output_buffer(); - void put_bits(uint bits, uint len); - void code_coefficients_pass_one(int component_num); - void code_coefficients_pass_two(int component_num); - void code_block(int component_num); - void process_mcu_row(); - bool terminate_pass_one(); - bool terminate_pass_two(); - bool process_end_of_image(); - void load_mcu(const void* src); - void clear(); - void init(); - }; - -} // namespace jpge - -#endif // JPEG_ENCODER \ No newline at end of file diff --git a/spaces/yangliuyi601/rvc-models/infer_pack/models.py b/spaces/yangliuyi601/rvc-models/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/yangliuyi601/rvc-models/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from 
infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - 
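
# The encoders above rely on commons.sequence_mask to zero out padded timesteps.
# That helper is not shown in this file; a minimal length-mask along these lines
# (a sketch of the usual pattern, not necessarily the exact implementation):
import torch

def sequence_mask_sketch(lengths: torch.Tensor, max_length: int) -> torch.Tensor:
    # lengths: [batch] integer lengths; returns a [batch, max_length] bool mask
    # that is True for valid positions and False for padding.
    positions = torch.arange(max_length, device=lengths.device)
    return positions.unsqueeze(0) < lengths.unsqueeze(1)

# e.g. sequence_mask_sketch(torch.tensor([3, 5]), 5) ->
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True,  True]])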
gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - 
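
# PosteriorEncoder above samples its latent with the reparameterization trick:
# the projection yields a mean m and a log-std logs, and z = m + exp(logs) * eps
# with eps ~ N(0, I), so gradients can flow through the sampling step.
# A self-contained illustration (a sketch with made-up shapes, not the model's API):
import torch

m = torch.zeros(2, 192, 100)             # [batch, channels, frames] mean
logs = torch.full((2, 192, 100), -1.0)   # log standard deviation
eps = torch.randn_like(m)
z = m + torch.exp(logs) * eps            # differentiable sample from N(m, exp(logs)^2)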
sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - 
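
# SineGen above builds its excitation by accumulating instantaneous phase: the k-th
# channel carries frequency (k+1)*F0, the per-sample phase increment is f/sr, and the
# waveform is sin(2*pi*cumsum(increment)). A tiny single-harmonic sketch of that idea
# (not the class interface, which also handles upsampling, random initial phase and
# voiced/unvoiced noise mixing):
import math
import torch

sr = 16000
f0 = torch.full((1, 16000), 220.0)            # [batch, samples] fundamental in Hz
phase = torch.cumsum(f0 / sr, dim=1)          # cycles accumulated per sample
sine = 0.1 * torch.sin(2 * math.pi * phase)   # sine_amp = 0.1 as in the defaults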
noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - 
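
# SourceModuleHnNSF above collapses the (harmonic_num + 1) sine channels into a single
# excitation with a learned nn.Linear followed by tanh. A minimal sketch of that merge
# step in isolation (shapes are illustrative only):
import torch
from torch import nn

harmonic_num = 8
sine_wavs = torch.randn(1, 16000, harmonic_num + 1)   # [batch, samples, harmonics]
merge = nn.Sequential(nn.Linear(harmonic_num + 1, 1), nn.Tanh())
sine_merge = merge(sine_wavs)                          # [batch, samples, 1]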
"48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - 
resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - 
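
# At inference time the *NSFsid synthesizers above never see a reference spectrogram:
# they sample the prior predicted by the text encoder (m_p, logs_p), scale the noise
# by 0.66666 as a temperature, run the flow in reverse, and decode. The sampling step
# in isolation looks like this (a sketch with placeholder shapes):
import torch

m_p = torch.zeros(1, 192, 200)       # prior mean from enc_p
logs_p = torch.zeros(1, 192, 200)    # prior log-std from enc_p
x_mask = torch.ones(1, 1, 200)       # valid-frame mask
temperature = 0.66666
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * temperature) * x_mask
# z_p is then passed through self.flow(..., reverse=True) and on to the decoder.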
self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = 
torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/yanli01/gpt01/modules/webui_locale.py b/spaces/yanli01/gpt01/modules/webui_locale.py deleted file mode 100644 index efcbb6ec66dc4c2fb9d98e51a2b3dccb7b5b2c70..0000000000000000000000000000000000000000 --- a/spaces/yanli01/gpt01/modules/webui_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import locale -import commentjson as json - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - language = config.get("language", "auto") # 在这里输入你的 API 密钥 - language = os.environ.get("LANGUAGE", language) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/ybelkada/petals/utils_display.py b/spaces/ybelkada/petals/utils_display.py deleted file mode 100644 index 911690ff18e2a959142b133779d3453dd2cd08aa..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/petals/utils_display.py +++ /dev/null @@ -1,48 +0,0 @@ -import time -import random - -import streamlit as st - -from example_prompts import EXAMPLE_PROMPTS - -HEADER = """ - ## The online demo has now migrated to https://petals.ml/ and http://chat.petals.ml/ please refer to these links -""" - -SIDE_BAR_TEXT = """ - -# *PETALS: A Collaborative Inference and Fine-tuning of Large Models* - -A BigScience initiative. 
- -- [Introduction](#introduction) - * [What is *PETALS* ?](#what-is--petals---) - * [Generation parameters](#generation-parameters) - -# Introduction - -This Space is an interactive Space of *PETALS* paper that aims to run BLOOM-176 in a distributed manner for efficient and cost-effective inference and fine-tuning. - -## What is *PETALS* ? - -With the release of BLOOM-176B and OPT-175B, everyone can download pretrained models of this scale. Still, using these models requires supercomputer-grade hardware, which is unavailable to many researchers. -PETALS proposes to run BLOOM-176 in a distributed manner. The model is run on multiple computers from different users. Each user can benefit from the large model's inference by checking the official links: [petals](https://petals.ml/) | [chat-petals](http://chat.petals.ml/) - - -""" - -def write_incremental(text, place_holder, delay=0.05): - """ - Write a text in a streamlit widget, one character at a time. - Adapted from: https://discuss.streamlit.io/t/display-several-pieces-of-strings-incrementally-on-the-same-line/9279 - """ - for i in range(len(text) + 1): - place_holder.markdown("### %s " % text[0:i].replace("\n", "
                            "), unsafe_allow_html=True) - # place_holder.markdown("#### %s" % text[0:i]) - time.sleep(delay) - -def i_am_feeling_lucky(): - """ - Return a random prompt from EXAMPLE_PROMPT - """ - return EXAMPLE_PROMPTS[random.randint(0, len(EXAMPLE_PROMPTS) - 1)] \ No newline at end of file diff --git a/spaces/yenumulanarendraprasad/mygenaivoicebot/README.md b/spaces/yenumulanarendraprasad/mygenaivoicebot/README.md deleted file mode 100644 index ab443ec2a82094a7b83c41506fe06ef5bb974cad..0000000000000000000000000000000000000000 --- a/spaces/yenumulanarendraprasad/mygenaivoicebot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mygenaivoicebot -emoji: 📈 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yiningmao/metaphor-detection-baseline/README.md b/spaces/yiningmao/metaphor-detection-baseline/README.md deleted file mode 100644 index b7f56adb5e1365fd4662865fab51e49d70f0da2c..0000000000000000000000000000000000000000 --- a/spaces/yiningmao/metaphor-detection-baseline/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Metaphor Detection Baseline -emoji: 🌍 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/modules/losses.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/modules/losses.py deleted file mode 100644 index cd21799eccde350c3aac0bdd661baf96ed220147..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/modules/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import modules.commons as commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - #print(logs_p) - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py deleted file mode 100644 index 1aad53bfef62fb584d5022585d567e346f671a55..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..common.optim import SGD as optimizer -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.data.coco_keypoint import dataloader -from ..common.models.keypoint_rcnn_fpn import model -from ..common.train import train - -model.backbone.bottom_up.freeze_at = 2 -train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py deleted file mode 100644 index 88f55d2ce9db62e61445d6a3700067d9d864ecae..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py +++ /dev/null @@ -1,20 +0,0 @@ -from detectron2.config import LazyCall as L -from detectron2.layers import ShapeSpec -from detectron2.modeling import PanopticFPN -from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead - -from .mask_rcnn_fpn import model - -model._target_ = PanopticFPN -model.sem_seg_head = L(SemSegFPNHead)( - input_shape={ - f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}") - for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32]) - }, - ignore_value=255, - num_classes=54, # COCO stuff + 1 - conv_dims=128, - common_stride=4, - loss_weight=0.5, - norm="GN", -) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/roi_align.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/roi_align.py deleted file mode 100644 index 163462e1f194e1e4100da92d76d9516f7cc22e35..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/roi_align.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from torch import nn -from torchvision.ops import roi_align - - -# NOTE: torchvision's RoIAlign has a different default aligned=False -class ROIAlign(nn.Module): - def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): - """ - Args: - output_size (tuple): h, w - spatial_scale (float): scale the input boxes by this number - sampling_ratio (int): number of inputs samples to take for each output - sample. 0 to take samples densely. - aligned (bool): if False, use the legacy implementation in - Detectron. If True, align the results more perfectly. - - Note: - The meaning of aligned=True: - - Given a continuous coordinate c, its two neighboring pixel indices (in our - pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, - c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled - from the underlying signal at continuous coordinates 0.5 and 1.5). 
But the original - roi_align (aligned=False) does not subtract the 0.5 when computing neighboring - pixel indices and therefore it uses pixels with a slightly incorrect alignment - (relative to our pixel model) when performing bilinear interpolation. - - With `aligned=True`, - we first appropriately scale the ROI and then shift it by -0.5 - prior to calling roi_align. This produces the correct neighbors; see - detectron2/tests/test_roi_align.py for verification. - - The difference does not make a difference to the model's performance if - ROIAlign is used together with conv layers. - """ - super().__init__() - self.output_size = output_size - self.spatial_scale = spatial_scale - self.sampling_ratio = sampling_ratio - self.aligned = aligned - - from torchvision import __version__ - - version = tuple(int(x) for x in __version__.split(".")[:2]) - # https://github.com/pytorch/vision/pull/2438 - assert version >= (0, 7), "Require torchvision >= 0.7" - - def forward(self, input, rois): - """ - Args: - input: NCHW images - rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. - """ - assert rois.dim() == 2 and rois.size(1) == 5 - if input.is_quantized: - input = input.dequantize() - return roi_align( - input, - rois.to(dtype=input.dtype), - self.output_size, - self.spatial_scale, - self.sampling_ratio, - self.aligned, - ) - - def __repr__(self): - tmpstr = self.__class__.__name__ + "(" - tmpstr += "output_size=" + str(self.output_size) - tmpstr += ", spatial_scale=" + str(self.spatial_scale) - tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) - tmpstr += ", aligned=" + str(self.aligned) - tmpstr += ")" - return tmpstr diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/parse_results.sh b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/parse_results.sh deleted file mode 100644 index 80768a4005753447c49339790fe66c9b82a80aaf..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/parse_results.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. - -# A shell script that parses metrics from the log file. -# Make it easier for developers to track performance of models. - -LOG="$1" - -if [[ -z "$LOG" ]]; then - echo "Usage: $0 /path/to/log/file" - exit 1 -fi - -# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it) -# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices) -# [12/15 11:49:03] inference INFO: Total inference pure compute time: ..... 
- -# training time -trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*') -echo "Training speed: $trainspeed s/it" - -# inference time: there could be multiple inference during training -inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1) -echo "Inference speed: $inferencespeed s/it" - -# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161 -memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*') -echo "Training memory: $memory MB" - -echo "Easy to copypaste:" -echo "$trainspeed","$inferencespeed","$memory" - -echo "------------------------------" - -# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox -# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl -# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011 -# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm -# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl -# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011 - -echo "COCO Results:" -num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l) -# each task has 3 lines -grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3)) diff --git a/spaces/yunfei0710/gpt-academic/crazy_functions/latex_utils.py b/spaces/yunfei0710/gpt-academic/crazy_functions/latex_utils.py deleted file mode 100644 index eb65a8a915d2cbc66a346e42a5f2a17ee07bb585..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/crazy_functions/latex_utils.py +++ /dev/null @@ -1,788 +0,0 @@ -from toolbox import update_ui, update_ui_lastest_msg # 刷新Gradio前端界面 -from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone -import os, shutil -import re -import numpy as np -pj = os.path.join - -""" -======================================================================== -Part One -Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1) -======================================================================== -""" -PRESERVE = 0 -TRANSFORM = 1 - -def set_forbidden_text(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper - e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}" - you can mask out (mask = PRESERVE so that text become untouchable for GPT) - everything between "\begin{equation}" and "\end{equation}" - """ - if isinstance(pattern, list): pattern = '|'.join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - mask[res.span()[0]:res.span()[1]] = PRESERVE - return text, mask - -def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. - \begin{abstract} blablablablablabla. 
\end{abstract} - """ - if isinstance(pattern, list): pattern = '|'.join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - if not forbid_wrapper: - mask[res.span()[0]:res.span()[1]] = TRANSFORM - else: - mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}' - mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract - mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # abstract - return text, mask - -def set_forbidden_text_careful_brace(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper (text become untouchable for GPT). - count the number of the braces so as to catch compelete text area. - e.g. - \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = -1 - p = begin = end = res.regs[0][0] - for _ in range(1024*16): - if text[p] == '}' and brace_level == 0: break - elif text[p] == '}': brace_level -= 1 - elif text[p] == '{': brace_level += 1 - p += 1 - end = p+1 - mask[begin:end] = PRESERVE - return text, mask - -def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. - \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = 0 - p = begin = end = res.regs[1][0] - for _ in range(1024*16): - if text[p] == '}' and brace_level == 0: break - elif text[p] == '}': brace_level -= 1 - elif text[p] == '{': brace_level += 1 - p += 1 - end = p - mask[begin:end] = TRANSFORM - if forbid_wrapper: - mask[res.regs[0][0]:begin] = PRESERVE - mask[end:res.regs[0][1]] = PRESERVE - return text, mask - -def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42): - """ - Find all \begin{} ... \end{} text block that with less than limit_n_lines lines. 
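
# The *_careful_brace helpers above cannot use a plain regex to find the end of
# \caption{...}, because the argument may itself contain nested braces; they walk
# the text with a brace counter instead. The same idea as a standalone function
# (a sketch; the originals also update the PRESERVE/TRANSFORM mask as they go):
def find_matching_brace(text: str, open_pos: int) -> int:
    """Return the index of the '}' matching the '{' at open_pos."""
    depth = 0
    for i in range(open_pos, len(text)):
        if text[i] == '{':
            depth += 1
        elif text[i] == '}':
            depth -= 1
            if depth == 0:
                return i
    raise ValueError("unbalanced braces")

# find_matching_brace(r"\caption{a \textbf{b} c} rest", len(r"\caption"))  ->  23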
- Add it to preserve area - """ - pattern_compile = re.compile(pattern, flags) - def search_with_line_limit(text, mask): - for res in pattern_compile.finditer(text): - cmd = res.group(1) # begin{what} - this = res.group(2) # content between begin and end - this_mask = mask[res.regs[2][0]:res.regs[2][1]] - white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof', - 'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate'] - if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42 - this, this_mask = search_with_line_limit(this, this_mask) - mask[res.regs[2][0]:res.regs[2][1]] = this_mask - else: - mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE - return text, mask - return search_with_line_limit(text, mask) - -class LinkedListNode(): - """ - Linked List Node - """ - def __init__(self, string, preserve=True) -> None: - self.string = string - self.preserve = preserve - self.next = None - # self.begin_line = 0 - # self.begin_char = 0 - -def convert_to_linklist(text, mask): - root = LinkedListNode("", preserve=True) - current_node = root - for c, m, i in zip(text, mask, range(len(text))): - if (m==PRESERVE and current_node.preserve) \ - or (m==TRANSFORM and not current_node.preserve): - # add - current_node.string += c - else: - current_node.next = LinkedListNode(c, preserve=(m==PRESERVE)) - current_node = current_node.next - return root -""" -======================================================================== -Latex Merge File -======================================================================== -""" - -def 寻找Latex主文件(file_manifest, mode): - """ - 在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。 - P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码) - """ - canidates = [] - for texf in file_manifest: - if os.path.basename(texf).startswith('merge'): - continue - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - if r'\documentclass' in file_content: - canidates.append(texf) - else: - continue - - if len(canidates) == 0: - raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)') - elif len(canidates) == 1: - return canidates[0] - else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回 - canidates_score = [] - # 给出一些判定模板文档的词作为扣分项 - unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers'] - expected_words = ['\input', '\ref', '\cite'] - for texf in canidates: - canidates_score.append(0) - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - for uw in unexpected_words: - if uw in file_content: - canidates_score[-1] -= 1 - for uw in expected_words: - if uw in file_content: - canidates_score[-1] += 1 - select = np.argmax(canidates_score) # 取评分最高者返回 - return canidates[select] - -def rm_comments(main_file): - new_file_remove_comment_lines = [] - for l in main_file.splitlines(): - # 删除整行的空注释 - if l.lstrip().startswith("%"): - pass - else: - new_file_remove_comment_lines.append(l) - main_file = '\n'.join(new_file_remove_comment_lines) - # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令 - main_file = re.sub(r'(? 0 and node_string.count('\_') > final_tex.count('\_'): - # walk and replace any _ without \ - final_tex = re.sub(r"(?') - if not node.preserve: - segment_parts_for_gpt.append(node.string) - f.write(f'

                            #{show_html}#

                            ') - else: - f.write(f'

                            {show_html}

                            ') - node = node.next - if node is None: break - - for n in nodes: n.next = None # break - return_dict['nodes'] = nodes - return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt - return return_dict - - - -class LatexPaperSplit(): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - """ - def __init__(self) -> None: - self.nodes = None - self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \ - "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ - "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" - # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者) - self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" - - def merge_result(self, arr, mode, msg): - """ - Merge the result after the GPT process completed - """ - result_string = "" - p = 0 - for node in self.nodes: - if node.preserve: - result_string += node.string - else: - result_string += fix_content(arr[p], node.string) - p += 1 - if mode == 'translate_zh': - pattern = re.compile(r'\\begin\{abstract\}.*\n') - match = pattern.search(result_string) - if not match: - # match \abstract{xxxx} - pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL) - match = pattern_compile.search(result_string) - position = match.regs[1][0] - else: - # match \begin{abstract}xxxx\end{abstract} - position = match.end() - result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:] - return result_string - - def split(self, txt, project_folder, opts): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - P.S. use multiprocessing to avoid timeout error - """ - import multiprocessing - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process( - target=split_subprocess, - args=(txt, project_folder, return_dict, opts)) - p.start() - p.join() - p.close() - self.nodes = return_dict['nodes'] - self.sp = return_dict['segment_parts_for_gpt'] - return self.sp - - - -class LatexPaperFileGroup(): - """ - use tokenizer to break down text according to max_token_limit - """ - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - use tokenizer to break down text according to max_token_limit - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - print('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, 
self.sp_file_index): - self.file_result[k] += r - - def write_result(self): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + '.polish.tex', 'w', encoding='utf8') as f: - manifest.append(path + '.polish.tex') - f.write(res) - return manifest - -def write_html(sp_file_contents, sp_file_result, chatbot, project_folder): - - # write html - try: - import shutil - from .crazy_utils import construct_html - from toolbox import gen_time_str - ch = construct_html() - orig = "" - trans = "" - final = [] - for c,r in zip(sp_file_contents, sp_file_result): - final.append(c) - final.append(r) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{gen_time_str()}.trans.html" - ch.save_file(create_report_file_name) - shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name)) - promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot) - except: - from toolbox import trimmed_format_exc - print('writing html result failed:', trimmed_format_exc()) - -def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件 - - # <-------- 寻找主tex文件 ----------> - maintex = 寻找Latex主文件(file_manifest, mode) - chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(3) - - # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> - main_tex_basename = os.path.basename(maintex) - assert main_tex_basename.endswith('.tex') - main_tex_basename_bare = main_tex_basename[:-4] - may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl') - if os.path.exists(may_exist_bbl): - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl')) - - with open(maintex, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() - merged_content = merge_tex_files(project_folder, content, mode) - - with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f: - f.write(merged_content) - - # <-------- 精细切分latex文件 ----------> - chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - lps = LatexPaperSplit() - res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数 - - # <-------- 拆分过长的latex片段 ----------> - pfg = LatexPaperFileGroup() - for index, r in enumerate(res): - pfg.file_paths.append('segment-' + str(index)) - pfg.file_contents.append(r) - - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 根据需要切换prompt ----------> - inputs_array, sys_prompt_array = switch_prompt(pfg, mode) - inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag] - - if os.path.exists(pj(project_folder,'temp.pkl')): - - # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> - pfg = objload(file=pj(project_folder,'temp.pkl')) - - else: - # <-------- gpt 多线程请求 ----------> - 
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待 - scroller_max_len = 40 - ) - - # <-------- 文本碎片重组为完整的tex片段 ----------> - pfg.sp_file_result = [] - for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - - # <-------- 临时存储用于调试 ----------> - pfg.get_token_num = None - objdump(pfg, file=pj(project_folder,'temp.pkl')) - - write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder) - - # <-------- 写出文件 ----------> - msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。" - final_tex = lps.merge_result(pfg.file_result, mode, msg) - with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f: - if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex) - - - # <-------- 整理结果, 退出 ----------> - chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------- 返回 ----------> - return project_folder + f'/merge_{mode}.tex' - - - -def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified): - try: - with open(log_path, 'r', encoding='utf-8', errors='replace') as f: - log = f.read() - with open(file_path, 'r', encoding='utf-8', errors='replace') as f: - file_lines = f.readlines() - import re - buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log) - buggy_lines = [int(l) for l in buggy_lines] - buggy_lines = sorted(buggy_lines) - print("removing lines that has errors", buggy_lines) - file_lines.pop(buggy_lines[0]-1) - with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f: - f.writelines(file_lines) - return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines - except: - print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.") - return False, -1, [-1] - -def compile_latex_with_timeout(command, cwd, timeout=60): - import subprocess - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) - try: - stdout, stderr = process.communicate(timeout=timeout) - except subprocess.TimeoutExpired: - process.kill() - stdout, stderr = process.communicate() - print("Process timed out!") - return False - return True - -def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'): - import os, time - current_dir = os.getcwd() - n_fix = 1 - max_try = 32 - chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history) - chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面 - yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面 - - while True: - import os - - # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error - yield from 
update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')): - # 只有第二步成功,才能继续下面的步骤 - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面 - if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original) - if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if mode!='translate_zh': - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面 - print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - - - # <---------- 检查结果 -----------> - results_ = "" - original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf')) - modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')) - diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf')) - results_ += f"原始PDF编译是否成功: {original_pdf_success};" - results_ += f"转化PDF编译是否成功: {modified_pdf_success};" - results_ += f"对比PDF编译是否成功: {diff_pdf_success};" - yield from update_ui_lastest_msg(f'第{n_fix}编译结束:
                            {results_}...', chatbot, history) # 刷新Gradio前端界面 - - if diff_pdf_success: - result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - if modified_pdf_success: - yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面 - result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path - if os.path.exists(pj(work_folder, '..', 'translation')): - shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf')) - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - return True # 成功啦 - else: - if n_fix>=max_try: break - n_fix += 1 - can_retry, main_file_modified, buggy_lines = remove_buggy_lines( - file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), - log_path=pj(work_folder_modified, f'{main_file_modified}.log'), - tex_name=f'{main_file_modified}.tex', - tex_name_pure=f'{main_file_modified}', - n_fix=n_fix, - work_folder_modified=work_folder_modified, - ) - yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面 - if not can_retry: break - - return False # 失败啦 - - - diff --git a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/hubert/hubert_model_onnx.py b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/hubert/hubert_model_onnx.py deleted file mode 100644 index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000 --- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/hubert/hubert_model_onnx.py +++ /dev/null @@ -1,217 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - -class HubertSoft(Hubert): - def __init__(self): - 
super().__init__() - - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - def forward(self, x): - return self.units(x) - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # 
get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. - Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/zhenwusw/JoJoGAN/e4e/datasets/__init__.py b/spaces/zhenwusw/JoJoGAN/e4e/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/zht1/test2/utils/tools.py b/spaces/zht1/test2/utils/tools.py deleted file mode 100644 index 274353226c313bc300ee778ed518c65d443f655c..0000000000000000000000000000000000000000 --- a/spaces/zht1/test2/utils/tools.py +++ /dev/null @@ -1,412 +0,0 @@ -import os -import sys - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from PIL import Image - - -def convert_box_xywh_to_xyxy(box): - x1 = box[0] - y1 = box[1] - x2 = box[0] + box[2] - y2 = box[1] + box[3] - return [x1, y1, x2, y2] - - -def segment_image(image, bbox): - image_array = np.array(image) - segmented_image_array = np.zeros_like(image_array) - x1, y1, x2, y2 = bbox - segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2] - segmented_image = Image.fromarray(segmented_image_array) - black_image = Image.new("RGB", image.size, (255, 255, 255)) - # transparency_mask = np.zeros_like((), dtype=np.uint8) - transparency_mask = np.zeros( - (image_array.shape[0], image_array.shape[1]), dtype=np.uint8 - ) - transparency_mask[y1:y2, x1:x2] = 255 - transparency_mask_image = Image.fromarray(transparency_mask, mode="L") - black_image.paste(segmented_image, mask=transparency_mask_image) - return black_image - - -def format_results(masks, scores, logits, filter=0): - annotations = [] - n = len(scores) - for i in range(n): - annotation = {} - - mask = masks[i] - tmp = np.where(mask != 0) #返回坐标索引 - # print("lllllllllllllllllllllllllll",tmp) - if np.sum(mask) < filter: - continue - annotation["id"] = i - annotation["segmentation"] = mask - annotation["bbox"] = [ - np.min(tmp[0]), - np.min(tmp[1]), - np.max(tmp[1]), - np.max(tmp[0]), - ] - annotation["score"] = scores[i] - annotation["area"] = annotation["segmentation"].sum() #返回的是所有数值之和 - # print("00000000000000000000000",annotation["area"]) - annotations.append(annotation) - return annotations - - -def filter_masks(annotations): # filter the overlap mask - annotations.sort(key=lambda x: x["area"], reverse=True) - to_remove = set() - for i in range(0, len(annotations)): - a = annotations[i] - for j in range(i + 1, len(annotations)): - b = annotations[j] - if i != j and j not in to_remove: - # check if - if b["area"] < a["area"]: - if (a["segmentation"] & b["segmentation"]).sum() / b[ - "segmentation" - ].sum() > 0.8: - 
to_remove.add(j) - - return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove - - -def get_bbox_from_mask(mask): - mask = mask.astype(np.uint8) - contours, hierarchy = cv2.findContours( - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) - x1, y1, w, h = cv2.boundingRect(contours[0]) - x2, y2 = x1 + w, y1 + h - if len(contours) > 1: - for b in contours: - x_t, y_t, w_t, h_t = cv2.boundingRect(b) - # 将多个bbox合并成一个 - x1 = min(x1, x_t) - y1 = min(y1, y_t) - x2 = max(x2, x_t + w_t) - y2 = max(y2, y_t + h_t) - h = y2 - y1 - w = x2 - x1 - return [x1, y1, x2, y2] - - -def fast_process( - annotations, args, mask_random_color, bbox=None, points=None, edges=False -): - if isinstance(annotations[0], dict): - annotations = [annotation["segmentation"] for annotation in annotations] - result_name = os.path.basename(args.img_path) - image = cv2.imread(args.img_path) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - original_h = image.shape[0] - original_w = image.shape[1] - if sys.platform == "darwin": - plt.switch_backend("TkAgg") - plt.figure(figsize=(original_w / 100, original_h / 100)) - # Add subplot with no margin. - plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) - plt.margins(0, 0) - plt.gca().xaxis.set_major_locator(plt.NullLocator()) - plt.gca().yaxis.set_major_locator(plt.NullLocator()) - plt.imshow(image) - if args.better_quality == True: - if isinstance(annotations[0], torch.Tensor): - annotations = np.array(annotations.cpu()) - for i, mask in enumerate(annotations): - mask = cv2.morphologyEx( - mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8) - ) - annotations[i] = cv2.morphologyEx( - mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8) - ) - if args.device == "cpu": - annotations = np.array(annotations) - fast_show_mask( - annotations, - plt.gca(), - random_color=mask_random_color, - bbox=bbox, - points=points, - point_label=args.point_label, - retinamask=args.retina, - target_height=original_h, - target_width=original_w, - ) - else: - if isinstance(annotations[0], np.ndarray): - annotations = torch.from_numpy(annotations) - fast_show_mask_gpu( - annotations, - plt.gca(), - random_color=args.randomcolor, - bbox=bbox, - points=points, - point_label=args.point_label, - retinamask=args.retina, - target_height=original_h, - target_width=original_w, - ) - if isinstance(annotations, torch.Tensor): - annotations = annotations.cpu().numpy() - if args.withContours == True: - contour_all = [] - temp = np.zeros((original_h, original_w, 1)) - for i, mask in enumerate(annotations): - if type(mask) == dict: - mask = mask["segmentation"] - annotation = mask.astype(np.uint8) - if args.retina == False: - annotation = cv2.resize( - annotation, - (original_w, original_h), - interpolation=cv2.INTER_NEAREST, - ) - contours, hierarchy = cv2.findContours( - annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE - ) - for contour in contours: - contour_all.append(contour) - cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2) - color = np.array([0 / 255, 0 / 255, 255 / 255, 0.8]) - contour_mask = temp / 255 * color.reshape(1, 1, -1) - plt.imshow(contour_mask) - - save_path = args.output - if not os.path.exists(save_path): - os.makedirs(save_path) - plt.axis("off") - fig = plt.gcf() - plt.draw() - - try: - buf = fig.canvas.tostring_rgb() - except AttributeError: - fig.canvas.draw() - buf = fig.canvas.tostring_rgb() - - cols, rows = fig.canvas.get_width_height() - img_array = np.fromstring(buf, dtype=np.uint8).reshape(rows, cols, 3) 
- cv2.imwrite( - os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR) - ) - - -# CPU post process -def fast_show_mask( - annotation, - ax, - random_color=False, - bbox=None, - points=None, - point_label=None, - retinamask=True, - target_height=960, - target_width=960, -): - msak_sum = annotation.shape[0] - height = annotation.shape[1] - weight = annotation.shape[2] - # 将annotation 按照面积 排序 - areas = np.sum(annotation, axis=(1, 2)) - sorted_indices = np.argsort(areas) - annotation = annotation[sorted_indices] - - index = (annotation != 0).argmax(axis=0) - if random_color == True: - color = np.random.random((msak_sum, 1, 1, 3)) - else: - color = np.ones((msak_sum, 1, 1, 3)) * np.array( - [30 / 255, 144 / 255, 255 / 255] - ) - transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6 - visual = np.concatenate([color, transparency], axis=-1) - mask_image = np.expand_dims(annotation, -1) * visual - - show = np.zeros((height, weight, 4)) - h_indices, w_indices = np.meshgrid( - np.arange(height), np.arange(weight), indexing="ij" - ) - indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) - # 使用向量化索引更新show的值 - show[h_indices, w_indices, :] = mask_image[indices] - if bbox is not None: - x1, y1, x2, y2 = bbox - ax.add_patch( - plt.Rectangle( - (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1 - ) - ) - # draw point - if points is not None: - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 1], - [point[1] for i, point in enumerate(points) if point_label[i] == 1], - s=20, - c="y", - ) - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 0], - [point[1] for i, point in enumerate(points) if point_label[i] == 0], - s=20, - c="m", - ) - - if retinamask == False: - show = cv2.resize( - show, (target_width, target_height), interpolation=cv2.INTER_NEAREST - ) - ax.imshow(show) - - -def fast_show_mask_gpu( - annotation, - ax, - random_color=False, - bbox=None, - points=None, - point_label=None, - retinamask=True, - target_height=960, - target_width=960, -): - msak_sum = annotation.shape[0] - height = annotation.shape[1] - weight = annotation.shape[2] - areas = torch.sum(annotation, dim=(1, 2)) - sorted_indices = torch.argsort(areas, descending=False) - annotation = annotation[sorted_indices] - # 找每个位置第一个非零值下标 - index = (annotation != 0).to(torch.long).argmax(dim=0) - if random_color == True: - color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device) - else: - color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor( - [30 / 255, 144 / 255, 255 / 255] - ).to(annotation.device) - transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6 - visual = torch.cat([color, transparency], dim=-1) - mask_image = torch.unsqueeze(annotation, -1) * visual - # 按index取数,index指每个位置选哪个batch的数,把mask_image转成一个batch的形式 - show = torch.zeros((height, weight, 4)).to(annotation.device) - h_indices, w_indices = torch.meshgrid( - torch.arange(height), torch.arange(weight), indexing="ij" - ) - indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) - # 使用向量化索引更新show的值 - show[h_indices, w_indices, :] = mask_image[indices] - show_cpu = show.cpu().numpy() - if bbox is not None: - x1, y1, x2, y2 = bbox - ax.add_patch( - plt.Rectangle( - (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1 - ) - ) - # draw point - if points is not None: - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 1], - [point[1] for i, point in 
enumerate(points) if point_label[i] == 1], - s=20, - c="y", - ) - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 0], - [point[1] for i, point in enumerate(points) if point_label[i] == 0], - s=20, - c="m", - ) - if retinamask == False: - show_cpu = cv2.resize( - show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST - ) - ax.imshow(show_cpu) - - -def crop_image(annotations, image_like): - if isinstance(image_like, str): - image = Image.open(image_like) - else: - image = image_like - ori_w, ori_h = image.size - mask_h, mask_w = annotations[0]["segmentation"].shape - if ori_w != mask_w or ori_h != mask_h: - image = image.resize((mask_w, mask_h)) - cropped_boxes = [] - cropped_images = [] - not_crop = [] - filter_id = [] - # annotations, _ = filter_masks(annotations) - # filter_id = list(_) - for _, mask in enumerate(annotations): - if np.sum(mask["segmentation"]) <= 100: - filter_id.append(_) - continue - bbox = get_bbox_from_mask(mask["segmentation"]) # mask 的 bbox - cropped_boxes.append(segment_image(image, bbox)) # 保存裁剪的图片 - # cropped_boxes.append(segment_image(image,mask["segmentation"])) - cropped_images.append(bbox) # 保存裁剪的图片的bbox - - return cropped_boxes, cropped_images, not_crop, filter_id, annotations - - -def box_prompt(masks, bbox, target_height, target_width): - h = masks.shape[1] - w = masks.shape[2] - if h != target_height or w != target_width: - bbox = [ - int(bbox[0] * w / target_width), - int(bbox[1] * h / target_height), - int(bbox[2] * w / target_width), - int(bbox[3] * h / target_height), - ] - bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0 - bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0 - bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w - bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h - - # IoUs = torch.zeros(len(masks), dtype=torch.float32) - bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) - - masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2)) - orig_masks_area = torch.sum(masks, dim=(1, 2)) - - union = bbox_area + orig_masks_area - masks_area - IoUs = masks_area / union - max_iou_index = torch.argmax(IoUs) - - return masks[max_iou_index].cpu().numpy(), max_iou_index - - -def point_prompt(masks, points, point_label, target_height, target_width): # numpy 处理 - h = masks[0]["segmentation"].shape[0] - w = masks[0]["segmentation"].shape[1] - if h != target_height or w != target_width: - print(".................................................") - points = [ - [int(point[0] * w / target_width), int(point[1] * h / target_height)] - for point in points - ] - onemask = np.zeros((h, w)) # (685, 1024) - for i, annotation in enumerate(masks): - if type(annotation) == dict: - # print("22222222222222222222222222222") 3 - mask = annotation["segmentation"] - else: - # print("333333333333333333333333333333") - mask = annotation - for i, point in enumerate(points): - # print("llllllllllllllllllllllllllllllllllllll",i,point_label) - if mask[point[1], point[0]] == 1 and point_label[i] == 1: - onemask += mask - if mask[point[1], point[0]] == 1 and point_label[i] == 0: - onemask -= mask - onemask = onemask >= 1 - return onemask, 0 diff --git a/spaces/zhuce/vits/README.md b/spaces/zhuce/vits/README.md deleted file mode 100644 index 57a571ba140a6e6b9e8a776a5e8eae3eb6859deb..0000000000000000000000000000000000000000 --- a/spaces/zhuce/vits/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: apache-2.0 -title: ' vits-uma-genshin-honkai' -sdk: gradio -sdk_version: 3.7 -emoji: 🐨 
-colorTo: yellow -pinned: false -app_file: app.py -duplicated_from: kanden/vits-uma-genshin-honkai ---- diff --git a/spaces/zxy666/bingo-chatai666/src/components/toaster.tsx b/spaces/zxy666/bingo-chatai666/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast'