diff --git a/errors.txt b/errors.txt deleted file mode 100644 index bffb2da1857ee9acfaec082d59101e7064dcb01d..0000000000000000000000000000000000000000 --- a/errors.txt +++ /dev/null @@ -1,24 +0,0 @@ -ky2k/Toxicity_Classifier_POC -tialenAdioni/chat-gpt-api -Narsil/myspace -arxify/RVC-beta-v2-0618 -WitchHuntTV/WinnieThePoohSVC_sovits4 -yizhangliu/Grounded-Segment-Anything -Robert001/UniControl-Demo -internetsignal/audioLDM -inamXcontru/PoeticTTS -dcarpintero/nlp-summarizer-pegasus -SungBeom/chatwine-korean -x6/BingAi -1gistliPinn/ChatGPT4 -colakin/video-generater -stomexserde/gpt4-ui -quidiaMuxgu/Expedit-SAM -NasirKhalid24/Dalle2-Diffusion-Prior -joaopereirajp/livvieChatBot -diacanFperku/AutoGPT -tioseFevbu/cartoon-converter -chuan-hd/law-assistant-chatbot -mshukor/UnIVAL -xuyingliKepler/openai_play_tts -TNR-5/lib111 \ No newline at end of file diff --git a/spaces/07jeancms/minima/app.py b/spaces/07jeancms/minima/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/07jeancms/minima/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Trials A Common but Costly Phenomenon in the Courts.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Trials A Common but Costly Phenomenon in the Courts.md deleted file mode 100644 index babeb36e33ba018f75d008341d3ecc13cf961113..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Trials A Common but Costly Phenomenon in the Courts.md +++ /dev/null @@ -1,13 +0,0 @@ -
-

What is a Cracked Trial and Why Does It Matter?

-

A cracked trial is a term used in the criminal justice system to describe a trial that has been scheduled for a not guilty hearing but does not proceed on the day, either because the defendant changes their plea to guilty or the prosecution drops the case. A cracked trial means that the case is resolved without a trial, but it also means that the court time and resources have been wasted, and the witnesses have been inconvenienced or distressed.

-

According to the guidance issued by the judiciary, a cracked trial can have a negative impact on the confidence in the system, as it may suggest that the case was not properly prepared or reviewed, or that there was undue pressure on the parties to reach a resolution. A cracked trial can also affect the victim's satisfaction and sense of justice, as they may feel that their voice was not heard or that the outcome was not fair.

-

cracked trial definition


Download ✑ ✑ ✑ https://byltly.com/2uKyQ0



-

The statistics published by Full Fact show that in 2014/15, about 35% of trials in the crown court and 37% in the magistrates' court were cracked, and that the main reason for this was late guilty pleas by the defendants. The report also found that only 2.1% of trials in the crown court and 6.8% of trials in the magistrates' court were cracked because of witness issues, such as absence or withdrawal of evidence.

-

A Dictionary of Law Enforcement defines a cracked trial as one that has been listed for a not guilty hearing on a particular day but does not proceed, either because the defendant pleads guilty to the whole or part of the indictment, or to an alternative charge, or because the prosecution offers no evidence.

-

A cracked trial is different from an ineffective trial, which is a trial that has been listed for a hearing but cannot start or continue on the day for reasons beyond the control of the parties, such as illness, unavailability of a judge or jury, or technical problems. An ineffective trial has to be rescheduled for another date.

-

A cracked trial is also different from a vacated trial, which is a trial that has been listed for a hearing but is cancelled before the day for reasons within the control of the parties, such as an agreement to resolve the case by another means, such as a plea bargain or a diversion scheme. A vacated trial does not require any further court time.

-

Conclusion

-

A cracked trial is a common occurrence in the criminal justice system, but it can have negative consequences for the efficiency and effectiveness of the system, as well as for the satisfaction and well-being of the victims and witnesses. Reducing the number of cracked trials is one of the challenges faced by the courts and prosecutors, who have to balance the interests of justice with the realities of resource constraints and human factors.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackle Crackle Free Movies.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackle Crackle Free Movies.md deleted file mode 100644 index ad1db2e7ad362200c8cfe7a793c85817517e77e0..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackle Crackle Free Movies.md +++ /dev/null @@ -1,19 +0,0 @@ -
-

How to Watch Crackle Crackle Free Movies Online

-

If you are looking for a way to watch free movies online, you may have heard of Crackle Crackle. Crackle Crackle is a website that offers a large collection of movies and TV shows that you can stream for free. You can find movies from various genres, such as action, comedy, drama, horror, thriller, and more. You can also watch original content from Crackle Crackle, such as The Oath, Snatch, and StartUp.

-

crackle crackle free movies


Download Zip >>> https://byltly.com/2uKvVZ



-

However, Crackle Crackle is not available in all countries, and you may encounter some issues when trying to access it. For example, you may see a message that says "Sorry, this content is not available in your region" or "This video is not available in your country". This is because Crackle Crackle uses geo-restrictions to limit its content to certain regions. If you are outside of those regions, you will not be able to watch Crackle Crackle free movies online.

-

But don't worry, there is a solution to this problem. You can use a VPN (Virtual Private Network) to bypass the geo-restrictions and watch Crackle Crackle free movies online from anywhere in the world. A VPN is a service that allows you to connect to a server in another country and change your IP address. This way, you can trick Crackle Crackle into thinking that you are in a region where its content is available. You can also enjoy other benefits of using a VPN, such as protecting your privacy and security online.

-

Here are the steps to watch Crackle Crackle free movies online with a VPN:

-
    -
  1. Choose a VPN service that has servers in the countries where Crackle Crackle is available, such as the US, Canada, Australia, or the UK. Some of the best VPNs for streaming are ExpressVPN, NordVPN, Surfshark, and CyberGhost.
  2. Download and install the VPN app on your device. You can use a VPN on your computer, smartphone, tablet, or smart TV.
  3. Launch the VPN app and sign in with your account. If you don't have an account yet, you can create one on the VPN website.
  4. Select a server in a country where Crackle Crackle is available and connect to it. For example, if you want to watch Crackle Crackle free movies online from India, you can connect to a server in the US.
  5. Open your browser and go to the Crackle Crackle website. You should be able to access it without any issues.
  6. Browse through the categories and genres and choose a movie or TV show that you want to watch. Click on it and enjoy watching Crackle Crackle free movies online with a VPN.
-

Note: You may need to disable your ad blocker or allow pop-ups on the Crackle Crackle website, as some of its content may be supported by ads.

-

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Patch Bad Piggies 1.5.0 Pc and Build Your Own Crazy Vehicles.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Patch Bad Piggies 1.5.0 Pc and Build Your Own Crazy Vehicles.md deleted file mode 100644 index e62d0e72991cf5aac1508e038038e5d2493a9829..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Patch Bad Piggies 1.5.0 Pc and Build Your Own Crazy Vehicles.md +++ /dev/null @@ -1,142 +0,0 @@ - -

Download Patch Bad Piggies 1.5.0 Pc: A Guide for Angry Birds Fans

-

Are you a fan of Angry Birds, the popular physics-based puzzle game that has taken the world by storm? If so, you might be interested in trying out Bad Piggies, a spin-off game that lets you play as the villains instead of the heroes. In this game, you have to help the greedy pigs build vehicles and machines to steal eggs from the angry birds.

-

Download Patch Bad Piggies 1.5.0 Pc


DOWNLOAD ✫✫✫ https://byltly.com/2uKwU3



-

But wait, there's more! If you want to enhance your gaming experience and enjoy more levels, features, and fun, you can download patch Bad Piggies 1.5.0 for PC and install it on your computer. This patch will update your game to the latest version and give you access to new sandbox modes, achievements, and more.

-

In this article, we will show you how to download and install Bad Piggies 1.5.0 on PC, as well as how to download and install patch Bad Piggies 1.5.0 on PC. Follow our step-by-step guide and you'll be playing this addictive game in no time.

-

What is Bad Piggies?

-

Bad Piggies is a game developed by Rovio Entertainment Corporation, the same company that created Angry Birds. It was released in September 2012 for various platforms, including Windows, Android, iOS, Mac, and more.

-

Unlike Angry Birds, where you have to launch birds at pigs using a slingshot, in Bad Piggies you have to construct vehicles and machines using various objects and materials to help the pigs reach their goal. The goal can be an egg, a star, a button, or anything else that the pigs desire.

-

The game has over 200 levels of egg-snatching and pig-flying fun, as well as over 40 bonus levels that you can unlock by earning three stars in each level. You can also play in sandbox mode, where you can create your own levels and vehicles using unlimited items.

-

What are the features of Bad Piggies?

-

Bad Piggies is a game that offers a lot of features and benefits for its players. Some of them are:

- -

What are the requirements to play Bad Piggies on PC?

-

If you want to play Bad Piggies on PC, you need to make sure that your computer meets the minimum system requirements for the game. These are:

- -

If your computer fulfills these requirements, you can proceed to download and install Bad Piggies on PC.

-

How to download and install Bad Piggies 1.5.0 on PC?

-

To download and install Bad Piggies 1.5.0 on PC, you need to use an emulator that can run Android apps on your computer. One of the best emulators for this purpose is BlueStacks, which is free, fast, and easy to use.

-


-

Here are the steps to download and install Bad Piggies 1.5.0 on PC using BlueStacks:

-

Step 1: Download BlueStacks emulator

-

The first thing you need to do is to download BlueStacks emulator from its official website https://www.bluestacks.com/. You can choose between BlueStacks 4 or BlueStacks 5 depending on your preference.

-

Once you have downloaded the installer file, double-click on it to start the installation process.

-

Step 2: Install BlueStacks on your PC

-

The next thing you need to do is to install BlueStacks on your PC by following the instructions on the screen.

-

You may need to grant some permissions or accept some terms and conditions during the installation process.

-

You may also need to sign in with your Google account or create one if you don't have one already.

-

After the installation is complete, launch BlueStacks from your desktop or start menu.

-

Step 3: Search for Bad Piggies on BlueStacks

-

The third thing you need to do is to search for Bad Piggies on BlueStacks using its built-in search bar.

-

Type "Bad Piggies" in the search bar and hit enter.

-

You will see a list of results from various sources such as Google Play Store, App Center, or Game Center.

-

Step 4: Install Bad Piggies from the search results

-

The fourth thing you need to do is to install Bad Piggies from the search results by clicking on its icon.

-

You will be redirected to its page where you can see more information about the game such as its description, rating, reviews, screenshots, etc.

-

To install it, click on the "Install" button at the top right corner of the page.

-

The installation process will begin and may take a few minutes depending on your internet speed.

-

Step 5: Launch Bad Piggies and enjoy the game

-

The fifth thing you need to do is to launch Bad Piggies and enjoy the game.

-

To launch it, click on its icon on your home screen or app drawer.

-

You will see a loading screen followed by a welcome screen where you can choose between playing online or offline.

-

Select your preferred option and start playing this fun and addictive game.

-

How to download and install patch Bad Piggies 1.5.0 on PC?

-

If you want to download and install patch Bad Piggies 1.5.0 on PC, you need to follow these steps:

-

Step 1: Download patch Bad Piggies 1.5.0 from a reliable source

- 487.weebly.com/blog/bad-piggies-150-download">https://lasopabg487.weebly.com/blog/bad-piggies-150-download. This is a website that provides a link to download the patch file for free and without any viruses or malware.

-

Once you have downloaded the patch file, which is in ZIP format, save it to your computer and remember its location.

-

Step 2: Extract the patch files to your game folder

-

The next thing you need to do is to extract the patch files to your game folder where you have installed Bad Piggies.

-

To do this, you need to use a program that can unzip ZIP files such as WinRAR, 7-Zip, or PeaZip.

-

Right-click on the patch file and select "Extract here" or "Extract to" depending on your program.

-

You will see a folder named "Bad Piggies 1.5.0" that contains two files: "BadPiggies.exe" and "Patch.exe".

-

Copy these two files and paste them into your game folder, which is usually located at "C:\Program Files (x86)\Rovio Entertainment Ltd\Bad Piggies".

-

Replace the existing files if prompted.

-

Step 3: Run the patch executable file and follow the instructions

-

The third thing you need to do is to run the patch executable file and follow the instructions.

-

To do this, double-click on the "Patch.exe" file that you have copied to your game folder.

-

You will see a window that asks you to select your language. Choose English or any other language that you prefer.

-

Then, you will see another window that asks you to select your game version. Choose "Bad Piggies 1.5.0" from the drop-down menu.

-

Finally, you will see a window that shows the progress of the patching process. Wait until it is finished and click on "Exit".

-

Step 4: Restart your game and enjoy the new features

-

The fourth thing you need to do is to restart your game and enjoy the new features.

-

To do this, close your game if it is running and launch it again from BlueStacks or from your desktop shortcut.

-

You will see a new splash screen that shows the version number 1.5.0 at the bottom right corner.

-

You will also notice some new features such as:

- -

Conclusion

-

In conclusion, Bad Piggies is a fun and addictive game that lets you play as the pigs from Angry Birds and help them build vehicles and machines to steal eggs from the birds. You can download and install Bad Piggies 1.5.0 on PC using BlueStacks emulator and enjoy over 200 levels of pig-flying fun. You can also download and install patch Bad Piggies 1.5.0 on PC using our guide and enjoy new features such as new sandbox mode, new achievements, new items, new levels, new mechanics, and more.

-

We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!

-

Frequently Asked Questions

-
    -
  1. Is Bad Piggies free to play?

     Yes, Bad Piggies is free to play on PC using BlueStacks emulator. However, some features may require in-app purchases or watching ads.

  2. Is Bad Piggies safe to download?

     Yes, Bad Piggies is safe to download from Google Play Store or App Center on BlueStacks emulator. However, if you download it from other sources, make sure they are reliable and trustworthy.

  3. Is patch Bad Piggies 1.5.0 safe to download?

     Yes, patch Bad Piggies 1.5.0 is safe to download from https://lasopabg487.weebly.com/blog/bad-piggies-150-download. However, if you download it from other sources, make sure they are reliable and trustworthy.

  4. Can I play Bad Piggies offline?

     Yes, you can play Bad Piggies offline on PC using BlueStacks emulator. However, some features may require an internet connection, such as online leaderboards or cloud save.

  5. Can I play Bad Piggies with friends?

     No, Bad Piggies does not have a multiplayer mode or a co-op mode. However, you can compete with your friends and other players on online leaderboards or share your creations on social media.

    -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Al Fatih 1453 Subtitle Indonesia Download WORK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Al Fatih 1453 Subtitle Indonesia Download WORK.md deleted file mode 100644 index c0f961c023ac2542a25783252a8769ffa4eb5e05..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Al Fatih 1453 Subtitle Indonesia Download WORK.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Please install QuickTime for downloading videos. Try different methods for free! IMDb picks. Download or stream the new movie! Watch all the latest new movies online or download them. Watch the latest movies online and download Indonesian-subtitled TV dramas and movies.

-

download film al fatih 1453 subtitle indonesia download


Download Ziphttps://imgfil.com/2uy1NH



-

watch murakami film sejarah islam with english subtitle indonesia bollywood kollywood movie online, download murakami film sejarah islam in mp3, and watch murakami film sejarah islam. watch film fetih 1453 (2012) with english subtitle indonesia free download movie, watch film fetih 1453 (2012) with english subtitle indonesia 3gp, download film fetih 1453 (2012) with english subtitle indonesia free mp3 download, buy film fetih 1453 (2012) dvd book from online and download book store.

-

There are many reasons for the different ratings of movies and TV shows on IMDb, including copyright, which is applied automatically by the system. If a lower rating is available for a title, it is because a lower rating is available for the movie. Get the best movie downloads as you want. Enjoy the best streaming and download collection online. With the mobile application, you only have to scan the QR code and connect to the preferred servers. As soon as we find the exact solution to a problem, we will post it on the web. The term Madhya Pradesh is not allowed for naming a state in India.

-

We partner with the best and brightest in print, television, radio, and digital media to deliver a unique audience experience with a world-class publication. If you are facing any issues or confusion, please contact us for help. If you have any queries, please feel free to reach out. You can always unsubscribe from the list with a single click.

-

899543212b
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Hile 2022 APK Play Offline or Online with Standard or Snooker Rules.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Hile 2022 APK Play Offline or Online with Standard or Snooker Rules.md deleted file mode 100644 index 7efc312105e269d01a7c95b6e83684aa87dd05ec..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Hile 2022 APK Play Offline or Online with Standard or Snooker Rules.md +++ /dev/null @@ -1,161 +0,0 @@ - -

8 Ball Pool Hile 2022 Apk: How to Download and Use the Best Cheat for 8 Ball Pool

-

Introduction

-

Do you love playing pool games on your smartphone or tablet? If yes, then you must have heard of 8 Ball Pool, the most popular and addictive pool game in the world. Developed by Miniclip, this game lets you play with millions of players online, compete in tournaments, win trophies, and collect cues and coins.

-

8 ball pool hile 2022 apk


Download Filehttps://urlin.us/2uSX21



-

But what if you want to have more fun and excitement in your pool games? What if you want to have an edge over your opponents and win every match easily? Well, there is a way to do that. You just need to download and use 8 Ball Pool Hile 2022 Apk, the best cheat for 8 Ball Pool.

-

What is 8 Ball Pool Hile 2022 Apk? It is a modified version of the original game that gives you unlimited access to all the features and resources of the game. With this cheat, you can:


-

If you want to download the latest version of Battery Bar APK, then you must come to apkmody. In apkmody you can download Battery Bar Mod APK v3.0 for free. Next is a detailed introduction about Battery Bar Mod APK v3.0.

-

Battery Bar Mod APK is the PRO version of Battery Bar APK. By using the Battery Bar Mod APK, you can easily complete any tasks and requirements in it. Often you need to spend a lot of time or money to get rewards easily, but by using Battery Bar Mod APK, you often achieve your goals in a very short time. Battery Bar Mod APK is a great way for you to outshine your competition. Now in apkmody you can download Battery Bar APK v3.0 for free. This process doesn't cost anything, and you can use it with confidence.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Descarga CONCAR CB 2016 V5 Activado GRATIS !!.md b/spaces/usbethFlerru/sovits-modelsV2/example/Descarga CONCAR CB 2016 V5 Activado GRATIS !!.md deleted file mode 100644 index 9e1b56c0da0f6ba6ce8a32b1a3c75a0b9942e977..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Descarga CONCAR CB 2016 V5 Activado GRATIS !!.md +++ /dev/null @@ -1,5 +0,0 @@ - -

NOTE: Since there is no trial version of the Concar program available to download for free, our download provides a video tutorial from which you will learn how to perform the basic operations with the program.

-

Download CONCAR CB 2016 v5 Activated FREE !!


Download ✓✓✓ https://urlcod.com/2uyX6j



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/generate.py b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/generate.py deleted file mode 100644 index 4203405bdf3a06330a655ebc6b58c5bd9dcccca6..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/generate.py +++ /dev/null @@ -1,244 +0,0 @@ -import numpy as np -import cv2 -from PIL import Image -from .prompt import split_weighted_subprompts -from .load_images import load_img, prepare_mask, check_mask_for_errors -from .webui_sd_pipeline import get_webui_sd_pipeline -from .animation import sample_from_cv2, sample_to_cv2 -from .rich import console -#Webui -import cv2 -from .animation import sample_from_cv2, sample_to_cv2 -from modules import processing, sd_models -from modules.shared import opts, sd_model -from modules.processing import process_images, StableDiffusionProcessingTxt2Img -from .deforum_controlnet import is_controlnet_enabled, process_txt2img_with_controlnet, process_img2img_with_controlnet - -import math, json, itertools -import requests - -def load_mask_latent(mask_input, shape): - # mask_input (str or PIL Image.Image): Path to the mask image or a PIL Image object - # shape (list-like len(4)): shape of the image to match, usually latent_image.shape - - if isinstance(mask_input, str): # mask input is probably a file name - if mask_input.startswith('http://') or mask_input.startswith('https://'): - mask_image = Image.open(requests.get(mask_input, stream=True).raw).convert('RGBA') - else: - mask_image = Image.open(mask_input).convert('RGBA') - elif isinstance(mask_input, Image.Image): - mask_image = mask_input - else: - raise Exception("mask_input must be a PIL image or a file name") - - mask_w_h = (shape[-1], shape[-2]) - mask = mask_image.resize(mask_w_h, resample=Image.LANCZOS) - mask = mask.convert("L") - return mask - -def isJson(myjson): - try: - json.loads(myjson) - except ValueError as e: - return False - return True - -# Add pairwise implementation here not to upgrade -# the whole python to 3.10 just for one function -def pairwise_repl(iterable): - a, b = itertools.tee(iterable) - next(b, None) - return zip(a, b) - -def generate(args, anim_args, loop_args, controlnet_args, root, frame = 0, return_sample=False, sampler_name=None): - assert args.prompt is not None - - # Setup the pipeline - p = get_webui_sd_pipeline(args, root, frame) - p.prompt, p.negative_prompt = split_weighted_subprompts(args.prompt, frame) - - if not args.use_init and args.strength > 0 and args.strength_0_no_init: - print("\nNo init image, but strength > 0. 
Strength has been auto set to 0, since use_init is False.") - print("If you want to force strength > 0 with no init, please set strength_0_no_init to False.\n") - args.strength = 0 - processed = None - mask_image = None - init_image = None - image_init0 = None - - if loop_args.use_looper: - # TODO find out why we need to set this in the init tab - if args.strength == 0: - raise RuntimeError("Strength needs to be greater than 0 in Init tab and strength_0_no_init should *not* be checked") - if args.seed_behavior != "schedule": - raise RuntimeError("seed_behavior needs to be set to schedule in under 'Keyframes' tab --> 'Seed scheduling'") - if not isJson(loop_args.imagesToKeyframe): - raise RuntimeError("The images set for use with keyframe-guidance are not in a proper JSON format") - args.strength = loop_args.imageStrength - tweeningFrames = loop_args.tweeningFrameSchedule - blendFactor = .07 - colorCorrectionFactor = loop_args.colorCorrectionFactor - jsonImages = json.loads(loop_args.imagesToKeyframe) - framesToImageSwapOn = list(map(int, list(jsonImages.keys()))) - # find which image to show - frameToChoose = 0 - for swappingFrame in framesToImageSwapOn[1:]: - frameToChoose += (frame >= int(swappingFrame)) - - #find which frame to do our swapping on for tweening - skipFrame = 25 - for fs, fe in pairwise_repl(framesToImageSwapOn): - if fs <= frame <= fe: - skipFrame = fe - fs - - if frame % skipFrame <= tweeningFrames: # number of tweening frames - blendFactor = loop_args.blendFactorMax - loop_args.blendFactorSlope*math.cos((frame % tweeningFrames) / (tweeningFrames / 2)) - init_image2, _ = load_img(list(jsonImages.values())[frameToChoose], - shape=(args.W, args.H), - use_alpha_as_mask=args.use_alpha_as_mask) - image_init0 = list(jsonImages.values())[0] - - else: # they passed in a single init image - image_init0 = args.init_image - - - available_samplers = { - 'euler a':'Euler a', - 'euler':'Euler', - 'lms':'LMS', - 'heun':'Heun', - 'dpm2':'DPM2', - 'dpm2 a':'DPM2 a', - 'dpm++ 2s a':'DPM++ 2S a', - 'dpm++ 2m':'DPM++ 2M', - 'dpm++ sde':'DPM++ SDE', - 'dpm fast':'DPM fast', - 'dpm adaptive':'DPM adaptive', - 'lms karras':'LMS Karras' , - 'dpm2 karras':'DPM2 Karras', - 'dpm2 a karras':'DPM2 a Karras', - 'dpm++ 2s a karras':'DPM++ 2S a Karras', - 'dpm++ 2m karras':'DPM++ 2M Karras', - 'dpm++ sde karras':'DPM++ SDE Karras' - } - if sampler_name is not None: - if sampler_name in available_samplers.keys(): - args.sampler = available_samplers[sampler_name] - - if args.checkpoint is not None: - info = sd_models.get_closet_checkpoint_match(args.checkpoint) - if info is None: - raise RuntimeError(f"Unknown checkpoint: {args.checkpoint}") - sd_models.reload_model_weights(info=info) - - if args.init_sample is not None: - # TODO: cleanup init_sample remains later - img = args.init_sample - init_image = img - image_init0 = img - if loop_args.use_looper and isJson(loop_args.imagesToKeyframe): - init_image = Image.blend(init_image, init_image2, blendFactor) - correction_colors = Image.blend(init_image, init_image2, colorCorrectionFactor) - p.color_corrections = [processing.setup_color_correction(correction_colors)] - - # this is the first pass - elif loop_args.use_looper or (args.use_init and ((args.init_image != None and args.init_image != ''))): - init_image, mask_image = load_img(image_init0, # initial init image - shape=(args.W, args.H), - use_alpha_as_mask=args.use_alpha_as_mask) - - else: - - if anim_args.animation_mode != 'Interpolation': - print(f"Not using an init image (doing pure txt2img)") - 
p_txt = StableDiffusionProcessingTxt2Img( - sd_model=sd_model, - outpath_samples=root.tmp_deforum_run_duplicated_folder, - outpath_grids=root.tmp_deforum_run_duplicated_folder, - prompt=p.prompt, - styles=p.styles, - negative_prompt=p.negative_prompt, - seed=p.seed, - subseed=p.subseed, - subseed_strength=p.subseed_strength, - seed_resize_from_h=p.seed_resize_from_h, - seed_resize_from_w=p.seed_resize_from_w, - sampler_name=p.sampler_name, - batch_size=p.batch_size, - n_iter=p.n_iter, - steps=p.steps, - cfg_scale=p.cfg_scale, - width=p.width, - height=p.height, - restore_faces=p.restore_faces, - tiling=p.tiling, - enable_hr=None, - denoising_strength=None, - ) - # print dynamic table to cli - print_generate_table(args, anim_args, p_txt) - - if is_controlnet_enabled(controlnet_args): - processed = process_txt2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame) - else: - processed = processing.process_images(p_txt) - - if processed is None: - # Mask functions - if args.use_mask: - mask = args.mask_image - #assign masking options to pipeline - if mask is not None: - p.inpainting_mask_invert = args.invert_mask - p.inpainting_fill = args.fill - p.inpaint_full_res= args.full_res_mask - p.inpaint_full_res_padding = args.full_res_mask_padding - else: - mask = None - - assert not ( (mask is not None and args.use_mask and args.overlay_mask) and (args.init_sample is None and init_image is None)), "Need an init image when use_mask == True and overlay_mask == True" - - p.init_images = [init_image] - p.image_mask = mask - p.image_cfg_scale = args.pix2pix_img_cfg_scale - - # print dynamic table to cli - print_generate_table(args, anim_args, p) - - if is_controlnet_enabled(controlnet_args): - processed = process_img2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame) - else: - processed = processing.process_images(p) - - if root.initial_info == None: - root.initial_seed = processed.seed - root.initial_info = processed.info - - if root.first_frame == None: - root.first_frame = processed.images[0] - - results = processed.images[0] - - return results - -def print_generate_table(args, anim_args, p): - from rich.table import Table - from rich import box - table = Table(padding=0, box=box.ROUNDED) - field_names = ["Steps", "CFG"] - if anim_args.animation_mode != 'Interpolation': - field_names.append("Denoise") - field_names += ["Subseed", "Subs. 
str"] * (anim_args.enable_subseed_scheduling) - field_names += ["Sampler"] * anim_args.enable_sampler_scheduling - field_names += ["Checkpoint"] * anim_args.enable_checkpoint_scheduling - for field_name in field_names: - table.add_column(field_name, justify="center") - rows = [str(p.steps), str(p.cfg_scale)] - if anim_args.animation_mode != 'Interpolation': - rows.append(str(p.denoising_strength)) - rows += [str(p.subseed), str(p.subseed_strength)] * (anim_args.enable_subseed_scheduling) - rows += [p.sampler_name] * anim_args.enable_sampler_scheduling - rows += [str(args.checkpoint)] * anim_args.enable_checkpoint_scheduling - table.add_row(*rows) - - console.print(table) \ No newline at end of file diff --git a/spaces/vialibre/edia_full_es/modules/module_customPllLabel.py b/spaces/vialibre/edia_full_es/modules/module_customPllLabel.py deleted file mode 100644 index 906d7b7ad4bd84582977172138e29d3dfd66e112..0000000000000000000000000000000000000000 --- a/spaces/vialibre/edia_full_es/modules/module_customPllLabel.py +++ /dev/null @@ -1,111 +0,0 @@ -from typing import List, Dict - -class CustomPllLabel: - def __init__( - self - ) -> None: - - self.html_head = """ - - - - - - - - """ - - self.html_footer ="" - - def __progressbar( - self, - percentage: int, - sent: str, - ratio: float, - score: float, - size: int=15 - ) -> str: - - html = f""" -
- - x{round(ratio,3)} - - -

{sent}

-
- """ - return html - - def __render( - self, - sents: List[str], - scores: List[float], - ratios: List[float] - ) -> str: - - max_ratio = max(ratios) - ratio2percentage = lambda ratio: int(ratio*100/max_ratio) - - html = "" - for sent, ratio, score in zip(sents, ratios, scores): - html += self.__progressbar( - percentage=ratio2percentage(ratio), - sent=sent, - ratio=ratio, - score=score - ) - - return self.html_head + html + self.html_footer - - def __getProportions( - self, - scores: List[float], - ) -> List[float]: - - min_score = min(scores) - return [min_score/s for s in scores] - - def compute( - self, - pll_dict: Dict[str, float] - ) -> str: - - sorted_pll_dict = sorted(pll_dict.items(), key=lambda x: x[1], reverse=True) - - sents = [k for k,_ in sorted_pll_dict] - scores = [v for _,v in sorted_pll_dict] - - # Scape < and > marks from hightlight word/s - sents = [s.replace("<","<").replace(">",">") for s in sents] - - ratios = self.__getProportions(scores) - - return self.__render(sents, scores, ratios) \ No newline at end of file diff --git a/spaces/voltcutter/stable-diffusion-webui/app.py b/spaces/voltcutter/stable-diffusion-webui/app.py deleted file mode 100644 index 920a4be5bb197128b50da8b0bd4c6a368797a134..0000000000000000000000000000000000000000 --- a/spaces/voltcutter/stable-diffusion-webui/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -from subprocess import getoutput - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") - -os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui") -os.chdir("/home/user/app/stable-diffusion-webui") - -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py") -os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''') -os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py") -os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py") - -# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header---------------------------- -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py") -os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -# --------------------------------------------------------------------------------------------------------------------------------------------------- - -if "IS_SHARED_UI" in os.environ: - os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/") - - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json") - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json") - - os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}") - os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}") - os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}") - - os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding") -else: - # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py") - os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py") - - # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME") - #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study") - os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser") - os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui") - - # Please duplicate this space and delete # character in front of 
the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt") - #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt") - #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt") - #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt") - #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt") - #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt") - - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt") - - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt") - #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml") - os.system(f"wget -q https://r2.kamiya-b.me/dreambooth_lib/akakura-sn.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/akakura-sn.ckpt") - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt") - os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O 
/home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml") - - os.system(f"python launch.py --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test") - \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/danet_r50-d8.py deleted file mode 100644 index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/danet_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='DAHead', - in_channels=2048, - in_index=3, - channels=512, - pam_channels=64, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/visualization/image.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/visualization/image.py deleted file mode 100644 index 61a56c75b67f593c298408462c63c0468be8e276..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/visualization/image.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - -from annotator.uniformer.mmcv.image import imread, imwrite -from .color import color_val - - -def imshow(img, win_name='', wait_time=0): - """Show an image. - - Args: - img (str or ndarray): The image to be displayed. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - """ - cv2.imshow(win_name, imread(img)) - if wait_time == 0: # prevent from hanging if windows was closed - while True: - ret = cv2.waitKey(1) - - closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1 - # if user closed window or if some key pressed - if closed or ret != -1: - break - else: - ret = cv2.waitKey(wait_time) - - -def imshow_bboxes(img, - bboxes, - colors='green', - top_k=-1, - thickness=1, - show=True, - win_name='', - wait_time=0, - out_file=None): - """Draw bboxes on an image. - - Args: - img (str or ndarray): The image to be displayed. - bboxes (list or ndarray): A list of ndarray of shape (k, 4). - colors (list[str or tuple or Color]): A list of colors. - top_k (int): Plot the first k bboxes only if set positive. - thickness (int): Thickness of lines. - show (bool): Whether to show the image. - win_name (str): The window name. 
- wait_time (int): Value of waitKey param. - out_file (str, optional): The filename to write the image. - - Returns: - ndarray: The image with bboxes drawn on it. - """ - img = imread(img) - img = np.ascontiguousarray(img) - - if isinstance(bboxes, np.ndarray): - bboxes = [bboxes] - if not isinstance(colors, list): - colors = [colors for _ in range(len(bboxes))] - colors = [color_val(c) for c in colors] - assert len(bboxes) == len(colors) - - for i, _bboxes in enumerate(bboxes): - _bboxes = _bboxes.astype(np.int32) - if top_k <= 0: - _top_k = _bboxes.shape[0] - else: - _top_k = min(top_k, _bboxes.shape[0]) - for j in range(_top_k): - left_top = (_bboxes[j, 0], _bboxes[j, 1]) - right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) - cv2.rectangle( - img, left_top, right_bottom, colors[i], thickness=thickness) - - if show: - imshow(img, win_name, wait_time) - if out_file is not None: - imwrite(img, out_file) - return img - - -def imshow_det_bboxes(img, - bboxes, - labels, - class_names=None, - score_thr=0, - bbox_color='green', - text_color='green', - thickness=1, - font_scale=0.5, - show=True, - win_name='', - wait_time=0, - out_file=None): - """Draw bboxes and class labels (with scores) on an image. - - Args: - img (str or ndarray): The image to be displayed. - bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or - (n, 5). - labels (ndarray): Labels of bboxes. - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. - bbox_color (str or tuple or :obj:`Color`): Color of bbox lines. - text_color (str or tuple or :obj:`Color`): Color of texts. - thickness (int): Thickness of lines. - font_scale (float): Font scales of texts. - show (bool): Whether to show the image. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - out_file (str or None): The filename to write the image. - - Returns: - ndarray: The image with bboxes drawn on it. - """ - assert bboxes.ndim == 2 - assert labels.ndim == 1 - assert bboxes.shape[0] == labels.shape[0] - assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5 - img = imread(img) - img = np.ascontiguousarray(img) - - if score_thr > 0: - assert bboxes.shape[1] == 5 - scores = bboxes[:, -1] - inds = scores > score_thr - bboxes = bboxes[inds, :] - labels = labels[inds] - - bbox_color = color_val(bbox_color) - text_color = color_val(text_color) - - for bbox, label in zip(bboxes, labels): - bbox_int = bbox.astype(np.int32) - left_top = (bbox_int[0], bbox_int[1]) - right_bottom = (bbox_int[2], bbox_int[3]) - cv2.rectangle( - img, left_top, right_bottom, bbox_color, thickness=thickness) - label_text = class_names[ - label] if class_names is not None else f'cls {label}' - if len(bbox) > 4: - label_text += f'|{bbox[-1]:.02f}' - cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2), - cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color) - - if show: - imshow(img, win_name, wait_time) - if out_file is not None: - imwrite(img, out_file) - return img diff --git a/spaces/w1zrd/MusicGen/app_batched.py b/spaces/w1zrd/MusicGen/app_batched.py deleted file mode 100644 index 0d2a4b526e4b8ef94034a1c661a4fa68816c285a..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/app_batched.py +++ /dev/null @@ -1,222 +0,0 @@ -""" -Copyright (c) Meta Platforms, Inc. and affiliates. -All rights reserved. - -This source code is licensed under the license found in the -LICENSE file in the root directory of this source tree. 
-""" - -import argparse -from concurrent.futures import ProcessPoolExecutor -import subprocess as sp -from tempfile import NamedTemporaryFile -import time -import warnings -import torch -import gradio as gr -from audiocraft.data.audio_utils import convert_audio -from audiocraft.data.audio import audio_write -from audiocraft.models import MusicGen - - -MODEL = None - -_old_call = sp.call - - -def _call_nostderr(*args, **kwargs): - # Avoid ffmpeg vomitting on the logs. - kwargs['stderr'] = sp.DEVNULL - kwargs['stdout'] = sp.DEVNULL - _old_call(*args, **kwargs) - - -sp.call = _call_nostderr -pool = ProcessPoolExecutor(3) -pool.__enter__() - - -def make_waveform(*args, **kwargs): - be = time.time() - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - out = gr.make_waveform(*args, **kwargs) - print("Make a video took", time.time() - be) - return out - - -def load_model(): - print("Loading model") - return MusicGen.get_pretrained("melody") - - -def predict(texts, melodies): - global MODEL - if MODEL is None: - MODEL = load_model() - - duration = 12 - max_text_length = 512 - texts = [text[:max_text_length] for text in texts] - MODEL.set_generation_params(duration=duration) - - print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies]) - be = time.time() - processed_melodies = [] - target_sr = 32000 - target_ac = 1 - for melody in melodies: - if melody is None: - processed_melodies.append(None) - else: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t() - if melody.dim() == 1: - melody = melody[None] - melody = melody[..., :int(sr * duration)] - melody = convert_audio(melody, sr, target_sr, target_ac) - processed_melodies.append(melody) - - outputs = MODEL.generate_with_chroma( - descriptions=texts, - melody_wavs=processed_melodies, - melody_sample_rate=target_sr, - progress=False - ) - - outputs = outputs.detach().cpu().float() - out_files = [] - for output in outputs: - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, MODEL.sample_rate, strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - out_files.append(pool.submit(make_waveform, file.name)) - res = [[out_file.result() for out_file in out_files]] - print("batch finished", len(texts), time.time() - be) - return res - - -def ui(**kwargs): - with gr.Blocks() as demo: - gr.Markdown( - """ - # MusicGen - - This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284). -
- - Duplicate Space - for longer sequences, more control and no queue.

- """ - ) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Describe your music", lines=2, interactive=True) - melody = gr.Audio(source="upload", type="numpy", label="Condition on a melody (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Generate") - with gr.Column(): - output = gr.Video(label="Generated Music") - submit.click(predict, inputs=[text, melody], outputs=[output], batch=True, max_batch_size=8) - gr.Examples( - fn=predict, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", - "./assets/bach.mp3", - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - ], - ], - inputs=[text, melody], - outputs=[output] - ) - gr.Markdown(""" - ### More details - - The model will generate 12 seconds of audio based on the description you provided. - You can optionaly provide a reference audio from which a broad melody will be extracted. - The model will then try to follow both the description and melody provided. - All samples are generated with the `melody` model. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. - """) - - # Show the interface - launch_kwargs = {} - username = kwargs.get('username') - password = kwargs.get('password') - server_port = kwargs.get('server_port', 0) - inbrowser = kwargs.get('inbrowser', False) - share = kwargs.get('share', False) - server_name = kwargs.get('listen') - - launch_kwargs['server_name'] = server_name - - if username and password: - launch_kwargs['auth'] = (username, password) - if server_port > 0: - launch_kwargs['server_port'] = server_port - if inbrowser: - launch_kwargs['inbrowser'] = inbrowser - if share: - launch_kwargs['share'] = share - demo.queue(max_size=8 * 4).launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='0.0.0.0', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - - args = parser.parse_args() - - ui( - username=args.username, - password=args.password, - inbrowser=args.inbrowser, - server_port=args.server_port, - share=args.share, - listen=args.listen - ) diff --git a/spaces/wenpeng/Sod_Inpaint/sod/PGNet.py b/spaces/wenpeng/Sod_Inpaint/sod/PGNet.py deleted file mode 100644 index 8447a62b43832649ecbcc691efd65787ebea8610..0000000000000000000000000000000000000000 --- a/spaces/wenpeng/Sod_Inpaint/sod/PGNet.py +++ /dev/null @@ -1,270 +0,0 @@ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from .Res import resnet18 -from .Swin import Swintransformer -Act = nn.ReLU - - -def 
weight_init(module): - for n, m in module.named_children(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu') - if m.bias is not None: - nn.init.zeros_(m.bias) - elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d,nn.BatchNorm1d)): - nn.init.ones_(m.weight) - if m.bias is not None: - nn.init.zeros_(m.bias) - elif isinstance(m, nn.Linear): - nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu') - if m.bias is not None: - nn.init.zeros_(m.bias) - elif isinstance(m, nn.Sequential): - weight_init(m) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - elif isinstance(m, (nn.ReLU,Act,nn.AdaptiveAvgPool2d,nn.Softmax)): - pass - else: - m.initialize() - - -class Grafting(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=True, qk_scale=None): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - self.k = nn.Linear(dim, dim , bias=qkv_bias) - self.qv = nn.Linear(dim, dim * 2, bias=qkv_bias) - self.proj = nn.Linear(dim, dim) - self.act = nn.ReLU(inplace=True) - self.conv = nn.Conv2d(8,8,kernel_size=3, stride=1, padding=1) - self.lnx = nn.LayerNorm(64) - self.lny = nn.LayerNorm(64) - self.bn = nn.BatchNorm2d(8) - self.conv2 = nn.Sequential( - nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True), - nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - def forward(self, x, y): - batch_size = x.shape[0] - chanel = x.shape[1] - sc = x - x = x.view(batch_size, chanel, -1).permute(0, 2, 1) - sc1 = x - x = self.lnx(x) - y = y.view(batch_size, chanel, -1).permute(0, 2, 1) - y = self.lny(y) - - B, N, C = x.shape - y_k = self.k(y).reshape(B, N, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - x_qv= self.qv(x).reshape(B,N,2,self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - x_q, x_v = x_qv[0], x_qv[1] - y_k = y_k[0] - attn = (x_q @ y_k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - x = (attn @ x_v).transpose(1, 2).reshape(B, N, C) - - x = self.proj(x) - x = (x+sc1) - - x = x.permute(0,2,1) - x = x.view(batch_size,chanel,*sc.size()[2:]) - x = self.conv2(x)+x - return x,self.act(self.bn(self.conv(attn+attn.transpose(-1,-2)))) - - - def initialize(self): - weight_init(self) - -class DB1(nn.Module): - def __init__(self,inplanes,outplanes): - super(DB1,self).__init__() - self.squeeze1 = nn.Sequential( - nn.Conv2d(inplanes, outplanes,kernel_size=1,stride=1,padding=0), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - self.squeeze2 = nn.Sequential( - nn.Conv2d(64, 64, kernel_size=3,stride=1,dilation=2,padding=2), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - - def forward(self, x): - z = self.squeeze2(self.squeeze1(x)) - return z,z - - def initialize(self): - weight_init(self) - -class DB2(nn.Module): - def __init__(self,inplanes,outplanes): - super(DB2,self).__init__() - self.short_cut = nn.Conv2d(outplanes, outplanes, kernel_size=1, stride=1, padding=0) - self.conv = nn.Sequential( - nn.Conv2d(inplanes+outplanes,outplanes,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(outplanes), - nn.ReLU(inplace=True), - nn.Conv2d(outplanes,outplanes,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(outplanes), - nn.ReLU(inplace=True) - ) - self.conv2 = nn.Sequential( - nn.Conv2d(outplanes,outplanes,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(outplanes), - 
nn.ReLU(inplace=True), - nn.Conv2d(outplanes,outplanes,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(outplanes), - nn.ReLU(inplace=True) - ) - - def forward(self,x,z): - z = F.interpolate(z,size=x.size()[2:],mode='bilinear',align_corners=True) - p = self.conv(torch.cat((x,z),1)) - sc = self.short_cut(z) - p = p+sc - p2 = self.conv2(p) - p = p+p2 - return p,p - - def initialize(self): - weight_init(self) - -class DB3(nn.Module): - def __init__(self) -> None: - super(DB3,self).__init__() - - self.db2 = DB2(64,64) - - self.conv3x3 = nn.Sequential( - nn.Conv2d(64,64,kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - self.sqz_r4 = nn.Sequential( - nn.Conv2d(256, 64, kernel_size=3,stride=1,dilation=1,padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - - self.sqz_s1=nn.Sequential( - nn.Conv2d(128, 64, kernel_size=3,stride=1,dilation=1,padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - def forward(self,s,r,up): - up = F.interpolate(up,size=s.size()[2:],mode='bilinear',align_corners=True) - s = self.sqz_s1(s) - r = self.sqz_r4(r) - sr = self.conv3x3(s+r) - out,_ =self.db2(sr,up) - return out,out - def initialize(self): - weight_init(self) - - - -class decoder(nn.Module): - def __init__(self) -> None: - super(decoder,self).__init__() - self.sqz_s2=nn.Sequential( - nn.Conv2d(256, 64, kernel_size=3,stride=1,dilation=1,padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - self.sqz_r5 = nn.Sequential( - nn.Conv2d(512, 64, kernel_size=3,stride=1,dilation=1,padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - - self.GF = Grafting(64,num_heads=8) - self.d1 = DB1(512,64) - self.d2 = DB2(512,64) - self.d3 = DB2(64,64) - self.d4 = DB3() - self.d5 = DB2(128,64) - self.d6 = DB2(64,64) - - def forward(self,s1,s2,s3,s4,r2,r3,r4,r5): - r5 = F.interpolate(r5,size = s2.size()[2:],mode='bilinear',align_corners=True) - s1 = F.interpolate(s1,size = r4.size()[2:],mode='bilinear',align_corners=True) - - s4_,_ = self.d1(s4) - s3_,_ = self.d2(s3,s4_) - - s2_ = self.sqz_s2(s2) - r5_= self.sqz_r5(r5) - graft_feature_r5,cam = self.GF(r5_,s2_) - - graft_feature_r5_,_=self.d3(graft_feature_r5,s3_) - - graft_feature_r4,_=self.d4(s1,r4,graft_feature_r5_) - - r3_,_ = self.d5(r3,graft_feature_r4) - - r2_,_ = self.d6(r2,r3_) - - return r2_,cam,r5_,s2_ - - def initialize(self): - weight_init(self) - - - - -class PGNet(nn.Module): - def __init__(self, cfg=None): - super(PGNet, self).__init__() - self.cfg = cfg - self.decoder = decoder() - self.linear1 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1) - self.linear2 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1) - self.linear3 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1) - self.conv = nn.Conv2d(8,1,kernel_size=3, stride=1, padding=1) - - - if self.cfg is None or self.cfg.snapshot is None: - weight_init(self) - - self.resnet = resnet18() - self.swin = Swintransformer(224) - device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") - self.swin.load_state_dict(torch.load('sod/weights/swin224.pth', map_location=device)['model'],strict=False) - self.resnet.load_state_dict(torch.load('sod/weights/resnet18.pth', map_location=device),strict=False) - - if self.cfg is not None and self.cfg.snapshot: - print('load checkpoint') - pretrain=torch.load(self.cfg.snapshot, map_location=device) - new_state_dict = {} - for k,v in pretrain.items(): - new_state_dict[k[7:]] = v - self.load_state_dict(new_state_dict, strict=False) - - def forward(self, 
x,shape=None,mask=None): - shape = x.size()[2:] if shape is None else shape - y = F.interpolate(x, size=(224,224), mode='bilinear',align_corners=True) - - r2,r3,r4,r5 = self.resnet(x) - s1,s2,s3,s4 = self.swin(y) - r2_,attmap,r5_,s2_ = self.decoder(s1,s2,s3,s4,r2,r3,r4,r5) - - pred1 = F.interpolate(self.linear1(r2_), size=shape, mode='bilinear') - wr = F.interpolate(self.linear2(r5_), size=(28,28), mode='bilinear') - ws = F.interpolate(self.linear3(s2_), size=(28,28), mode='bilinear') - - - return pred1,wr,ws,self.conv(attmap) - - - - diff --git a/spaces/wffcyrus/llama2-with-gradio-chat/constants.py b/spaces/wffcyrus/llama2-with-gradio-chat/constants.py deleted file mode 100644 index b5e1c33d14f17a24dd2468a0a69b3f46efedcab3..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/llama2-with-gradio-chat/constants.py +++ /dev/null @@ -1,6 +0,0 @@ -DEFAULT_GLOBAL_CTX = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. - -If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. - -In each conversation, question is placed after [INST] while your answer should be placed after [/INST]. By looking [INST] and [/INST], you must consider multi-turn conversations. -""" \ No newline at end of file diff --git a/spaces/whgwd2023/bingo/src/pages/api/kblob.ts b/spaces/whgwd2023/bingo/src/pages/api/kblob.ts deleted file mode 100644 index 0ce7e6063cdc06838e76f1cff1d5982d34ef52de..0000000000000000000000000000000000000000 --- a/spaces/whgwd2023/bingo/src/pages/api/kblob.ts +++ /dev/null @@ -1,56 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import FormData from 'form-data' -import { fetch } from '@/lib/isomorphic' -import { KBlobRequest } from '@/lib/bots/bing/types' - -const API_DOMAIN = 'https://bing.vcanbb.top' - -export const config = { - api: { - bodyParser: { - sizeLimit: '10mb' // Set desired value here - } - } -} - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { knowledgeRequest, imageBase64 } = req.body as KBlobRequest - - const formData = new FormData() - formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - if (imageBase64) { - formData.append('imageBase64', imageBase64) - } - - const response = await fetch(`${API_DOMAIN}/images/kblob`, - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": `${API_DOMAIN}/web/index.html`, - "Referrer-Policy": "origin-when-cross-origin", - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - ...formData.getHeaders() - } - } - ).then(res => res.text()) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - res.end(response || JSON.stringify({ result: { value: 'UploadFailed', message: '请更换 IP 或代理后重试' } })) - } catch (e) { - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/xYousha/AlphaGPT/README.md b/spaces/xYousha/AlphaGPT/README.md deleted file mode 100644 
index 9899599cf50b535f915c178f22ee3fda22df937f..0000000000000000000000000000000000000000 --- a/spaces/xYousha/AlphaGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AlphaGPT -emoji: 🌍 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py deleted file mode 100644 index 69b6d1c4b5724a3ef61f8bc3d64fc45c5e51e270..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py +++ /dev/null @@ -1,196 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - #unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - unnormalized_derivatives_ = torch.zeros((1, 1, unnormalized_derivatives.size(2), unnormalized_derivatives.size(3)+2)) - unnormalized_derivatives_[...,1:-1] = unnormalized_derivatives - unnormalized_derivatives = unnormalized_derivatives_ - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, 
right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * 
theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/models/mlfn.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/models/mlfn.py deleted file mode 100644 index ac7e126b073db6a710fc41e62624127ca91ec131..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/models/mlfn.py +++ /dev/null @@ -1,269 +0,0 @@ -from __future__ import division, absolute_import -import torch -import torch.utils.model_zoo as model_zoo -from torch import nn -from torch.nn import functional as F - -__all__ = ['mlfn'] - -model_urls = { - # training epoch = 5, top1 = 51.6 - 'imagenet': - 'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk', -} - - -class MLFNBlock(nn.Module): - - def __init__( - self, in_channels, out_channels, stride, fsm_channels, groups=32 - ): - super(MLFNBlock, self).__init__() - self.groups = groups - mid_channels = out_channels // 2 - - # Factor Modules - self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False) - self.fm_bn1 = nn.BatchNorm2d(mid_channels) - self.fm_conv2 = nn.Conv2d( - mid_channels, - mid_channels, - 3, - stride=stride, - padding=1, - bias=False, - groups=self.groups - ) - self.fm_bn2 = nn.BatchNorm2d(mid_channels) - self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False) - self.fm_bn3 = nn.BatchNorm2d(out_channels) - - # Factor Selection Module - self.fsm = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(in_channels, fsm_channels[0], 1), - nn.BatchNorm2d(fsm_channels[0]), - nn.ReLU(inplace=True), - nn.Conv2d(fsm_channels[0], fsm_channels[1], 1), - nn.BatchNorm2d(fsm_channels[1]), - nn.ReLU(inplace=True), - nn.Conv2d(fsm_channels[1], self.groups, 1), - nn.BatchNorm2d(self.groups), - nn.Sigmoid(), - ) - - self.downsample = None - if in_channels != out_channels or stride > 1: - self.downsample = nn.Sequential( - nn.Conv2d( - in_channels, out_channels, 1, stride=stride, bias=False - ), - nn.BatchNorm2d(out_channels), - ) - - def forward(self, x): - residual = x - s = self.fsm(x) - - # reduce dimension - x = self.fm_conv1(x) - x = self.fm_bn1(x) - x = F.relu(x, inplace=True) - - # group convolution - x = self.fm_conv2(x) - x = self.fm_bn2(x) - x = F.relu(x, inplace=True) - - # factor selection - b, c = x.size(0), x.size(1) - n = c // self.groups - ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1) - ss = ss.view(b, n, self.groups, 1, 1) - ss = ss.permute(0, 2, 1, 3, 4).contiguous() - ss = ss.view(b, c, 1, 1) - x = ss * x - - # recover dimension - x = self.fm_conv3(x) - x = self.fm_bn3(x) - x = F.relu(x, inplace=True) - - if self.downsample is not None: - residual = self.downsample(residual) - - return F.relu(residual + x, inplace=True), s - - -class MLFN(nn.Module): - """Multi-Level Factorisation Net. - - Reference: - Chang et al. Multi-Level Factorisation Net for - Person Re-Identification. CVPR 2018. - - Public keys: - - ``mlfn``: MLFN (Multi-Level Factorisation Net). 
- """ - - def __init__( - self, - num_classes, - loss='softmax', - groups=32, - channels=[64, 256, 512, 1024, 2048], - embed_dim=1024, - **kwargs - ): - super(MLFN, self).__init__() - self.loss = loss - self.groups = groups - - # first convolutional layer - self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3) - self.bn1 = nn.BatchNorm2d(channels[0]) - self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) - - # main body - self.feature = nn.ModuleList( - [ - # layer 1-3 - MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups), - MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), - MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), - # layer 4-7 - MLFNBlock( - channels[1], channels[2], 2, [256, 128], self.groups - ), - MLFNBlock( - channels[2], channels[2], 1, [256, 128], self.groups - ), - MLFNBlock( - channels[2], channels[2], 1, [256, 128], self.groups - ), - MLFNBlock( - channels[2], channels[2], 1, [256, 128], self.groups - ), - # layer 8-13 - MLFNBlock( - channels[2], channels[3], 2, [512, 128], self.groups - ), - MLFNBlock( - channels[3], channels[3], 1, [512, 128], self.groups - ), - MLFNBlock( - channels[3], channels[3], 1, [512, 128], self.groups - ), - MLFNBlock( - channels[3], channels[3], 1, [512, 128], self.groups - ), - MLFNBlock( - channels[3], channels[3], 1, [512, 128], self.groups - ), - MLFNBlock( - channels[3], channels[3], 1, [512, 128], self.groups - ), - # layer 14-16 - MLFNBlock( - channels[3], channels[4], 2, [512, 128], self.groups - ), - MLFNBlock( - channels[4], channels[4], 1, [512, 128], self.groups - ), - MLFNBlock( - channels[4], channels[4], 1, [512, 128], self.groups - ), - ] - ) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - - # projection functions - self.fc_x = nn.Sequential( - nn.Conv2d(channels[4], embed_dim, 1, bias=False), - nn.BatchNorm2d(embed_dim), - nn.ReLU(inplace=True), - ) - self.fc_s = nn.Sequential( - nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False), - nn.BatchNorm2d(embed_dim), - nn.ReLU(inplace=True), - ) - - self.classifier = nn.Linear(embed_dim, num_classes) - - self.init_params() - - def init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = F.relu(x, inplace=True) - x = self.maxpool(x) - - s_hat = [] - for block in self.feature: - x, s = block(x) - s_hat.append(s) - s_hat = torch.cat(s_hat, 1) - - x = self.global_avgpool(x) - x = self.fc_x(x) - s_hat = self.fc_s(s_hat) - - v = (x+s_hat) * 0.5 - v = v.view(v.size(0), -1) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs): - model = MLFN(num_classes, loss, **kwargs) - if pretrained: - # init_pretrained_weights(model, model_urls['imagenet']) - import warnings - warnings.warn( - 'The imagenet pretrained weights need to be manually downloaded from {}' - .format(model_urls['imagenet']) - ) - return model diff --git a/spaces/xfys/yolov5_tracking/val_utils/docs/RobMOTS-Official/Readme.md b/spaces/xfys/yolov5_tracking/val_utils/docs/RobMOTS-Official/Readme.md deleted file mode 100644 index 4cb225604d0fad600534d833f420757034f460ad..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/docs/RobMOTS-Official/Readme.md +++ /dev/null @@ -1,240 +0,0 @@ -[![image](https://user-images.githubusercontent.com/23000532/118353602-607d1080-b567-11eb-8744-3e346a438583.png)](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110) - -# RobMOTS Official Evaluation Code - -### NEWS: [RobMOTS Challenge](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110) for the [RVSU CVPR'21 Workshop](https://eval.vision.rwth-aachen.de/rvsu-workshop21/) is now live!!!! Challenge deadline June 15. - -### NEWS: [Call for short papers](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=74) (4 pages) on tracking and other video topics for [RVSU CVPR'21 Workshop](https://eval.vision.rwth-aachen.de/rvsu-workshop21/)!!!! Paper deadline June 4. - -TrackEval is now the Official Evaluation Kit for the RobMOTS Challenge. - -This repository contains the official evaluation code for the challenges available at the [RobMOTS Website](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110). - -The RobMOTS Challenge tests trackers' ability to work robustly across 8 different benchmarks, while tracking the [80 categories of objects from COCO](https://cocodataset.org/#explore). - -The following benchmarks are included: - -Benchmark | Website | -|----- | ----------- | -|MOTS Challenge| https://motchallenge.net/results/MOTS/ | -|KITTI-MOTS| http://www.cvlibs.net/datasets/kitti/eval_mots.php | -|DAVIS Challenge Unsupervised| https://davischallenge.org/challenge2020/unsupervised.html | -|YouTube-VIS| https://youtube-vos.org/dataset/vis/ | -|BDD100k MOTS| https://bdd-data.berkeley.edu/ | -|TAO| https://taodataset.org/ | -|Waymo Open Dataset| https://waymo.com/open/ | -|OVIS| http://songbai.site/ovis/ | - -## Installing, obtaining the data, and running - -Simply follow the code snippet below to install the evaluation code, download the train groundtruth data and an example tracker, and run the evaluation code on the sample tracker. - -Note the code requires python 3.5 or higher. - -``` -# Download the TrackEval repo -git clone https://github.com/JonathonLuiten/TrackEval.git - -# Move to repo folder -cd TrackEval - -# Create a virtual env in the repo for evaluation -python3 -m venv ./venv - -# Activate the virtual env -source venv/bin/activate - -# Update pip to have the latest version of packages -pip install --upgrade pip - -# Install the required packages -pip install -r requirements.txt - -# Download the train gt data -wget https://omnomnom.vision.rwth-aachen.de/data/RobMOTS/train_gt.zip - -# Unzip the train gt data you just downloaded. 
-unzip train_gt.zip - -# Download the example tracker -wget https://omnomnom.vision.rwth-aachen.de/data/RobMOTS/example_tracker.zip - -# Unzip the example tracker you just downloaded. -unzip example_tracker.zip - -# Run the evaluation on the provided example tracker on the train split (using 4 cores in parallel) -python scripts/run_rob_mots.py --ROBMOTS_SPLIT train --TRACKERS_TO_EVAL STP --USE_PARALLEL True --NUM_PARALLEL_CORES 4 - -``` - -You may further download the raw sequence images and supplied detections (as well as train GT data and example tracker) by following the ```Data Download``` link here: - -[RobMOTS Challenge Info](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110) - -## Accessing tracking evaluation results - -You will find the results of the evaluation (for the supplied tracker STP) in the folder ```TrackEval/data/trackers/rob_mots/train/STP/```. -The overall summary of the results is in ```./final_results.csv```, and more detailed results per sequence and per class and results plots can be found under ```./results/*```. - -The ```final_results.csv``` can be most easily read by opening it in Excel or similar. The ```c```, ```d``` and ```f``` prepending the metric names refer respectively to ```class averaged```, ```detection averaged (class agnostic)``` and ```final``` (the geometric mean of class and detection averaged). - -## Supplied Detections - -To make creating your own tracker particularly easy, we supply a set of strong supplied detection. - -These detections are from the Detectron 2 Mask R-CNN X152 (very bottom model on this [page](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) which achieves a COCO detection mAP score of 50.2). - -We then obtain segmentation masks for these detections using the Box2Seg Network (also called Refinement Net), which results in far more accurate masks than the default Mask R-CNN masks. The code for this can be found [here](https://github.com/JonathonLuiten/PReMVOS/tree/master/code/refinement_net). - -We supply two different supplied detections. The first is the ```raw_supplied``` detections, which is taking all 1000 detections output from the Mask R-CNN, and only removing those for which the maximum class score is less than 0.02 (here no non-maximum suppression, NMS, is run). These can be downloaded [here](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110). - -The second is ```non_overlap_supplied``` detections. These are the same detections as above, but with further processing steps applied to them. First we perform Non-Maximum Suppression (NMS) with a threshold of 0.5 to remove any masks which have an IoU of 0.5 or more with any other mask that has a higher score. Second we run a Non-Overlap algorithm which forces all of the masks for a single image to be non-overlapping. It does this by putting all the masks 'on top of' each other, ordered by score, such that masks with a lower score will be partially removed if a mask with a higher score partially overlaps them. Note that these detections are still only thresholded at a score of 0.02, in general we recommend further thresholding with a higher value to get a good balance of precision and recall. - -Code for this NMS and Non-Overlap algorithm can be found here: -[Non-Overlap Code](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/baselines/non_overlap.py). 
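For illustration only, the two post-processing steps described above can be sketched in a few lines of plain NumPy. This is a simplified sketch, not the official Non-Overlap Code linked above: it assumes the detections have already been decoded into boolean mask arrays (the real pipeline works on compressed RLE), and the function names here are invented for the example.

```python
import numpy as np

def mask_iou(a, b):
    # IoU of two boolean masks of identical shape
    inter = np.logical_and(a, b).sum()
    union = np.logical_or(a, b).sum()
    return inter / union if union > 0 else 0.0

def nms_then_non_overlap(masks, scores, iou_thresh=0.5):
    # masks: list of HxW boolean arrays, scores: list of floats (one per mask)
    if not masks:
        return {}
    order = sorted(range(len(masks)), key=lambda i: scores[i], reverse=True)
    # Step 1: NMS - drop any mask that overlaps an already-kept,
    # higher-scoring mask with IoU >= iou_thresh
    keep = []
    for i in order:
        if all(mask_iou(masks[i], masks[k]) < iou_thresh for k in keep):
            keep.append(i)
    # Step 2: Non-Overlap - "paint" the kept masks from highest to lowest score,
    # so a lower-scoring mask only keeps pixels not already claimed above it
    occupied = np.zeros_like(masks[0], dtype=bool)
    result = {}
    for i in keep:
        visible = np.logical_and(masks[i], np.logical_not(occupied))
        occupied = np.logical_or(occupied, visible)
        result[i] = visible
    return result
```

After this step the masks are guaranteed to be non-overlapping; thresholding the surviving detections at a higher score, as suggested above, then trades recall for precision.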
- -Note that for RobMOTS evaluation the final tracking results need to be 'non-overlapping' so we recommend using the ```non_overlap_supplied``` detections, however you may use the ```raw_supplied```, or your own or any other detections as you like. - -Supplied detections (both raw and non-overlapping) are available for the train, val and test sets. - -Example code for reading in these detections and using them can be found here: - -[Tracker Example](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/baselines/stp.py). - -## Creating your own tracker - -We provide sample code ([Tracker Example](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/baselines/stp.py)) for our STP tracker (Simplest Tracker Possible) which walks though how to create tracking results in the required RobMOTS format. - -This includes code for reading in the supplied detections and writing out the tracking results in the desired format, plus many other useful functions (IoU calculation etc). - -## Evaluating your own tracker - -To evaluate your tracker, put the results in the folder ```TrackEval/data/trackers/rob_mots/train/```, in a folder alongside the supplied tracker STP with the folder labelled as your tracker name, e.g. YOUR_TRACKER. - -You can then run the evaluation code on your tracker like this: - -``` -python scripts/run_rob_mots.py --ROBMOTS_SPLIT train --TRACKERS_TO_EVAL YOUR_TRACKER --USE_PARALLEL True --NUM_PARALLEL_CORES 4 -``` - -## Data format - -For RobMOTS, trackers must submit their results in the following folder format: - -``` -|—— - |—— .txt - |—— .txt - |—— .txt -|—— - |—— .txt - |—— .txt - |—— .txt -``` - -See the supplied STP tracker results (in the Train Data linked above) for an example. - -Thus there is one .txt file for each sequence. This file has one row per detection (object mask in one frame). Each row must have 7 values and has the following format: - -

<Timestep>(int),
<Track ID>(int),
<Class Number>(int),
<Detection Confidence>(float),
<Image Height>(int),
<Image Width>(int),
<Compressed RLE Mask>(string),

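As a worked illustration of this row format, here is a minimal sketch of reading one such row. This is an illustration only, not the official reader: it assumes pycocotools is installed for RLE decoding, and the helper name is invented here. The individual fields are explained below.

```python
from pycocotools import mask as mask_utils

def parse_result_row(line):
    # Columns follow the format listed above:
    # timestep, track id, class number, confidence, image height, image width, compressed RLE mask
    fields = line.strip().split(' ')
    timestep, track_id, class_id = int(fields[0]), int(fields[1]), int(fields[2])
    confidence = float(fields[3])
    height, width = int(fields[4]), int(fields[5])
    rle = {'size': [height, width], 'counts': fields[6].encode('utf-8')}
    binary_mask = mask_utils.decode(rle)  # numpy array of shape (height, width), values 0/1
    return timestep, track_id, class_id, confidence, binary_mask
```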
- -Timesteps are the same as the frame names for the supplied images. These start at 0. - -Track IDs must be unique across all classes within a frame. They can be non-unique across different sequences. - -The mapping of class numbers to class names can be found is [this file](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/datasets/rob_mots_classmap.py). Note that this is the same as used in Detectron 2, and is the default COCO class ordering with the unused numbers removed. - -Detection Confidence score should be between 0 and 1. This is not used for HOTA evaluation, but is used for other eval metrics like Track mAP. - -Image height and width are needed to decode the compressed RLE mask representation. - -The Compressed RLE Mask is the same format used by coco, pycocotools and mots. - -An example of a tracker result file looks like this: - -``` -0 1 3 0.9917707443237305 1200 1920 VaTi0b0lT17F8K3M3N1O1N2O0O2M3N2N101O1O1O01O1O0100O100O01O1O100O10O1000O1000000000000000O1000001O0000000000000000O101O00000000000001O0000010O0110O0O100O1O2N1O2N0O2O2M3M2N2O1O2N5J;DgePZ1 -0 2 3 0.989478349685669 1200 1920 Ql^c05ZU12O2N001O0O10OTkNIaT17^kNKaT15^kNLbT14^kNMaT13^kNOaT11_kN0`T10_kN1`T11_kN0`T11_kN0`T1a0O00001O1O1O3M;E5K3M2N000000000O100000000000000000001O00001O2N1O1O1O000001O001O0O2O0O2M3M3M3N2O1O1O1N2O002N1O2N10O02N10000O1O101M3N2N2M7H^_g_1 -1 2 3 0.964085042476654 1200 1920 o_Uc03\U12O1O1N102N002N001O1O000O2O1O00002N6J1O001O2N1O3L3N2N4L5K2N1O000000000000001O1O2N01O01O010O01N2O0O2O1M4L3N2N101N2O001O1O100O0100000O1O1O1O2N6I4Mdm^`1 -``` - -Note that for the evaluation to be valid, the masks must not overlap within one frame. - -The supplied detections have the same format (but with all the Track IDs being set to 0). - -The groundtruth data for most benchmarks is in the exact same format as above (usually Detection Confidence is set to 1.0). The exception is the few benchmarks for which the ground-truth is not segmentation masks but bounding boxes (Waymo and TAO). For these the last three columns are not there (height, width and mask) as these encode a mask, and instead there are 4 columns encoding the bounding box co-ordinates in the format ```x0 y0 x1 y1```, where x0 and y0 are the coordinates of the top left of the box and x1 and y0 are the coordinates for the bottom right. - -The groundtruth can also contain ignore regions. The are marked by being having a class number of 100 or larger. Class number 100 encodes and ignore region for all class, which class numbers higher than 100 encode ignore regions specific to each class. E.g. class number 105 are ignore regions for class 5. - -As well as the per sequence files described above, the groundtruth for each benchmark contains two more files ```clsmap.txt``` and ```seqmap.txt```. - -```clsmap.txt``` is a single row, space-separated, containing all of the valid classes that should be evaluated for each benchmark (not all benchmarks evaluate all of the coco classes). - -```seqmap.txt``` contains a list of the sequences to be evaluated for that benchmark. Each row has at least 4 values. These are: -``` - -``` -More than 4 values can be present, the remaining values are 'ignore classes for this sequence'. E.g. classes which are evaluated for the particular benchmark as a whole, but should be ignored for this sequence. - -## Visualizing GT and Tracker Masks - -We provide code for converting our .txt format with compressed RLE masks into .png format where it is easy to visualize the GT and Predicted masks. 
- -This code can be found here: - -[Vizualize Tracking Results](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/baselines/vizualize.py). - - -## Evaluate on the validation and test server - -The val and test GT will NOT be provided. However we provide a live evaluation server to upload your tracking results and evaluate it on the val and test set. - -The val server will allow infinite uploads, while the test will limit trackers to 4 uploads total. - -These evaluation servers can be found here: https://eval.vision.rwth-aachen.de/vision/ - -Ensure that your files to upload are in the correct format. Examples of the correct way to upload files can be found here: [STP val upload](https://omnomnom.vision.rwth-aachen.de/data/RobMOTS/STP_val_upload.zip), [STP test upload](https://omnomnom.vision.rwth-aachen.de/data/RobMOTS/STP_test_upload.zip). - -## Citation -If you work with the code and the benchmark, please cite: - -***TrackEval*** -``` -@misc{luiten2020trackeval, - author = {Jonathon Luiten, Arne Hoffhues}, - title = {TrackEval}, - howpublished = {\url{https://github.com/JonathonLuiten/TrackEval}}, - year = {2020} -} -``` -***HOTA metrics*** -``` -@article{luiten2020IJCV, - title={HOTA: A Higher Order Metric for Evaluating Multi-Object Tracking}, - author={Luiten, Jonathon and Osep, Aljosa and Dendorfer, Patrick and Torr, Philip and Geiger, Andreas and Leal-Taix{\'e}, Laura and Leibe, Bastian}, - journal={International Journal of Computer Vision}, - pages={1--31}, - year={2020}, - publisher={Springer} -} -``` - -## Feedback and Contact -We are constantly working on improving RobMOTS, and wish to provide the most useful support to the community. -You can help us to make the benchmark better by open issues in the repo and reporting bugs. - -For general questions, please contact the following: - -``` -Jonathon Luiten - luiten@vision.rwth-aachen.de -``` diff --git a/spaces/xiang-wuu/yolov5/data/scripts/get_coco128.sh b/spaces/xiang-wuu/yolov5/data/scripts/get_coco128.sh deleted file mode 100644 index ee05a867e5644be8cc7549b89cad89d5e84573d0..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/data/scripts/get_coco128.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) -# Example usage: bash data/scripts/get_coco128.sh -# parent -# ├── yolov5 -# └── datasets -# └── coco128 ← downloads here - -# Download/unzip images and labels -d='../datasets' # unzip directory -url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco128.zip' # or 'coco128-segments.zip', 68 MB -echo 'Downloading' $url$f ' ...' 
-curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & - -wait # finish background tasks diff --git a/spaces/xkhaloda/Envvi-Inkpunk-Diffusion/README.md b/spaces/xkhaloda/Envvi-Inkpunk-Diffusion/README.md deleted file mode 100644 index c91b56c2564f6b4710b56bd481a56b3a98a981f4..0000000000000000000000000000000000000000 --- a/spaces/xkhaloda/Envvi-Inkpunk-Diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Envvi Inkpunk Diffusion -emoji: 💩 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xnetba/MMS/vits/text/cleaners.py b/spaces/xnetba/MMS/vits/text/cleaners.py deleted file mode 100644 index 2658f667a7d59ca99a3e16ba0c157d2ab5d795eb..0000000000000000000000000000000000000000 --- a/spaces/xnetba/MMS/vits/text/cleaners.py +++ /dev/null @@ -1,100 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -from phonemizer import phonemize - - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def expand_numbers(text): - return normalize_numbers(text) - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def basic_cleaners(text): - '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def transliteration_cleaners(text): - '''Pipeline for non-English text that transliterates to ASCII.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def english_cleaners(text): - '''Pipeline for English text, including abbreviation expansion.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = phonemize(text, language='en-us', backend='espeak', strip=True) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_cleaners2(text): - '''Pipeline for English text, including abbreviation expansion. + punctuation + stress''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=True) - phonemes = collapse_whitespace(phonemes) - return phonemes diff --git a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/attentions.py b/spaces/xxbb/VITS-Umamusume-voice-synthesizer/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in 
range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - 
nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/xxie92/antibody_visulization/diffab/utils/protein/writers.py b/spaces/xxie92/antibody_visulization/diffab/utils/protein/writers.py deleted file mode 100644 index 2889e8e7ebe938f2a054a6d1a84b7f50318d8430..0000000000000000000000000000000000000000 --- a/spaces/xxie92/antibody_visulization/diffab/utils/protein/writers.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -import warnings -from Bio import BiopythonWarning -from Bio.PDB import PDBIO -from Bio.PDB.StructureBuilder import StructureBuilder - -from .constants import AA, restype_to_heavyatom_names - - -def save_pdb(data, path=None): - """ - Args: - data: A dict that contains: `chain_nb`, `chain_id`, `aa`, `resseq`, `icode`, - `pos_heavyatom`, `mask_heavyatom`. 
- """ - - def _mask_select(v, mask): - if isinstance(v, str): - return ''.join([s for i, s in enumerate(v) if mask[i]]) - elif isinstance(v, list): - return [s for i, s in enumerate(v) if mask[i]] - elif isinstance(v, torch.Tensor): - return v[mask] - else: - return v - - def _build_chain(builder, aa_ch, pos_heavyatom_ch, mask_heavyatom_ch, chain_id_ch, resseq_ch, icode_ch): - builder.init_chain(chain_id_ch[0]) - builder.init_seg(' ') - - for aa_res, pos_allatom_res, mask_allatom_res, resseq_res, icode_res in \ - zip(aa_ch, pos_heavyatom_ch, mask_heavyatom_ch, resseq_ch, icode_ch): - if not AA.is_aa(aa_res.item()): - print('[Warning] Unknown amino acid type at %d%s: %r' % (resseq_res.item(), icode_res, aa_res.item())) - continue - restype = AA(aa_res.item()) - builder.init_residue( - resname = str(restype), - field = ' ', - resseq = resseq_res.item(), - icode = icode_res, - ) - - for i, atom_name in enumerate(restype_to_heavyatom_names[restype]): - if atom_name == '': continue # No expected atom - if (~mask_allatom_res[i]).any(): continue # Atom is missing - if len(atom_name) == 1: fullname = ' %s ' % atom_name - elif len(atom_name) == 2: fullname = ' %s ' % atom_name - elif len(atom_name) == 3: fullname = ' %s' % atom_name - else: fullname = atom_name # len == 4 - builder.init_atom(atom_name, pos_allatom_res[i].tolist(), 0.0, 1.0, ' ', fullname,) - - warnings.simplefilter('ignore', BiopythonWarning) - builder = StructureBuilder() - builder.init_structure(0) - builder.init_model(0) - - unique_chain_nb = data['chain_nb'].unique().tolist() - for ch_nb in unique_chain_nb: - mask = (data['chain_nb'] == ch_nb) - aa = _mask_select(data['aa'], mask) - pos_heavyatom = _mask_select(data['pos_heavyatom'], mask) - mask_heavyatom = _mask_select(data['mask_heavyatom'], mask) - chain_id = _mask_select(data['chain_id'], mask) - resseq = _mask_select(data['resseq'], mask) - icode = _mask_select(data['icode'], mask) - - _build_chain(builder, aa, pos_heavyatom, mask_heavyatom, chain_id, resseq, icode) - - structure = builder.get_structure() - if path is not None: - io = PDBIO() - io.set_structure(structure) - io.save(path) - return structure diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/dependency_versions_check.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/dependency_versions_check.py deleted file mode 100644 index 254bb64b3e9ee7fbfd61f7f0e9ed5c96345625b1..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/dependency_versions_check.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from .dependency_versions_table import deps -from .utils.versions import require_version, require_version_core - - -# define which module versions we always want to check at run time -# (usually the ones defined in `install_requires` in setup.py) -# -# order specific notes: -# - tqdm must be checked before tokenizers - -pkgs_to_check_at_runtime = [ - "python", - "tqdm", - "regex", - "requests", - "packaging", - "filelock", - "numpy", - "tokenizers", - "huggingface-hub", - "safetensors", - "accelerate", - "pyyaml", -] - -# yizhangliu -if 0==1: - for pkg in pkgs_to_check_at_runtime: - if pkg in deps: - if pkg == "tokenizers": - # must be loaded here, or else tqdm check may fail - from .utils import is_tokenizers_available - - if not is_tokenizers_available(): - continue # not required, check version only if installed - elif pkg == "accelerate": - # must be loaded here, or else tqdm check may fail - from .utils import is_accelerate_available - - # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of - # Transformers with PyTorch - if not is_accelerate_available(): - continue # not required, check version only if installed - - require_version_core(deps[pkg]) - else: - raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") - - - def dep_version_check(pkg, hint=None): - require_version(deps[pkg], hint) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mobilenet_v1/image_processing_mobilenet_v1.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mobilenet_v1/image_processing_mobilenet_v1.py deleted file mode 100644 index c9b015c5c01fb76f17b88d9c725fadbe45bea390..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mobilenet_v1/image_processing_mobilenet_v1.py +++ /dev/null @@ -1,297 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Image processor class for MobileNetV1.""" - -from typing import Dict, List, Optional, Union - -import numpy as np - -from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict -from ...image_transforms import ( - get_resize_output_image_size, - resize, - to_channel_dimension_format, -) -from ...image_utils import ( - IMAGENET_STANDARD_MEAN, - IMAGENET_STANDARD_STD, - ChannelDimension, - ImageInput, - PILImageResampling, - infer_channel_dimension_format, - is_scaled_image, - make_list_of_images, - to_numpy_array, - valid_images, -) -from ...utils import TensorType, logging - - -logger = logging.get_logger(__name__) - - -class MobileNetV1ImageProcessor(BaseImageProcessor): - r""" - Constructs a MobileNetV1 image processor. - - Args: - do_resize (`bool`, *optional*, defaults to `True`): - Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by - `do_resize` in the `preprocess` method. 
- size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`): - Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with - the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` - method. - resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): - Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the - `preprocess` method. - do_center_crop (`bool`, *optional*, defaults to `True`): - Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image - is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the - `preprocess` method. - crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): - Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. - Can be overridden by the `crop_size` parameter in the `preprocess` method. - do_rescale (`bool`, *optional*, defaults to `True`): - Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` - parameter in the `preprocess` method. - rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): - Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the - `preprocess` method. - do_normalize: - Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` - method. - image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): - Mean to use if normalizing the image. This is a float or list of floats the length of the number of - channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. - image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): - Standard deviation to use if normalizing the image. This is a float or list of floats the length of the - number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
- """ - - model_input_names = ["pixel_values"] - - def __init__( - self, - do_resize: bool = True, - size: Optional[Dict[str, int]] = None, - resample: PILImageResampling = PILImageResampling.BILINEAR, - do_center_crop: bool = True, - crop_size: Dict[str, int] = None, - do_rescale: bool = True, - rescale_factor: Union[int, float] = 1 / 255, - do_normalize: bool = True, - image_mean: Optional[Union[float, List[float]]] = None, - image_std: Optional[Union[float, List[float]]] = None, - **kwargs, - ) -> None: - super().__init__(**kwargs) - size = size if size is not None else {"shortest_edge": 256} - size = get_size_dict(size, default_to_square=False) - crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} - crop_size = get_size_dict(crop_size) - self.do_resize = do_resize - self.size = size - self.resample = resample - self.do_center_crop = do_center_crop - self.crop_size = crop_size - self.do_rescale = do_rescale - self.rescale_factor = rescale_factor - self.do_normalize = do_normalize - self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN - self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD - - # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize - def resize( - self, - image: np.ndarray, - size: Dict[str, int], - resample: PILImageResampling = PILImageResampling.BICUBIC, - data_format: Optional[Union[str, ChannelDimension]] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ) -> np.ndarray: - """ - Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge - resized to keep the input aspect ratio. - - Args: - image (`np.ndarray`): - Image to resize. - size (`Dict[str, int]`): - Size of the output image. - resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): - Resampling filter to use when resiizing the image. - data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format of the image. If not provided, it will be the same as the input image. - input_data_format (`ChannelDimension` or `str`, *optional*): - The channel dimension format of the input image. If not provided, it will be inferred. - """ - size = get_size_dict(size, default_to_square=False) - if "shortest_edge" not in size: - raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}") - output_size = get_resize_output_image_size( - image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format - ) - return resize( - image, - size=output_size, - resample=resample, - data_format=data_format, - input_data_format=input_data_format, - **kwargs, - ) - - def preprocess( - self, - images: ImageInput, - do_resize: Optional[bool] = None, - size: Dict[str, int] = None, - resample: PILImageResampling = None, - do_center_crop: bool = None, - crop_size: Dict[str, int] = None, - do_rescale: Optional[bool] = None, - rescale_factor: Optional[float] = None, - do_normalize: Optional[bool] = None, - image_mean: Optional[Union[float, List[float]]] = None, - image_std: Optional[Union[float, List[float]]] = None, - return_tensors: Optional[Union[str, TensorType]] = None, - data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - **kwargs, - ): - """ - Preprocess an image or batch of images. 
- - Args: - images (`ImageInput`): - Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If - passing in images with pixel values between 0 and 1, set `do_rescale=False`. - do_resize (`bool`, *optional*, defaults to `self.do_resize`): - Whether to resize the image. - size (`Dict[str, int]`, *optional*, defaults to `self.size`): - Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with - the longest edge resized to keep the input aspect ratio. - resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): - `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has - an effect if `do_resize` is set to `True`. - do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): - Whether to center crop the image. - crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): - Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. - do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): - Whether to rescale the image values between [0 - 1]. - rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): - Rescale factor to rescale the image by if `do_rescale` is set to `True`. - do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): - Whether to normalize the image. - image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): - Image mean to use if `do_normalize` is set to `True`. - image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): - Image standard deviation to use if `do_normalize` is set to `True`. - return_tensors (`str` or `TensorType`, *optional*): - The type of tensors to return. Can be one of: - - Unset: Return a list of `np.ndarray`. - - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. - data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): - The channel dimension format for the output image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - - Unset: Use the channel dimension format of the input image. - input_data_format (`ChannelDimension` or `str`, *optional*): - The channel dimension format for the input image. If unset, the channel dimension format is inferred - from the input image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
- """ - do_resize = do_resize if do_resize is not None else self.do_resize - size = size if size is not None else self.size - size = get_size_dict(size, default_to_square=False) - resample = resample if resample is not None else self.resample - do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop - crop_size = crop_size if crop_size is not None else self.crop_size - crop_size = get_size_dict(crop_size) - do_rescale = do_rescale if do_rescale is not None else self.do_rescale - rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor - do_normalize = do_normalize if do_normalize is not None else self.do_normalize - image_mean = image_mean if image_mean is not None else self.image_mean - image_std = image_std if image_std is not None else self.image_std - - images = make_list_of_images(images) - - if not valid_images(images): - raise ValueError( - "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " - "torch.Tensor, tf.Tensor or jax.ndarray." - ) - - if do_resize and size is None: - raise ValueError("Size must be specified if do_resize is True.") - - if do_center_crop and crop_size is None: - raise ValueError("Crop size must be specified if do_center_crop is True.") - - if do_rescale and rescale_factor is None: - raise ValueError("Rescale factor must be specified if do_rescale is True.") - - if do_normalize and (image_mean is None or image_std is None): - raise ValueError("Image mean and std must be specified if do_normalize is True.") - - # All transformations expect numpy arrays. - images = [to_numpy_array(image) for image in images] - - if is_scaled_image(images[0]) and do_rescale: - logger.warning_once( - "It looks like you are trying to rescale already rescaled images. If the input" - " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." - ) - - if input_data_format is None: - # We assume that all images have the same channel dimension format. - input_data_format = infer_channel_dimension_format(images[0]) - - if do_resize: - images = [ - self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) - for image in images - ] - - if do_center_crop: - images = [ - self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images - ] - - if do_rescale: - images = [ - self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) - for image in images - ] - - if do_normalize: - images = [ - self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) - for image in images - ] - - images = [ - to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images - ] - - data = {"pixel_values": images} - return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mpnet/tokenization_mpnet.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mpnet/tokenization_mpnet.py deleted file mode 100644 index 21c3555c0577491ca4a0f49de35402ca89819785..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mpnet/tokenization_mpnet.py +++ /dev/null @@ -1,545 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes for MPNet.""" - -import collections -import os -import unicodedata -from typing import List, Optional, Tuple - -from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace -from ...utils import logging - - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/vocab.txt", - } -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "microsoft/mpnet-base": 512, -} - -PRETRAINED_INIT_CONFIGURATION = { - "microsoft/mpnet-base": {"do_lower_case": True}, -} - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - with open(vocab_file, "r", encoding="utf-8") as reader: - tokens = reader.readlines() - for index, token in enumerate(tokens): - token = token.rstrip("\n") - vocab[token] = index - return vocab - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class MPNetTokenizer(PreTrainedTokenizer): - """ - - This tokenizer inherits from [`BertTokenizer`] which contains most of the methods. Users should refer to the - superclass for more information regarding methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - do_lower_case (`bool`, *optional*, defaults to `True`): - Whether or not to lowercase the input when tokenizing. - do_basic_tokenize (`bool`, *optional*, defaults to `True`): - Whether or not to do basic tokenization before WordPiece. - never_split (`Iterable`, *optional*): - Collection of tokens which will never be split during tokenization. Only has an effect when - `do_basic_tokenize=True` - bos_token (`str`, *optional*, defaults to `""`): - The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. - - - - When building a sequence using special tokens, this is not the token that is used for the beginning of - sequence. The token used is the `cls_token`. - - - - eos_token (`str`, *optional*, defaults to `""`): - The end of sequence token. - - - - When building a sequence using special tokens, this is not the token that is used for the end of sequence. - The token used is the `sep_token`. - - - - sep_token (`str`, *optional*, defaults to `""`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. - cls_token (`str`, *optional*, defaults to `""`): - The classifier token which is used when doing sequence classification (classification of the whole sequence - instead of per-token classification). 
It is the first token of the sequence when built with special tokens. - unk_token (`str`, *optional*, defaults to `"[UNK]"`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - pad_token (`str`, *optional*, defaults to `""`): - The token used for padding, for example when batching sequences of different lengths. - mask_token (`str`, *optional*, defaults to `""`): - The token used for masking values. This is the token used when training this model with masked language - modeling. This is the token which the model will try to predict. - tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): - Whether or not to tokenize Chinese characters. - - This should likely be deactivated for Japanese (see this - [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents (`bool`, *optional*): - Whether or not to strip all accents. If this option is not specified, then it will be determined by the - value for `lowercase` (as in the original BERT). - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - do_lower_case=True, - do_basic_tokenize=True, - never_split=None, - bos_token="", - eos_token="", - sep_token="", - cls_token="", - unk_token="[UNK]", - pad_token="", - mask_token="", - tokenize_chinese_chars=True, - strip_accents=None, - **kwargs, - ): - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token - cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token - - # Mask token behave like a normal word, i.e. include the space before it - mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token - - if not os.path.isfile(vocab_file): - raise ValueError( - f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" - " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" - ) - self.vocab = load_vocab(vocab_file) - self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) - self.do_basic_tokenize = do_basic_tokenize - if do_basic_tokenize: - self.basic_tokenizer = BasicTokenizer( - do_lower_case=do_lower_case, - never_split=never_split, - tokenize_chinese_chars=tokenize_chinese_chars, - strip_accents=strip_accents, - ) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) - - super().__init__( - do_lower_case=do_lower_case, - do_basic_tokenize=do_basic_tokenize, - never_split=never_split, - bos_token=bos_token, - eos_token=eos_token, - unk_token=unk_token, - sep_token=sep_token, - cls_token=cls_token, - pad_token=pad_token, - mask_token=mask_token, - tokenize_chinese_chars=tokenize_chinese_chars, - strip_accents=strip_accents, - **kwargs, - ) - - @property - def do_lower_case(self): - return self.basic_tokenizer.do_lower_case - - @property - def vocab_size(self): - return len(self.vocab) - - def get_vocab(self): - vocab = self.vocab.copy() - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text): - split_tokens = [] - if self.do_basic_tokenize: - for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): - # If the token is part of the never_split set - if token in self.basic_tokenizer.never_split: - split_tokens.append(token) - else: - split_tokens += self.wordpiece_tokenizer.tokenize(token) - else: - split_tokens = self.wordpiece_tokenizer.tokenize(text) - return split_tokens - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.vocab.get(token, self.vocab.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.ids_to_tokens.get(index, self.unk_token) - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - out_string = " ".join(tokens).replace(" ##", "").strip() - return out_string - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A MPNet sequence has the following format: - - - single sequence: ` X ` - - pair of sequences: ` A B ` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - if token_ids_1 is None: - return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] - cls = [self.cls_token_id] - sep = [self.sep_token_id] - return cls + token_ids_0 + sep + sep + token_ids_1 + sep - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` methods. - - Args: - token_ids_0 (`List[int]`): - List of ids. 
- token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Set to True if the token list is already formatted with special tokens for the model - - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - if token_ids_1 is None: - return [1] + ([0] * len(token_ids_0)) + [1] - return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] - - def create_token_type_ids_from_sequences( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not - make use of token type ids, therefore a list of zeros is returned. - - Args: - token_ids_0 (`List[int]`): - List of ids. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of zeros. - """ - sep = [self.sep_token_id] - cls = [self.cls_token_id] - - if token_ids_1 is None: - return len(cls + token_ids_0 + sep) * [0] - return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - index = 0 - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - else: - vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory - with open(vocab_file, "w", encoding="utf-8") as writer: - for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): - if index != token_index: - logger.warning( - f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." - " Please check that the vocabulary is not corrupted!" - ) - index = token_index - writer.write(token + "\n") - index += 1 - return (vocab_file,) - - -# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer -class BasicTokenizer(object): - """ - Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). - - Args: - do_lower_case (`bool`, *optional*, defaults to `True`): - Whether or not to lowercase the input when tokenizing. - never_split (`Iterable`, *optional*): - Collection of tokens which will never be split during tokenization. Only has an effect when - `do_basic_tokenize=True` - tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): - Whether or not to tokenize Chinese characters. - - This should likely be deactivated for Japanese (see this - [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents (`bool`, *optional*): - Whether or not to strip all accents. If this option is not specified, then it will be determined by the - value for `lowercase` (as in the original BERT). - do_split_on_punc (`bool`, *optional*, defaults to `True`): - In some instances we want to skip the basic punctuation splitting so that later tokenization can capture - the full context of the words, such as contractions. 
- """ - - def __init__( - self, - do_lower_case=True, - never_split=None, - tokenize_chinese_chars=True, - strip_accents=None, - do_split_on_punc=True, - ): - if never_split is None: - never_split = [] - self.do_lower_case = do_lower_case - self.never_split = set(never_split) - self.tokenize_chinese_chars = tokenize_chinese_chars - self.strip_accents = strip_accents - self.do_split_on_punc = do_split_on_punc - - def tokenize(self, text, never_split=None): - """ - Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. - - Args: - never_split (`List[str]`, *optional*) - Kept for backward compatibility purposes. Now implemented directly at the base class level (see - [`PreTrainedTokenizer.tokenize`]) List of token not to split. - """ - # union() returns a new set by concatenating the two sets. - never_split = self.never_split.union(set(never_split)) if never_split else self.never_split - text = self._clean_text(text) - - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). - if self.tokenize_chinese_chars: - text = self._tokenize_chinese_chars(text) - # prevents treating the same character with different unicode codepoints as different characters - unicode_normalized_text = unicodedata.normalize("NFC", text) - orig_tokens = whitespace_tokenize(unicode_normalized_text) - split_tokens = [] - for token in orig_tokens: - if token not in never_split: - if self.do_lower_case: - token = token.lower() - if self.strip_accents is not False: - token = self._run_strip_accents(token) - elif self.strip_accents: - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token, never_split)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text, never_split=None): - """Splits punctuation on a piece of text.""" - if not self.do_split_on_punc or (never_split is not None and text in never_split): - return [text] - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # 
despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ( - (cp >= 0x4E00 and cp <= 0x9FFF) - or (cp >= 0x3400 and cp <= 0x4DBF) # - or (cp >= 0x20000 and cp <= 0x2A6DF) # - or (cp >= 0x2A700 and cp <= 0x2B73F) # - or (cp >= 0x2B740 and cp <= 0x2B81F) # - or (cp >= 0x2B820 and cp <= 0x2CEAF) # - or (cp >= 0xF900 and cp <= 0xFAFF) - or (cp >= 0x2F800 and cp <= 0x2FA1F) # - ): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xFFFD or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer -class WordpieceTokenizer(object): - """Runs WordPiece tokenization.""" - - def __init__(self, vocab, unk_token, max_input_chars_per_word=100): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """ - Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform - tokenization using the given vocabulary. - - For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. - - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through *BasicTokenizer*. - - Returns: - A list of wordpiece tokens. - """ - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/tutorials/models.md b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/tutorials/models.md deleted file mode 100644 index 3cf918e7a145ee326c6cccf8a88835b7e02a7c30..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/tutorials/models.md +++ /dev/null @@ -1,180 +0,0 @@ -# Use Models - -## Build Models from Yacs Config -From a yacs config object, -models (and their sub-models) can be built by -functions such as `build_model`, `build_backbone`, `build_roi_heads`: -```python -from detectron2.modeling import build_model -model = build_model(cfg) # returns a torch.nn.Module -``` - -`build_model` only builds the model structure and fills it with random parameters. -See below for how to load an existing checkpoint to the model and how to use the `model` object. 
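-
-The same yacs config can also drive the sub-model builders mentioned above (`build_backbone`, `build_roi_heads`). As a minimal illustrative sketch, assuming detectron2's default config, you can build just the backbone sub-model and inspect the feature maps it exposes; the exact feature names and shapes depend on the config you use:
-```python
-from detectron2.config import get_cfg
-from detectron2.modeling import build_backbone
-
-cfg = get_cfg()                  # default config; normally you would load a model zoo / project config instead
-backbone = build_backbone(cfg)   # builds only the backbone sub-model, filled with random parameters
-
-# Backbone modules report the shapes of the feature maps they produce;
-# downstream components (RPN, ROI heads) are wired against these shapes.
-print(backbone.output_shape())   # e.g. {'res4': ShapeSpec(channels=1024, stride=16)} with the defaults
-```
-`build_roi_heads` follows the same pattern, taking the config together with the backbone's `output_shape()` so the heads know which feature maps they consume.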
- -### Load/Save a Checkpoint -```python -from detectron2.checkpoint import DetectionCheckpointer -DetectionCheckpointer(model).load(file_path_or_url) # load a file, usually from cfg.MODEL.WEIGHTS - -checkpointer = DetectionCheckpointer(model, save_dir="output") -checkpointer.save("model_999") # save to output/model_999.pth -``` - -Detectron2's checkpointer recognizes models in pytorch's `.pth` format, as well as the `.pkl` files -in our model zoo. -See [API doc](../modules/checkpoint.html#detectron2.checkpoint.DetectionCheckpointer) -for more details about its usage. - -The model files can be arbitrarily manipulated using `torch.{load,save}` for `.pth` files or -`pickle.{dump,load}` for `.pkl` files. - -### Use a Model - -A model can be called by `outputs = model(inputs)`, where `inputs` is a `list[dict]`. -Each dict corresponds to one image and the required keys -depend on the type of model, and whether the model is in training or evaluation mode. -For example, in order to do inference, -all existing models expect the "image" key, and optionally "height" and "width". -The detailed format of inputs and outputs of existing models are explained below. - -__Training__: When in training mode, all models are required to be used under an `EventStorage`. -The training statistics will be put into the storage: -```python -from detectron2.utils.events import EventStorage -with EventStorage() as storage: - losses = model(inputs) -``` - -__Inference__: If you only want to do simple inference using an existing model, -[DefaultPredictor](../modules/engine.html#detectron2.engine.defaults.DefaultPredictor) -is a wrapper around model that provides such basic functionality. -It includes default behavior including model loading, preprocessing, -and operates on single image rather than batches. See its documentation for usage. - -You can also run inference directly like this: -``` -model.eval() -with torch.no_grad(): - outputs = model(inputs) -``` - -### Model Input Format - -Users can implement custom models that support any arbitrary input format. -Here we describe the standard input format that all builtin models support in detectron2. -They all take a `list[dict]` as the inputs. Each dict -corresponds to information about one image. - -The dict may contain the following keys: - -* "image": `Tensor` in (C, H, W) format. The meaning of channels are defined by `cfg.INPUT.FORMAT`. - Image normalization, if any, will be performed inside the model using - `cfg.MODEL.PIXEL_{MEAN,STD}`. -* "height", "width": the **desired** output height and width **in inference**, which is not necessarily the same - as the height or width of the `image` field. - For example, the `image` field contains the resized image, if resize is used as a preprocessing step. - But you may want the outputs to be in **original** resolution. - If provided, the model will produce output in this resolution, - rather than in the resolution of the `image` as input into the model. This is more efficient and accurate. -* "instances": an [Instances](../modules/structures.html#detectron2.structures.Instances) - object for training, with the following fields: - + "gt_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each instance. - + "gt_classes": `Tensor` of long type, a vector of N labels, in range [0, num_categories). 
- + "gt_masks": a [PolygonMasks](../modules/structures.html#detectron2.structures.PolygonMasks) - or [BitMasks](../modules/structures.html#detectron2.structures.BitMasks) object storing N masks, one for each instance. - + "gt_keypoints": a [Keypoints](../modules/structures.html#detectron2.structures.Keypoints) - object storing N keypoint sets, one for each instance. -* "sem_seg": `Tensor[int]` in (H, W) format. The semantic segmentation ground truth for training. - Values represent category labels starting from 0. -* "proposals": an [Instances](../modules/structures.html#detectron2.structures.Instances) - object used only in Fast R-CNN style models, with the following fields: - + "proposal_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing P proposal boxes. - + "objectness_logits": `Tensor`, a vector of P scores, one for each proposal. - -For inference of builtin models, only "image" key is required, and "width/height" are optional. - -We currently don't define standard input format for panoptic segmentation training, -because models now use custom formats produced by custom data loaders. - -#### How it connects to data loader: - -The output of the default [DatasetMapper]( ../modules/data.html#detectron2.data.DatasetMapper) is a dict -that follows the above format. -After the data loader performs batching, it becomes `list[dict]` which the builtin models support. - - -### Model Output Format - -When in training mode, the builtin models output a `dict[str->ScalarTensor]` with all the losses. - -When in inference mode, the builtin models output a `list[dict]`, one dict for each image. -Based on the tasks the model is doing, each dict may contain the following fields: - -* "instances": [Instances](../modules/structures.html#detectron2.structures.Instances) - object with the following fields: - * "pred_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each detected instance. - * "scores": `Tensor`, a vector of N confidence scores. - * "pred_classes": `Tensor`, a vector of N labels in range [0, num_categories). - + "pred_masks": a `Tensor` of shape (N, H, W), masks for each detected instance. - + "pred_keypoints": a `Tensor` of shape (N, num_keypoint, 3). - Each row in the last dimension is (x, y, score). Confidence scores are larger than 0. -* "sem_seg": `Tensor` of (num_categories, H, W), the semantic segmentation prediction. -* "proposals": [Instances](../modules/structures.html#detectron2.structures.Instances) - object with the following fields: - * "proposal_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) - object storing N boxes. - * "objectness_logits": a torch vector of N confidence scores. -* "panoptic_seg": A tuple of `(pred: Tensor, segments_info: Optional[list[dict]])`. - The `pred` tensor has shape (H, W), containing the segment id of each pixel. - - * If `segments_info` exists, each dict describes one segment id in `pred` and has the following fields: - - * "id": the segment id - * "isthing": whether the segment is a thing or stuff - * "category_id": the category id of this segment. - - If a pixel's id does not exist in `segments_info`, it is considered to be void label - defined in [Panoptic Segmentation](https://arxiv.org/abs/1801.00868). - - * If `segments_info` is None, all pixel values in `pred` must be ≥ -1. - Pixels with value -1 are assigned void labels. 
- Otherwise, the category id of each pixel is obtained by - `category_id = pixel // metadata.label_divisor`. - - -### Partially execute a model: - -Sometimes you may want to obtain an intermediate tensor inside a model, -such as the input of certain layer, the output before post-processing. -Since there are typically hundreds of intermediate tensors, there isn't an API that provides you -the intermediate result you need. -You have the following options: - -1. Write a (sub)model. Following the [tutorial](./write-models.md), you can - rewrite a model component (e.g. a head of a model), such that it - does the same thing as the existing component, but returns the output - you need. -2. Partially execute a model. You can create the model as usual, - but use custom code to execute it instead of its `forward()`. For example, - the following code obtains mask features before mask head. - - ```python - images = ImageList.from_tensors(...) # preprocessed input tensor - model = build_model(cfg) - model.eval() - features = model.backbone(images.tensor) - proposals, _ = model.proposal_generator(images, features) - instances, _ = model.roi_heads(images, features, proposals) - mask_features = [features[f] for f in model.roi_heads.in_features] - mask_features = model.roi_heads.mask_pooler(mask_features, [x.pred_boxes for x in instances]) - ``` - -3. Use [forward hooks](https://pytorch.org/tutorials/beginner/former_torchies/nnft_tutorial.html#forward-and-backward-function-hooks). - Forward hooks can help you obtain inputs or outputs of a certain module. - If they are not exactly what you want, they can at least be used together with partial execution - to obtain other tensors. - -All options require you to read documentation and sometimes code -of the existing models to understand the internal logic, -in order to write code to obtain the internal tensors. diff --git a/spaces/yuan1615/EmpathyTTS/text/cleaners.py b/spaces/yuan1615/EmpathyTTS/text/cleaners.py deleted file mode 100644 index 70094a265a0c4fdb797e9ffef81167878120e06e..0000000000000000000000000000000000000000 --- a/spaces/yuan1615/EmpathyTTS/text/cleaners.py +++ /dev/null @@ -1,104 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -from phonemizer import phonemize - - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def expand_numbers(text): - return normalize_numbers(text) - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def basic_cleaners(text): - '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def transliteration_cleaners(text): - '''Pipeline for non-English text that transliterates to ASCII.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def english_cleaners(text): - '''Pipeline for English text, including abbreviation expansion.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = phonemize(text, language='en-us', backend='espeak', strip=True) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_cleaners2(text): - '''Pipeline for English text, including abbreviation expansion. + punctuation + stress''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = collapse_whitespace(text) - return phonemes - - -def mandarin_cleaners(text): - '''Pipeline for Cantonese text, including abbreviation expansion. + punctuation + stress''' - return text diff --git a/spaces/yufiofficial/MusicGenQ/tests/__init__.py b/spaces/yufiofficial/MusicGenQ/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/yufiofficial/MusicGenQ/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
diff --git a/spaces/yuhanbo/chat-gpt/public/serviceWorker.js b/spaces/yuhanbo/chat-gpt/public/serviceWorker.js deleted file mode 100644 index 585633fcb2fd522492fc821f5ff013745527a069..0000000000000000000000000000000000000000 --- a/spaces/yuhanbo/chat-gpt/public/serviceWorker.js +++ /dev/null @@ -1,24 +0,0 @@ -const CHATGPT_NEXT_WEB_CACHE = "chatgpt-next-web-cache"; - -self.addEventListener('activate', function (event) { - console.log('ServiceWorker activated.'); -}); - -self.addEventListener('install', function (event) { - event.waitUntil( - caches.open(CHATGPT_NEXT_WEB_CACHE) - .then(function (cache) { - return cache.addAll([ - ]); - }) - ); -}); - -self.addEventListener('fetch', function (event) { - event.respondWith( - caches.match(event.request) - .then(function (response) { - return response || fetch(event.request); - }) - ); -}); \ No newline at end of file diff --git a/spaces/yunfei0710/gpt-academic/docs/self_analysis.md b/spaces/yunfei0710/gpt-academic/docs/self_analysis.md deleted file mode 100644 index ebc2337194974bf210794df7d858889010fecf08..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/docs/self_analysis.md +++ /dev/null @@ -1,378 +0,0 @@ -# chatgpt-academic项目自译解报告 -(Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄) - - -| 文件名 | 功能描述 | -| ------ | ------ | -| check_proxy.py | 检查代理有效性及地理位置 | -| colorful.py | 控制台打印彩色文字 | -| config.py | 配置和参数设置 | -| config_private.py | 私人配置和参数设置 | -| core_functional.py | 核心函数和参数设置 | -| crazy_functional.py | 高级功能插件集合 | -| main.py | 一个 Chatbot 程序,提供各种学术翻译、文本处理和其他查询服务 | -| multi_language.py | 识别和翻译不同语言 | -| theme.py | 自定义 gradio 应用程序主题 | -| toolbox.py | 工具类库,用于协助实现各种功能 | -| crazy_functions\crazy_functions_test.py | 测试 crazy_functions 中的各种函数 | -| crazy_functions\crazy_utils.py | 工具函数,用于字符串处理、异常检测、Markdown 格式转换等 | -| crazy_functions\Latex全文润色.py | 对整个 Latex 项目进行润色和纠错 | -| crazy_functions\Latex全文翻译.py | 对整个 Latex 项目进行翻译 | -| crazy_functions\\_\_init\_\_.py | 模块初始化文件,标识 `crazy_functions` 是一个包 | -| crazy_functions\下载arxiv论文翻译摘要.py | 下载 `arxiv` 论文的 PDF 文件,并提取摘要和翻译 | -| crazy_functions\代码重写为全英文_多线程.py | 将Python源代码文件中的中文内容转化为英文 | -| crazy_functions\图片生成.py | 根据激励文本使用GPT模型生成相应的图像 | -| crazy_functions\对话历史存档.py | 将每次对话记录写入Markdown格式的文件中 | -| crazy_functions\总结word文档.py | 对输入的word文档进行摘要生成 | -| crazy_functions\总结音视频.py | 对输入的音视频文件进行摘要生成 | -| crazy_functions\批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 | -| crazy_functions\批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 | -| crazy_functions\批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 | -| crazy_functions\批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 | -| crazy_functions\理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 | -| crazy_functions\生成函数注释.py | 自动生成Python函数的注释 | -| crazy_functions\联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 | -| crazy_functions\解析JupyterNotebook.py | 对Jupyter Notebook进行代码解析 | -| crazy_functions\解析项目源代码.py | 对指定编程语言的源代码进行解析 | -| crazy_functions\询问多个大语言模型.py | 使用多个大语言模型对输入进行处理和回复 | -| crazy_functions\读文章写摘要.py | 对论文进行解析和全文摘要生成 | -| crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | -| crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | 
-| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | - -## 接下来请你逐文件分析下面的工程[0/48] 请对下面的程序文件做一个概述: check_proxy.py - -这个文件主要包含了五个函数: - -1. `check_proxy`:用于检查代理的有效性及地理位置,输出代理配置和所在地信息。 - -2. `backup_and_download`:用于备份当前版本并下载新版本。 - -3. `patch_and_restart`:用于覆盖更新当前版本并重新启动程序。 - -4. `get_current_version`:用于获取当前程序的版本号。 - -5. `auto_update`:用于自动检查新版本并提示用户更新。如果用户选择更新,则备份并下载新版本,覆盖更新当前版本并重新启动程序。如果更新失败,则输出错误信息,并不会向用户进行任何提示。 - -还有一个没有函数名的语句`os.environ['no_proxy'] = '*'`,用于设置环境变量,避免代理网络产生意外污染。 - -此外,该文件导入了以下三个模块/函数: - -- `requests` -- `shutil` -- `os` - -## [1/48] 请对下面的程序文件做一个概述: colorful.py - -该文件是一个Python脚本,用于在控制台中打印彩色文字。该文件包含了一些函数,用于以不同颜色打印文本。其中,红色、绿色、黄色、蓝色、紫色、靛色分别以函数 print红、print绿、print黄、print蓝、print紫、print靛 的形式定义;亮红色、亮绿色、亮黄色、亮蓝色、亮紫色、亮靛色分别以 print亮红、print亮绿、print亮黄、print亮蓝、print亮紫、print亮靛 的形式定义。它们使用 ANSI Escape Code 将彩色输出从控制台突出显示。如果运行在 Linux 操作系统上,文件所执行的操作被留空;否则,该文件导入了 colorama 库并调用 init() 函数进行初始化。最后,通过一系列条件语句,该文件通过将所有彩色输出函数的名称重新赋值为 print 函数的名称来避免输出文件的颜色问题。 - -## [2/48] 请对下面的程序文件做一个概述: config.py - -这个程序文件是用来配置和参数设置的。它包含了许多设置,如API key,使用代理,线程数,默认模型,超时时间等等。此外,它还包含了一些高级功能,如URL重定向等。这些设置将会影响到程序的行为和性能。 - -## [3/48] 请对下面的程序文件做一个概述: config_private.py - -这个程序文件是一个Python脚本,文件名为config_private.py。其中包含以下变量的赋值: - -1. API_KEY:API密钥。 -2. USE_PROXY:是否应用代理。 -3. proxies:如果使用代理,则设置代理网络的协议(socks5/http)、地址(localhost)和端口(11284)。 -4. DEFAULT_WORKER_NUM:默认的工作线程数量。 -5. SLACK_CLAUDE_BOT_ID:Slack机器人ID。 -6. 
SLACK_CLAUDE_USER_TOKEN:Slack用户令牌。 - -## [4/48] 请对下面的程序文件做一个概述: core_functional.py - -这是一个名为core_functional.py的源代码文件,该文件定义了一个名为get_core_functions()的函数,该函数返回一个字典,该字典包含了各种学术翻译润色任务的说明和相关参数,如颜色、前缀、后缀等。这些任务包括英语学术润色、中文学术润色、查找语法错误、中译英、学术中英互译、英译中、找图片和参考文献转Bib。其中,一些任务还定义了预处理函数用于处理任务的输入文本。 - -## [5/48] 请对下面的程序文件做一个概述: crazy_functional.py - -此程序文件(crazy_functional.py)是一个函数插件集合,包含了多个函数插件的定义和调用。这些函数插件旨在提供一些高级功能,如解析项目源代码、批量翻译PDF文档和Latex全文润色等。其中一些插件还支持热更新功能,不需要重启程序即可生效。文件中的函数插件按照功能进行了分类(第一组和第二组),并且有不同的调用方式(作为按钮或下拉菜单)。 - -## [6/48] 请对下面的程序文件做一个概述: main.py - -这是一个Python程序文件,文件名为main.py。该程序包含一个名为main的函数,程序会自动运行该函数。程序要求已经安装了gradio、os等模块,会根据配置文件加载代理、model、API Key等信息。程序提供了Chatbot功能,实现了一个对话界面,用户可以输入问题,然后Chatbot可以回答问题或者提供相关功能。程序还包含了基础功能区、函数插件区、更换模型 & SysPrompt & 交互界面布局、备选输入区,用户可以在这些区域选择功能和插件进行使用。程序中还包含了一些辅助模块,如logging等。 - -## [7/48] 请对下面的程序文件做一个概述: multi_language.py - -该文件multi_language.py是用于将项目翻译成不同语言的程序。它包含了以下函数和变量:lru_file_cache、contains_chinese、split_list、map_to_json、read_map_from_json、advanced_split、trans、trans_json、step_1_core_key_translate、CACHE_FOLDER、blacklist、LANG、TransPrompt、cached_translation等。注释和文档字符串提供了有关程序的说明,例如如何使用该程序,如何修改“LANG”和“TransPrompt”变量等。 - -## [8/48] 请对下面的程序文件做一个概述: theme.py - -这是一个Python源代码文件,文件名为theme.py。此文件中定义了一个函数adjust_theme,其功能是自定义gradio应用程序的主题,包括调整颜色、字体、阴影等。如果允许,则添加一个看板娘。此文件还包括变量advanced_css,其中包含一些CSS样式,用于高亮显示代码和自定义聊天框样式。此文件还导入了get_conf函数和gradio库。 - -## [9/48] 请对下面的程序文件做一个概述: toolbox.py - -toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和小工具函数,用于协助实现聊天机器人所需的各种功能,包括文本处理、功能插件加载、异常检测、Markdown格式转换,文件读写等等。此外,该库还包含一些依赖、参数配置等信息。该库易于理解和维护。 - -## [10/48] 请对下面的程序文件做一个概述: crazy_functions\crazy_functions_test.py - -这个文件是一个Python测试模块,用于测试crazy_functions中的各种函数插件。这些函数包括:解析Python项目源代码、解析Cpp项目源代码、Latex全文润色、Markdown中译英、批量翻译PDF文档、谷歌检索小助手、总结word文档、下载arxiv论文并翻译摘要、联网回答问题、和解析Jupyter Notebooks。对于每个函数插件,都有一个对应的测试函数来进行测试。 - -## [11/48] 请对下面的程序文件做一个概述: crazy_functions\crazy_utils.py - -这个Python文件中包括了两个函数: - -1. `input_clipping`: 该函数用于裁剪输入文本长度,使其不超过一定的限制。 -2. 
`request_gpt_model_in_new_thread_with_ui_alive`: 该函数用于请求 GPT 模型并保持用户界面的响应,支持多线程和实时更新用户界面。 - -这两个函数都依赖于从 `toolbox` 和 `request_llm` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 - -## [12/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文润色.py - -这是一个Python程序文件,文件名为crazy_functions\Latex全文润色.py。文件包含了一个PaperFileGroup类和三个函数Latex英文润色,Latex中文润色和Latex英文纠错。程序使用了字符串处理、正则表达式、文件读写、多线程等技术,主要作用是对整个Latex项目进行润色和纠错。其中润色和纠错涉及到了对文本的语法、清晰度和整体可读性等方面的提升。此外,该程序还参考了第三方库,并封装了一些工具函数。 - -## [13/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文翻译.py - -这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llm` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 - -## [14/48] 请对下面的程序文件做一个概述: crazy_functions\__init__.py - -这是一个Python模块的初始化文件(__init__.py),命名为"crazy_functions"。该模块包含了一些疯狂的函数,但该文件并没有实现这些函数,而是作为一个包(package)来导入其它的Python模块以实现这些函数。在该文件中,没有定义任何类或函数,它唯一的作用就是标识"crazy_functions"模块是一个包。 - -## [15/48] 请对下面的程序文件做一个概述: crazy_functions\下载arxiv论文翻译摘要.py - -这是一个 Python 程序文件,文件名为 `下载arxiv论文翻译摘要.py`。程序包含多个函数,其中 `下载arxiv论文并翻译摘要` 函数的作用是下载 `arxiv` 论文的 PDF 文件,提取摘要并使用 GPT 对其进行翻译。其他函数包括用于下载 `arxiv` 论文的 `download_arxiv_` 函数和用于获取文章信息的 `get_name` 函数,其中涉及使用第三方库如 requests, BeautifulSoup 等。该文件还包含一些用于调试和存储文件的代码段。 - -## [16/48] 请对下面的程序文件做一个概述: crazy_functions\代码重写为全英文_多线程.py - -该程序文件是一个多线程程序,主要功能是将指定目录下的所有Python代码文件中的中文内容转化为英文,并将转化后的代码存储到一个新的文件中。其中,程序使用了GPT-3等技术进行中文-英文的转化,同时也进行了一些Token限制下的处理,以防止程序发生错误。程序在执行过程中还会输出一些提示信息,并将所有转化过的代码文件存储到指定目录下。在程序执行结束后,还会生成一个任务执行报告,记录程序运行的详细信息。 - -## [17/48] 请对下面的程序文件做一个概述: crazy_functions\图片生成.py - -该程序文件提供了一个用于生成图像的函数`图片生成`。函数实现的过程中,会调用`gen_image`函数来生成图像,并返回图像生成的网址和本地文件地址。函数有多个参数,包括`prompt`(激励文本)、`llm_kwargs`(GPT模型的参数)、`plugin_kwargs`(插件模型的参数)等。函数核心代码使用了`requests`库向OpenAI API请求图像,并做了简单的处理和保存。函数还更新了交互界面,清空聊天历史并显示正在生成图像的消息和最终的图像网址和预览。 - -## [18/48] 请对下面的程序文件做一个概述: crazy_functions\对话历史存档.py - -这个文件是名为crazy_functions\对话历史存档.py的Python程序文件,包含了4个函数: - -1. write_chat_to_file(chatbot, history=None, file_name=None):用来将对话记录以Markdown格式写入文件中,并且生成文件名,如果没指定文件名则用当前时间。写入完成后将文件路径打印出来。 - -2. gen_file_preview(file_name):从传入的文件中读取内容,解析出对话历史记录并返回前100个字符,用于文件预览。 - -3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。 - -4. 
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): the main function, which saves the current conversation record and notifies the user. If the user wants to load a saved history, it calls read_file_to_chat() to update the chat display; if the user wants to delete the history, it calls the 删除所有本地对话历史记录() function to perform the deletion.

## [19/48] Please provide an overview of the following program file: crazy_functions\总结word文档.py

This file implements a Word-document summarization feature. It uses Python's docx library to read .docx files and pywin32 to read .doc files. The program first searches for the files to process based on the txt argument, parses each one, splits the content into fragments of a fixed length, and then summarizes each fragment in Chinese via the request_gpt_model_in_new_thread_with_ui_alive function from another module. Finally, all summaries are written to a file and shown in the UI.

## [20/48] Please provide an overview of the following program file: crazy_functions\总结音视频.py

This file contains two functions, split_audio_file() and AnalyAudio(), along with the necessary imports and a few helper functions. split_audio_file splits an audio file into segments of equal duration and returns a list of the segment file paths, while AnalyAudio analyzes the audio file: it transcribes it with the whisper model and summarizes the content with a GPT model, writing all summaries to a result file.

## [21/48] Please provide an overview of the following program file: crazy_functions\批量Markdown翻译.py

This file, `批量Markdown翻译.py`, reads Markdown files, splits long text apart, translates the Markdown (English-to-Chinese and Chinese-to-English), collects the results, and exits. It uses multithreading for efficiency and depends on the `tiktoken` library, which may need to be installed separately. The file contains a few other functions and classes unrelated to the feature its name describes.

## [22/48] Please provide an overview of the following program file: crazy_functions\批量总结PDF文档.py

This Python script, crazy_functions\批量总结PDF文档.py, imports a set of libraries and utility functions and then defines five functions, including an error-handling decorator (@CatchException), for batch-summarizing PDF documents. The main function parses the PDFs and calls the model to generate Chinese and English abstracts.

## [23/48] Please provide an overview of the following program file: crazy_functions\批量总结PDF文档pdfminer.py

This file is a function plugin for batch-summarizing PDF documents. It uses the pdfminer plugin and the BeautifulSoup library to extract the text from PDFs, processes each PDF separately, and generates Chinese and English abstracts. It also contains some helper utilities and an exception-handling decorator.

## [24/48] Please provide an overview of the following program file: crazy_functions\批量翻译PDF文档_多线程.py

This Python script, "批量翻译PDF文档_多线程.py", mainly uses "toolbox", "request_gpt_model_in_new_thread_with_ui_alive", "request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency", "colorful", and functions from the custom "crazy_utils" module. It implements batch translation of PDF documents: it automatically parses the basic information in a PDF, splits the file recursively, translates and processes all of the paper's content, and produces the translated output files (both md and html). The feature is fairly complex, calling many functions and libraries and involving multithreading and UI updates. The file has detailed comments and clear variable names, so the code is easy to read.

## [25/48] Please provide an overview of the following program file: crazy_functions\理解PDF文档内容.py

This file implements a function named "理解PDF文档内容" (understand PDF content), which extracts the abstract and the main points of each section from an input PDF and answers academic questions based on the surrounding context during extraction. It depends on several helper functions and third-party libraries and handles the exceptions that may occur during execution.

## [26/48] Please provide an overview of the following program file: crazy_functions\生成函数注释.py

This Python module, "生成函数注释.py", defines two functions: the main function "生成函数注释", which generates function comments, and "批量生成函数注释", which adds exception catching via a decorator. It depends on "toolbox" and the local "crazy_utils" module, and at runtime uses multithreading and a GPT model to generate the comments. The generated comments are output as a Markdown table and written to the history file.

## [27/48] Please provide an overview of the following program file: crazy_functions\联网的ChatGPT.py

This Python file, `联网的ChatGPT.py`, defines a function `连接网络回答问题` that answers a given question by crawling search-engine results and visiting web pages, then composing the answer with the ChatGPT model. The file also includes utility functions, for example for scraping text from web pages and accessing pages through a proxy.

## [28/48] Please provide an overview of the following program file: crazy_functions\解析JupyterNotebook.py

This file contains two functions, `parseNotebook()` and `解析ipynb文件()`, plus some imported utility functions and classes. `parseNotebook()` parses a Jupyter Notebook file into text code blocks, while `解析ipynb文件()` parses multiple notebook files, applying `parseNotebook()` to each and doing some additional processing. The functions handle input and output with multithreading and write the results to a file.

## [29/48] Please provide an overview of the following program file: crazy_functions\解析项目源代码.py

This is a source-code-analysis Python file that defines several functions, including ones for parsing a Python project, a C project, a C project's header files, and a Java project. The 解析源代码新 function performs the actual analysis and report generation: it reads each source file, builds the corresponding request, sends it to chatgpt on multiple threads for analysis, writes the results to a file, and produces an overall summary. Finally it calls update_ui to refresh the interface, completing the source-code analysis.

## [30/48] Please provide an overview of the following program file: crazy_functions\询问多个大语言模型.py

This file contains two functions, 同时问询() and 同时问询_指定模型(), which send the user's input to several large language models at once and return each model's reply. 同时问询() uses ChatGPT and ChatGLM by default, while 同时问询_指定模型() lets you specify which models to use. The file also references other modules and libraries.

## [31/48] Please provide an overview of the following program file: crazy_functions\读文章写摘要.py

This Python module, crazy_functions\读文章写摘要.py, contains two functions. The main one, "读文章写摘要", parses the tex files in a given folder, generates a summary for each file's content, and then produces a full-text abstract from the per-fragment summaries. The second, "解析Paper", parses a single paper file. It uses utility functions and libraries such as update_ui, CatchException, report_execption, and write_results_to_file.

## [32/48] Please provide an overview of the following program file: crazy_functions\谷歌检索小助手.py
This file is a Python module named "谷歌检索小助手.py" (Google Scholar assistant). It contains two functions: "get_meta_information()", which extracts the metadata of all related academic papers from a given URL, and the main function "谷歌检索小助手()", which analyzes the articles that appear on a Google Scholar search page provided by the user and extracts the relevant information. "谷歌检索小助手()" depends on "get_meta_information()" and also calls other Python modules such as "arxiv", "math", and "bs4".

## [33/48] Please provide an overview of the following program file: crazy_functions\高级功能函数模板.py

This file defines a function named 高阶功能模板函数. It takes several parameters, including the input text, GPT model parameters, plugin parameters, the chat display handle, and the chat history, and sends a request that uses the Unsplash API to return related images. To avoid input overflow it clears the history at the start, and it includes some UI-update statements. The file also depends on two other modules, CatchException and update_ui, and on a function named request_gpt_model_in_new_thread_with_ui_alive from the crazy_utils module (presumably a custom utility package).

## [34/48] Please provide an overview of the following program file: request_llm\bridge_all.py

This file contains two functions, predict and predict_no_ui_long_connection, used to hold conversations with different LLMs. It also contains a lazyloadTiktoken class and an LLM_CATCH_EXCEPTION decorator; lazyloadTiktoken lazily loads a model's tokenizer, and LLM_CATCH_EXCEPTION handles errors. The file also defines a number of global variables and a model-information dictionary used to reference and configure the LLMs.

## [35/48] Please provide an overview of the following program file: request_llm\bridge_chatglm.py

This Python file, `bridge_chatglm.py`, defines a class `GetGLMHandle` and three methods: `predict_no_ui_long_connection`, `predict`, and `stream_chat`. It depends on several libraries such as `transformers` and `sentencepiece`. The file implements a chatbot that generates replies with the ChatGLM model, supporting both single-threaded and multithreaded use. On startup the program must load the ChatGLM model and tokenizer, which takes some time. Parameters set in the configuration file `config.py` affect the model's RAM and VRAM usage, so the program can freeze a low-spec machine.

## [36/48] Please provide an overview of the following program file: request_llm\bridge_chatgpt.py

This Python file, request_llm\bridge_chatgpt.py, mainly provides three functions, predict, predict_no_ui, and predict_no_ui_long_connection, which send requests to chatGPT, wait for replies, and collect the output. It also contains helper functions for handling connection errors, building HTTP requests, and so on. The code is clearly structured and uses several custom functions and modules.

## [37/48] Please provide an overview of the following program file: request_llm\bridge_jittorllms_llama.py

This file implements a chatbot based on the JittorLLMs model. It consists of:
1. the GetGLMHandle class, a process class that loads the JittorLLMs model and receives and handles requests;
2. the predict_no_ui_long_connection function, a multithreaded method for running the chatbot in the background;
3. the predict function, a single-threaded method for interactive use on the front-end page, taking user input and returning the reply.

The file also contains some helper functions and global imports such as importlib, time, and threading.

## [38/48] Please provide an overview of the following program file: request_llm\bridge_jittorllms_pangualpha.py

This file implements chat functionality using jittorllms (a machine-learning model). It covers loading the model and its parameters and sending and receiving messages, using multiprocessing and multithreading for performance and efficiency. It also includes functions for handling dependencies and for preprocessing.

## [39/48] Please provide an overview of the following program file: request_llm\bridge_jittorllms_rwkv.py

This Python program, request_llm\bridge_jittorllms_rwkv.py, depends on transformers, time, threading, importlib, multiprocessing, and other libraries. It defines a GetGLMHandle class that loads the jittorllms model parameters and a stream_chat method for interacting with the model, plus predict_no_ui_long_connection and predict methods that handle the history, call the jittorllms model, receive the reply, and output the result.

## [40/48] Please provide an overview of the following program file: request_llm\bridge_moss.py

This Python source file, request_llm\bridge_moss.py, defines a GetGLMHandle class and two functions, predict_no_ui_long_connection and predict.

The GetGLMHandle class inherits from Process (multiprocessing). Its main job is to start a child process that loads the MOSS model parameters and to communicate between the parent and child processes through a Pipe. It also defines the check_dependency, moss_init, run, and stream_chat methods: check_dependency and moss_init initialize the child process, run is the child process's main loop, and stream_chat implements the parent/child interaction.

The predict_no_ui_long_connection function is the multithreaded method: it uses GetGLMHandle to load the MOSS parameters and then interacts with the child process via stream_chat.

The predict function is the single-threaded method: it calls update_ui to push MOSS's replies to the UI (user interface) in real time and runs a named function (additional_fn) to preprocess the input.

## [41/48] Please provide an overview of the following program file: request_llm\bridge_newbing.py

This file, `bridge_newbing.py`, has three parts:

The first part imports the `NewbingChatbot` class from the `edge_gpt` module with a from statement.

The second part defines `NewBingHandle`, a subclass of the process class, which checks dependencies and starts the process. It also defines a multithreaded method `predict_no_ui_long_connection` and a single-threaded method `predict` for communicating with NewBing.

The third part defines a global variable `newbing_handle` and exports the `predict_no_ui_long_connection` and `predict` methods so that other programs can call them.

## [42/48] Please provide an overview of the following program file: request_llm\bridge_newbingfree.py
This Python file has three parts. The first is the chatbot program taken from edge_gpt_free.py. The second is a Worker child process used to call that main body. The third provides two functions, predict_no_ui_long_connection and predict, for calling the NewBing chatbot and returning its responses; predict also takes parameters that control the chatbot's reply and update the UI.

## [43/48] Please provide an overview of the following program file: request_llm\bridge_stackclaude.py

This Python source file, request_llm\bridge_stackclaude.py, has three main parts:

The first part defines a Slack API Client class that sends, receives, and continuously listens for Slack messages, handling the interaction with the Slack API.

The second part defines the ClaudeHandle class, which inherits from Process and creates a Worker child process that calls the main body, implementing the interaction between Claude and the user.

The third part defines the predict_no_ui_long_connection and predict functions, which obtain Claude's reply mainly by calling the stream_chat method of a ClaudeHandle object and update the UI accordingly. predict is the single-threaded method, while predict_no_ui_long_connection is multithreaded.

## [44/48] Please provide an overview of the following program file: request_llm\bridge_tgui.py

This Python file, request_llm\bridge_tgui.py, contains functions for interacting with the chatbot UI and for completing text-generation tasks by communicating with a remote LLM over the WebSocket protocol; the most important are predict() and predict_no_ui_long_connection(). It also has helper functions such as random_hash(). The whole file was modified once as part of a collaboration.

## [45/48] Please provide an overview of the following program file: request_llm\edge_gpt.py

This file is a Python program for calling the Bing chatbot API. It is made up of several classes and helper functions, can ask questions within a given conversation, and communicates with the remote service over websocket. It implements a chatbot that provides AI chat to the user.

## [46/48] Please provide an overview of the following program file: request_llm\edge_gpt_free.py

This file is a conversation API that sends messages through Chathub and returns responses. It uses the aiohttp and httpx libraries for network requests; most of its functions and constants build request data or request headers. It also contains a Conversation class that implements the conversational interaction.

## [47/48] Please provide an overview of the following program file: request_llm\test_llms.py

This file is a Python program for unit-testing the llm models. It imports the module "request_llm.bridge_newbingfree", then calls that module's predict_no_ui_long_connection() function three times and prints the results. There are also some commented-out code blocks, likewise about model prediction.

## Use one Markdown table to briefly describe the functions of the following files:
check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Description |
| ------ | ------ |
| check_proxy.py | Checks proxy validity and geographic location |
| colorful.py | Prints colored text to the console |
| config.py | Configuration and parameter settings |
| config_private.py | Private configuration and parameter settings |
| core_functional.py | Core functions and parameter settings |
| crazy_functional.py | Collection of advanced function plugins |
| main.py | A Chatbot program offering academic translation, text processing, and other query services |
| multi_language.py | Detects and translates between different languages |
| theme.py | Customizes the gradio application theme |
| toolbox.py | Utility library supporting the various features |
| crazy_functions\crazy_functions_test.py | Tests the functions in crazy_functions |
| crazy_functions\crazy_utils.py | Utility functions for string handling, exception detection, Markdown conversion, etc. |
| crazy_functions\Latex全文润色.py | Polishes and proofreads an entire LaTeX project |
| crazy_functions\Latex全文翻译.py | Translates an entire LaTeX project |
| crazy_functions\__init__.py | Package init file marking `crazy_functions` as a package |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads an `arxiv` paper's PDF, extracts and translates the abstract |

These source files provide the basic text and language processing features, utility functions, and advanced plugins that allow the Chatbot to handle a wide range of academic text tasks, including polishing, translation, search, download, and parsing.

## Use one Markdown table to briefly describe the functions of the following files:
crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Brief description |
| --- | --- |
| 代码重写为全英文_多线程.py | Converts the Chinese content of Python source files into English |
| 图片生成.py | Generates the corresponding image from a prompt text using a GPT model |
| 对话历史存档.py | Writes each conversation record to a Markdown file |
| 总结word文档.py | Summarizes input Word documents |
| 总结音视频.py | Summarizes input audio and video files |
| 批量Markdown翻译.py | Translates the Markdown files under a given directory between Chinese and English |
| 批量总结PDF文档.py | Splits PDF files and generates summaries |
| 批量总结PDF文档pdfminer.py | Extracts text from PDF files and generates summaries |
| 批量翻译PDF文档_多线程.py | Translates the PDF files under a given directory between Chinese and English |
| 理解PDF文档内容.py | Summarizes PDF files and answers questions about them |
| 生成函数注释.py | Automatically generates comments for Python functions |
| 联网的ChatGPT.py | Answers chat questions using a web crawler and the ChatGPT model |
| 解析JupyterNotebook.py | Parses the code in Jupyter Notebooks |
| 解析项目源代码.py | Parses source code in the specified programming languages |
| 询问多个大语言模型.py | Processes input with multiple large language models and returns their replies |
| 读文章写摘要.py | Parses papers and generates full-text abstracts |

Overall function of the program: it provides a set of features for processing text, files, and code, using various language models, multithreading, network requests, and data-parsing techniques to improve efficiency and accuracy.

## Use one Markdown table to briefly describe the functions of the following files:
crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py. Based on the analysis above, summarize the overall function of the program in one sentence.

| File | Description |
| --- | --- |
| crazy_functions\谷歌检索小助手.py | Extracts metadata about the articles on a Google Scholar search page. |
| crazy_functions\高级功能函数模板.py | Replies to user input with related images via the Unsplash API. |
| request_llm\bridge_all.py | Holds conversations with different LLMs. |
| request_llm\bridge_chatglm.py | Generates replies with the ChatGLM model, single- and multithreaded. |
| request_llm\bridge_chatgpt.py | Holds conversations based on the GPT model. |
| request_llm\bridge_jittorllms_llama.py | Holds conversations with the JittorLLMs model, single- and multithreaded. |
| request_llm\bridge_jittorllms_pangualpha.py | Holds conversations with the JittorLLMs model using multiprocessing and multithreading. |
| request_llm\bridge_jittorllms_rwkv.py | Chats with the JittorLLMs model, with options such as history handling and parameter tuning. |
| request_llm\bridge_moss.py | Loads the MOSS model for conversations. |
| request_llm\bridge_newbing.py | Converses through the Newbing chatbot, single- and multithreaded. |
| request_llm\bridge_newbingfree.py | Chatbot text generation based on the Bing chatbot API. |
| request_llm\bridge_stackclaude.py | Interaction between Claude and the user via the Slack API. |
| request_llm\bridge_tgui.py | Connects the chatbot to the UI over websocket. |
| request_llm\edge_gpt.py | Provides a chatbot service by calling the Bing chatbot API. |
| request_llm\edge_gpt_free.py | Chatbot API implemented with the aiohttp and httpx libraries. |
| request_llm\test_llms.py | Unit tests for the llm models. |
| Overall program function | Implements different kinds of chatbots that generate text from the input. |

diff --git a/spaces/zhang-wei-jian/docker/node_modules/nopt/bin/nopt.js b/spaces/zhang-wei-jian/docker/node_modules/nopt/bin/nopt.js deleted file mode 100644 index df90c729af693401302fcbfd74ba1b792e817de3..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/nopt/bin/nopt.js +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env node -var nopt = require("../lib/nopt") - , types = { num: Number - , bool: Boolean - , help: Boolean - , list: Array - , "num-list": [Number, Array] - , "str-list": [String, Array] - , "bool-list": [Boolean, Array] - , str: String } - , shorthands = { s: [ "--str", "astring" ] - , b: [ "--bool" ] - , nb: [ "--no-bool" ] - , tft: [ "--bool-list", "--no-bool-list", "--bool-list", "true" ] - , "?": ["--help"] - , h: ["--help"] - , H: ["--help"] - , n: [ "--num", "125" ] } - , parsed = nopt( types - , shorthands - , process.argv - , 2 ) - -console.log("parsed", parsed) - -if (parsed.help) { - console.log("") - console.log("nopt cli tester") - console.log("") - console.log("types") - console.log(Object.keys(types).map(function M (t) { - var type = types[t] - if (Array.isArray(type)) { - return [t, type.map(function (type) { return type.name })] - } - return [t, type && type.name] - }).reduce(function (s, i) { - s[i[0]] = i[1] - return s - }, {})) - console.log("") - console.log("shorthands") - console.log(shorthands)
-} diff --git a/spaces/zhaoys/wfms-kuiwenc/src/pages/api/proxy.ts b/spaces/zhaoys/wfms-kuiwenc/src/pages/api/proxy.ts deleted file mode 100644 index 6682043259862e075cecdefddc3915c7d2741664..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/pages/api/proxy.ts +++ /dev/null @@ -1,34 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { url, headers = {}, method = 'GET', body } = req.body - if (!url) { - return res.end('{}') - } - Object.assign(headers, createHeaders(req.cookies)) - const id = headers['x-forwarded-for'] - debug(id, method, url, headers, body ?? '') - const response = await fetch(url, { - headers, - method, - body, - redirect: 'manual' - }) - const text = await response.text() - res.writeHead(200, { - 'Content-Type': 'application/text; charset=UTF-8', - 'x-url': response.url, - 'x-status': response.status, - }) - res.end(text) - } catch (e) { - console.log(e) - res.end(String(e)) - return - } -} diff --git a/spaces/zhenwusw/JoJoGAN/e4e/training/ranger.py b/spaces/zhenwusw/JoJoGAN/e4e/training/ranger.py deleted file mode 100644 index 3d63264dda6df0ee40cac143440f0b5f8977a9ad..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/training/ranger.py +++ /dev/null @@ -1,164 +0,0 @@ -# Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer. - -# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer -# and/or -# https://github.com/lessw2020/Best-Deep-Learning-Optimizers - -# Ranger has now been used to capture 12 records on the FastAI leaderboard. - -# This version = 20.4.11 - -# Credits: -# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization -# RAdam --> https://github.com/LiyuanLucasLiu/RAdam -# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code. -# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610 - -# summary of changes: -# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init. -# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights), -# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues. -# changes 8/31/19 - fix references to *self*.N_sma_threshold; -# changed eps to 1e-5 as better default than 1e-8. 
- -import math -import torch -from torch.optim.optimizer import Optimizer - - -class Ranger(Optimizer): - - def __init__(self, params, lr=1e-3, # lr - alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options - betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options - use_gc=True, gc_conv_only=False - # Gradient centralization on or off, applied to conv layers only or conv + fc layers - ): - - # parameter checks - if not 0.0 <= alpha <= 1.0: - raise ValueError(f'Invalid slow update rate: {alpha}') - if not 1 <= k: - raise ValueError(f'Invalid lookahead steps: {k}') - if not lr > 0: - raise ValueError(f'Invalid Learning Rate: {lr}') - if not eps > 0: - raise ValueError(f'Invalid eps: {eps}') - - # parameter comments: - # beta1 (momentum) of .95 seems to work better than .90... - # N_sma_threshold of 5 seems better in testing than 4. - # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you. - - # prep defaults and init torch.optim base - defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, - eps=eps, weight_decay=weight_decay) - super().__init__(params, defaults) - - # adjustable threshold - self.N_sma_threshhold = N_sma_threshhold - - # look ahead params - - self.alpha = alpha - self.k = k - - # radam buffer for state - self.radam_buffer = [[None, None, None] for ind in range(10)] - - # gc on or off - self.use_gc = use_gc - - # level of gradient centralization - self.gc_gradient_threshold = 3 if gc_conv_only else 1 - - def __setstate__(self, state): - super(Ranger, self).__setstate__(state) - - def step(self, closure=None): - loss = None - - # Evaluate averages and grad, update param tensors - for group in self.param_groups: - - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data.float() - - if grad.is_sparse: - raise RuntimeError('Ranger optimizer does not support sparse gradients') - - p_data_fp32 = p.data.float() - - state = self.state[p] # get state dict for this param - - if len(state) == 0: # if first time to run...init dictionary with our desired entries - # if self.first_run_check==0: - # self.first_run_check=1 - # print("Initializing slow buffer...should not see this at load from saved model!") - state['step'] = 0 - state['exp_avg'] = torch.zeros_like(p_data_fp32) - state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) - - # look ahead weight storage now in state dict - state['slow_buffer'] = torch.empty_like(p.data) - state['slow_buffer'].copy_(p.data) - - else: - state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) - state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) - - # begin computations - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - # GC operation for Conv layers and FC layers - if grad.dim() > self.gc_gradient_threshold: - grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True)) - - state['step'] += 1 - - # compute variance mov avg - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - # compute mean moving avg - exp_avg.mul_(beta1).add_(1 - beta1, grad) - - buffered = self.radam_buffer[int(state['step'] % 10)] - - if state['step'] == buffered[0]: - N_sma, step_size = buffered[1], buffered[2] - else: - buffered[0] = state['step'] - beta2_t = beta2 ** state['step'] - N_sma_max = 2 / (1 - beta2) - 1 - N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) - buffered[1] = N_sma - if N_sma > self.N_sma_threshhold: - step_size = math.sqrt( - (1 - beta2_t) * 
(N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( - N_sma_max - 2)) / (1 - beta1 ** state['step']) - else: - step_size = 1.0 / (1 - beta1 ** state['step']) - buffered[2] = step_size - - if group['weight_decay'] != 0: - p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) - - # apply lr - if N_sma > self.N_sma_threshhold: - denom = exp_avg_sq.sqrt().add_(group['eps']) - p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) - else: - p_data_fp32.add_(-step_size * group['lr'], exp_avg) - - p.data.copy_(p_data_fp32) - - # integrated look ahead... - # we do it at the param level instead of group level - if state['step'] % group['k'] == 0: - slow_p = state['slow_buffer'] # get access to slow param tensor - slow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha - p.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor - - return loss \ No newline at end of file diff --git a/spaces/zomehwh/sovits-goldship/inference/slicer.py b/spaces/zomehwh/sovits-goldship/inference/slicer.py deleted file mode 100644 index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-goldship/inference/slicer.py +++ /dev/null @@ -1,142 +0,0 @@ -import librosa -import torch -import torchaudio - - -class Slicer: - def __init__(self, - sr: int, - threshold: float = -40., - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000): - if not min_length >= min_interval >= hop_size: - raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size') - if not max_sil_kept >= hop_size: - raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size') - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.) - self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)] - else: - return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = librosa.to_mono(waveform) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. 
Record the range of silent frames to be removed. - if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start: i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin() - pos += i - self.max_sil_kept - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. - total_frames = rms_list.shape[0] - if silence_start is not None and total_frames - silence_start >= self.min_interval: - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. - if len(sil_tags) == 0: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - else: - chunks = [] - # 第一段静音并非从头开始,补上有声片段 - if sil_tags[0][0]: - chunks.append( - {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"}) - for i in range(0, len(sil_tags)): - # 标识有声片段(跳过第一段) - if i: - chunks.append({"slice": False, - "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"}) - # 标识所有静音片段 - chunks.append({"slice": True, - "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"}) - # 最后一段静音并非结尾,补上结尾片段 - if sil_tags[-1][1] * self.hop_size < len(waveform): - chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"}) - chunk_dict = {} - for i in range(len(chunks)): - chunk_dict[str(i)] = chunks[i] - return chunk_dict - - -def cut(audio_path, db_thresh=-30, min_len=5000): - audio, sr = librosa.load(audio_path, sr=None) - slicer = Slicer( - sr=sr, - threshold=db_thresh, - min_length=min_len - ) - chunks = slicer.slice(audio) - return chunks - - -def chunks2audio(audio_path, chunks): - chunks = dict(chunks) - audio, sr = torchaudio.load(audio_path) - if len(audio.shape) == 2 and audio.shape[1] >= 2: - audio = torch.mean(audio, dim=0).unsqueeze(0) - audio = audio.cpu().numpy()[0] - result = [] - for k, v in chunks.items(): - tag = v["split_time"].split(",") - if tag[0] != tag[1]: - result.append((v["slice"], audio[int(tag[0]):int(tag[1])])) - return result, sr
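
For reference, a minimal usage sketch of the slicer module above. It assumes the module is importable as `inference.slicer`, that a local file `example.wav` exists, and that the `soundfile` package is installed (all three are assumptions, not part of the original repository); it cuts the audio on detected silence and writes the voiced segments back to disk.

```python
# Minimal sketch, assuming the slicer module is importable as inference.slicer,
# that "example.wav" exists locally, and that soundfile is installed.
import soundfile as sf

from inference.slicer import cut, chunks2audio

audio_path = "example.wav"  # hypothetical input file

# Detect silence and build the chunk map (thresholds mirror cut()'s defaults).
chunks = cut(audio_path, db_thresh=-30, min_len=5000)

# Materialize the chunks as (is_silence, samples) pairs plus the sample rate.
segments, sr = chunks2audio(audio_path, chunks)

for i, (is_silence, samples) in enumerate(segments):
    if not is_silence:  # "slice" == True marks a silent span, so keep the rest
        sf.write(f"chunk_{i}.wav", samples, sr)
```

Each chunk's "split_time" field stores the start and end offsets in samples, so the original timing can be recovered when stitching processed segments back together.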