diff --git a/spaces/101-5/gpt4free/testing/interference_test.py b/spaces/101-5/gpt4free/testing/interference_test.py deleted file mode 100644 index e7a780d526e0ccbda8f3127d818e81a9b1ba231f..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/testing/interference_test.py +++ /dev/null @@ -1,15 +0,0 @@ -import openai - -openai.api_key = '' -openai.api_base = 'http://localhost:1337' - -chat_completion = openai.ChatCompletion.create(stream=True, - model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'write a poem about a tree'}]) - -#print(chat_completion.choices[0].message.content) - -for token in chat_completion: - - content = token['choices'][0]['delta'].get('content') - if content is not None: - print(content) \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/EssentialPIM Free 8.6 Crack Full Version Serial Keys [2021].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/EssentialPIM Free 8.6 Crack Full Version Serial Keys [2021].md deleted file mode 100644 index a81d80d8016a1c022e9de59ecb60ae9f8221ea72..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/EssentialPIM Free 8.6 Crack Full Version Serial Keys [2021].md +++ /dev/null @@ -1,143 +0,0 @@ - -

EssentialPIM Free 8.6 Crack Full Version Serial Keys

-

Are you looking for a way to manage your personal information more efficiently and conveniently? Do you want to use powerful and versatile software that can help you organize your tasks, notes, contacts, calendar, email, and more? If yes, then you might want to check out EssentialPIM, a personal information manager that has been trusted by millions of users worldwide. But what if you don't want to pay for the Pro or Business versions of the software? Is there a way to get all the features and benefits of EssentialPIM for free? In this article, we will tell you everything you need to know about EssentialPIM Free 8.6 Crack Full Version Serial Keys, including what it is, how to get it, what its advantages and disadvantages are, and more.

-

What is EssentialPIM and why do you need it?

-

EssentialPIM is a personal information manager that helps you organize your life

-

EssentialPIM is software that allows you to store, manage, and access all your personal information in one place. You can use it to create and edit tasks, notes, contacts, calendar events, email messages, passwords, and more. You can also link different items together, such as attaching files or notes to tasks or contacts, or creating reminders for events or emails. In addition, you can customize the appearance and behavior of the software according to your preferences and needs.

-

EssentialPIM Free 8.6 Crack Full Version Serial Keys


Download ===== https://byltly.com/2uKwf9



-

EssentialPIM has many features to manage your tasks, notes, contacts, calendar, email, and more

-

EssentialPIM has a user-friendly interface that lets you easily switch between different modules and views. You can also use keyboard shortcuts or drag-and-drop operations to perform various actions. Some of the features that EssentialPIM offers are:

- -

EssentialPIM can sync with various cloud services and devices

-

EssentialPIM can sync your data with various cloud services such as Google Drive, Dropbox, iCloud, OneDrive, etc. You can also sync your data with other devices such as Android phones or tablets, iPhones or iPads, Windows phones or tablets, etc. You can also export or import your data in various formats such as CSV, HTML, ICS, VCF, EML, TXT, etc.

-

How to get EssentialPIM Free 8.6 Crack Full Version Serial Keys?

-

EssentialPIM Free 8.6 Crack is a modified version of the software that bypasses the license verification

-

EssentialPIM Free 8.6 Crack is a version of the software that has been modified by some hackers or crackers to bypass the license verification process. This means that you can use the software without entering a valid serial key or activating it online. This way, you can access all the features and benefits of the Pro and Business versions of the software without paying any fees or subscriptions.

-

EssentialPIM Free 8.6 Crack Full Version Serial Keys can be downloaded from various websites

-

EssentialPIM Free 8.6 Crack Full Version Serial Keys can be downloaded from various websites that offer cracked software or serial keys. Some of these websites are:

| Name | URL |
| --- | --- |
| All tips tunes | |
| BEST PDF | |
| HOT PDF | |
-

You can also search for other websites using keywords such as "EssentialPIM Free 8.6 Crack", "EssentialPIM Free 8.6 Keygen", "EssentialPIM Free 8.6 License Key", etc.

-

EssentialPIM Free 8.6 Crack Full Version Serial Keys can be installed and activated with a few steps

-

To install and activate EssentialPIM Free 8.6 Crack Full Version Serial Keys, you need to follow these steps:

-
  1. Download the crack file from one of the websites mentioned above.
  2. Extract the file using a program such as WinRAR or WinZip.
  3. Run the setup file and follow the instructions to install the software.
  4. Copy the crack file from the extracted folder and paste it into the installation directory of the software.
  5. Run the software and enter any serial key from the crack file when prompted.
  6. Enjoy using EssentialPIM Pro Business for free!
-

What are the benefits of using EssentialPIM Free 8.6 Crack Full Version Serial Keys?

-

EssentialPIM Free 8.6 Crack Full Version Serial Keys gives you access to all the features of the Pro and Business versions

-

The Pro version of EssentialPIM has some additional features that are not available in the Free version, such as:

-



-

-

The OSGeo-Live distribution includes MB-System. OSGeo-Live is a self-contained bootable DVD, USB thumb drive or Virtual Machine based on Lubuntu that includes a wide variety of open source geospatial software.

-

You're probably going to download a bunch of games (we highly recommend Beat Saber, Sports Scramble, and SuperHot VR to get you started). But there's a whole lot more to do in VR on the Quest beyond slicing rhythm cubes and dodging bullets in slow-motion.

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Fallout 4 Remove Borders Enjoy Unlimited Exploration with This Easy Fix.md b/spaces/cihyFjudo/fairness-paper-search/Fallout 4 Remove Borders Enjoy Unlimited Exploration with This Easy Fix.md deleted file mode 100644 index 2f89f99263e94bc99a46426f6692b689181391a3..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Fallout 4 Remove Borders Enjoy Unlimited Exploration with This Easy Fix.md +++ /dev/null @@ -1,28 +0,0 @@ - -

To remove the black borders from a YouTube thumbnail, we do not need to write any separate code or CSS. Simply use the ytimg.com domain, which stands for YouTube image; it is the domain from which YouTube itself serves images such as thumbnails and icons as required.
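As a minimal sketch of that approach (VIDEO_ID is a hypothetical placeholder, not a real ID), requesting a natively 16:9 thumbnail variant from the i.ytimg.com domain avoids the letterboxed 4:3 variant entirely:

```html
<!-- VIDEO_ID is a placeholder. mqdefault.jpg (320x180) is natively 16:9 and
     has no black bars; hqdefault.jpg (480x360) is padded to 4:3 with bars. -->
<img src="https://i.ytimg.com/vi/VIDEO_ID/mqdefault.jpg" alt="video thumbnail">
```

Swapping hqdefault for mqdefault (or maxresdefault, when the uploader's source resolution is high enough) is usually all it takes to lose the bars.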

-

fallout 4 remove borders


Download File >>>>> https://tinurli.com/2uwkOa



-

After searching the net for a while for a fix for this issue, I came up with nothing; I think I had tried everything and nothing solved my problem. Then, driven by my own logic, I just wrapped the iframe of the embedded YouTube video in a div, set overflow: hidden; on this div, and made its height 2px smaller than the iframe height (on my video there was a black border at the bottom). So the wrapper div is smaller than the iframe, and by positioning it over the video you can hide the black borders you don't want. I think this is the best solution of everything I have tried so far.

-

In my case the border was about 2px high, so I made the wrapper div 2px smaller in height to hide it. In your scenario, if the border is at the top of the video or on the sides and/or has different dimensions, you have to give the wrapper div different dimensions and position it carefully so it overlaps the video where the borders are; with overflow: hidden; the borders are then hidden.
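Here is a rough sketch of that wrapper trick; the 640x360 player size and the 2px offset are assumptions from my case, so tune them to wherever your border shows up:

```html
<!-- The wrapper is 2px shorter than the iframe; overflow: hidden then
     clips the 2px black border at the bottom edge of the video. -->
<div style="width: 640px; height: 358px; overflow: hidden;">
  <iframe width="640" height="360" frameborder="0" allowfullscreen
          src="https://www.youtube.com/embed/VIDEO_ID"></iframe>
</div>
```

For a border at the top instead, you would also shift the iframe upwards, e.g. with margin-top: -2px; on the iframe, so the clipped strip is the top one.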

-

I'm not an expert; I was looking for a solution to remove the black bars from YouTube video thumbnails and found a few solutions, but they didn't work for me. I started experimenting with the solutions I found and came up with this.

-

-

Well, some mods rely on you having the borders removed, for example, Lliana's Elsweyr: Deserts of Anequina. It can also be quite useful if you're scouting for an area where you can build your mod without it conflicting with other mods.

-

The existence of such planning, which was revealed in a draft document, comes as the Biden administration deals with the fallout of two federal court orders on the border policy known as Title 42, which has been met with rebukes from Senate Democrats and immigrant advocates who have long argued it is illegal.

-

DHS officials also are worried about many Haitians who have received temporary protected status in Mexico who could later decide to come to the US. In September, the Biden administration decided to begin a mass deportation campaign, relying on Title 42 policy as a means to remove Haitians from a camp in Del Rio, Texas. In September and October alone, DHS officials used Title 42 to expel thousands of Haitians back to Haiti.

-

The Biden administration's decision last week to drop border restrictions that were put in place because of the COVID-19 pandemic is causing a lot of fallout in Washington. A COVID aid bill the president says is crucial to dealing with the next phase of this pandemic has been stalled as Republicans try to force the CDC to keep the restrictions known as Title 42 in place. Now some Senate Democrats are joining Republicans.

-

Nicola said she and her 81-year-old father, Melvin Sheppard, were horrified to receive a letter from Merthyr Tydfil Council in late July saying they would now have to remove the borders from around both graves at Pant Cemetery by the end of August.

-

Nicola said: "My brother and my mam died six months apart last year very suddenly. Then the week before August we had a letter to say there wasn't enough room between plots to cut the grass. There's been uproar. We were told we needed to sort it out by the end of August. Then they put a sign up in the cemetery telling people to not put borders up."

-

"I hope nothing happens with it. I think it's disgusting if they have to do it. But if we have to do it, we have to do it and clear everything just like everyone else. There's hundreds and hundreds of graves there with borders as you do. We don't want them to do it so it would be us doing it. We will have to take all our belongings from there and just accept it. It will be very painful to have to do that."

-

Windowed typically refers to a mode where the game presents the rendered image in a regular window that is not maximized (fullscreen) on the monitor. The window in question may or may not use window decorations (borders) around the contents of the window, typically indicated by the use of bordered and borderless in the name of the mode. This mode should not be mistaken for borderless fullscreen windowed mode, which specifically removes the window decorations (borderless) and maximizes (fullscreen) the window to have it cover the whole monitor.

-

The Democratic Unionist Party (DUP) supported Brexit, but "opposed the protocol and voted against it in the House of Commons." Some Unionists, according to The Independent, believed that: "the Brexit deal has cut NI adrift from the rest of the UK, pushing Belfast further away from London, paving the way for an economic united Ireland", and loyalists called for the arrangement to be removed or, furthermore, for the collapse of the devolved administration. The governing DUP, however, said that "It would be a foolish idea to collapse devolution. It would remove the party who opposed the NI Protocol and give all power for Northern Ireland back to the UK government, who created and implemented the NI Protocol."[12] Speaking before Westminster voted to ratify the Trade Agreement, Lord Empey (chairman of the Ulster Unionist Party) argued that the Protocol came about because the DUP had indicated acceptance of it. He said that he had "pointed out that, immediately this document was released, Arlene Foster and her DUP colleagues endorsed these proposals, describing them as 'a serious and sensible way forward'".[13]

-

The SweetFX Border effect places a border around the image. This can be used to place black borders at the top and bottom of the image to produce a letterbox appearance similar to that seen in 2.35:1 aspect ratio movies when viewed on a 16:9 aspect ratio HDTV.

-

A top border will automatically have a corresponding bottom border, and a left border will automatically have a corresponding right border. Horizontal and vertical borders may be enabled separately or together.

-

Border_width specifies whether or not to add vertical and horizontal borders, and if so, how large should they be. Keep in mind that if border_width is set to (0,0), then no borders will be displayed even if USE_BORDER is set to 1 because the borders will have no size.

-

The image will be cropped. The game will not be resized to accommodate the border. This means that borders will chop off portions of the image and result in a loss of display information. For example, in 002 below, the horizontal borders have been set to 150 pixels. Due to this size, the in-game HUD information cannot be viewed.

-

Both vertical and both horizontal borders will be of the same size. If the horizontal border is set to 32 pixels, then it will be 32 pixels at the top and 32 pixels at the bottom for a total of 64 cropped image pixels. The image is still present, but it is obstructed by the border.
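Putting the above together, the relevant lines in a SweetFX settings file would look something like the sketch below. This is illustrative only: macro names can vary between SweetFX versions, and I am assuming the first float2 component is the left/right border width and the second is the top/bottom height, so check your own SweetFX_settings.txt:

```c
#define USE_BORDER 1 // enable the Border effect (0 disables it)

// float2(horizontal, vertical) border sizes in pixels: 0px on the left and
// right edges, 32px at both the top and bottom, cropping 64 image pixels.
#define border_width float2(0, 32)
```

And as noted above, float2(0, 0) draws nothing even when USE_BORDER is 1, because the borders have no size.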

-

\"At this critical time in our nation's history, we need leaders who will combat the left's socialist, big-government, America-last agenda,\" Palin said. \"This country was built by heroes, and the radical left dishonors their legacies by opening our borders to illegal immigrants, mortgaging our children's future, and selling out our nation's interests to the highest bidder.\"

-

Women and children detained along the border usually spend one to three nights, and sometimes longer, in CBP holding cells, where they sleep on the floor, often with only a Mylar blanket, similar to the foil wrappers used by marathon runners, to protect them from the cold. Border agents sometimes require them to remove and discard sweaters or other layers of clothing, purportedly for security reasons, before they enter the holding cells.

-

It is important to acknowledge that the people of Afghanistan are the primary casualties of the conflict. But Pakistan could also suffer serious fallout from continued instability and increased violence in Afghanistan, including a significant influx of refugees and the resurgence of local militant groups energized by Taliban victories.

-

Scrap houses, sidewalks, that once-irremovable vegetation: nearly everything tied to a settlement can be removed to create more build space. It's incredibly easy to use too, although players should perhaps be careful, as once scrapped, things can't easily be restored. However, these minor issues pale in comparison to the world of possibilities opened up by this mod.

-

Results: The Obama Administration has fundamentally reformed immigration enforcement, focusing on identifying and removing criminal aliens who pose a threat to public safety. Overall, criminal removals/returns increased by almost 22,000 between FY 2008 and FY 2009, a 19 percent increase. So far this fiscal year, ICE has removed a record 170,000 criminals from the U.S. DHS will continue to increase focus on removing those convicted of crimes who pose a threat to the safety of communities.

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Goosebumps Escape From Horrorland Free Full Download Can You Outsmart the Monsters and Escape?.md b/spaces/cihyFjudo/fairness-paper-search/Goosebumps Escape From Horrorland Free Full Download Can You Outsmart the Monsters and Escape?.md deleted file mode 100644 index 86c0e875d62af89bfd1ad964104d5e45f421f1c8..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Goosebumps Escape From Horrorland Free Full Download Can You Outsmart the Monsters and Escape?.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

This page provides general information on the Goosebumps: Escape from Horrorland videogame. As more information about the project appears, you will find news, videos, screenshots, art, interviews with developers and more here. Perhaps you have stumbled on this page in search of a no-registration torrent of Goosebumps: Escape from Horrorland, or a high-speed download of the game. GamerInfo.net provides only information about games and no free download of Goosebumps: Escape from Horrorland.

-

Goosebumps Escape From Horrorland Free Full Download


Download Ziphttps://tinurli.com/2uwkIY



-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Modern City Map Generator Tips and Tricks for Making the Most of Your Maps.md b/spaces/cihyFjudo/fairness-paper-search/Modern City Map Generator Tips and Tricks for Making the Most of Your Maps.md deleted file mode 100644 index d77ea33c7e6c75c12c927bfa879c80dc7b41f881..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Modern City Map Generator Tips and Tricks for Making the Most of Your Maps.md +++ /dev/null @@ -1,14 +0,0 @@ - -

Watabou is kind of like DonJon on steroids. Just like the DonJon city map generator, the idea is that you can generate a city map quickly, and it looks pretty good. But Watabou gives you much more customizability.

-

Inkarnate is a browser-based mapmaking software using the freemium model. So that means, you can make city maps for free (with restrictions) or spring for a subscription to get the whole toolset and all the assets.

-

Modern City Map Generator


Download Filehttps://tinurli.com/2uwhPP



-

Need an isometric city or village illustration? Build it by combining graphical blocks from our urban and rural graphics collections.
Our unique online designer with thousands of icons allows you to create illustrations by yourself. It's easy and free to try.

-

Create an isometric city. It may be used for infographics, arrival and directional maps, or just for fun.

Icograms Designer has a rich palette of city icons, including soil, roads, buildings, vehicles, and people.

-

There are a lot of handy random name-generators freely available online now. There've been several mentions of some on this Forum over time, so if you get stuck, you might try a search here for those - or simply an ordinary web-search for "random name generators". You'll likely find more than you can possibly use (even if most are ostensibly for fantasy gaming)!

-

Spawn on an island with a huge medieval City of Ambarina that covers 1600 blocks amidst the Mediterranean Sea. This Minecraft city consists of 120 houses fully decorated inside and outside, and a huge castle on the very top of it.

-

This Minecraft city is best used on public servers for zombie survival games. The project is absolutely enormous and even has a transport bridge that connects two halves of the city divided by a wide river.

-

An ancient City of Halory is presented in this Minecraft seed for all those who like exploring medieval times. The grand city is surrounded by a tall wooden wall for protection, with the castle and buildings located inside.

-

-

In addition to automatically creating buildings and city blocks, the generator outputs the rough shapes of city blocks and buildings, which can then be mesh-edited manually; after the rough shapes are output, building materials and details can also be applied to the mesh objects in the generator.

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Recover My Files 4.9.4 1343 Activation Key.rar Benefits and Features of the Software.md b/spaces/cihyFjudo/fairness-paper-search/Recover My Files 4.9.4 1343 Activation Key.rar Benefits and Features of the Software.md deleted file mode 100644 index e11a145cbf983829cf47f08be930c0f00ae41705..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Recover My Files 4.9.4 1343 Activation Key.rar Benefits and Features of the Software.md +++ /dev/null @@ -1,23 +0,0 @@ - -

Recover My Files data recovery software recovers deleted files emptied from the Windows Recycle Bin, files lost due to the format or re-install of a hard drive, or files removed by a virus, Trojan infection, unexpected system shutdown or software failure.

-

Recover My Files is fast and easy. No technical or data recovery software skills are required to undelete files. Use Recover My Files to perform hard drive data recovery, or if you are trying to unformat and recover data from floppy disk, zip drive, smart media, compact flash or other removable media. It is easy disk recovery software to undelete files and get data back.

-

Recover My Files 4.9.4 1343 Activation Key.rar


DOWNLOAD ———>>> https://tinurli.com/2uwiO8



-

Do not waste time and money on other software wondering if you can recover deleted files. Download and try Recover My Files undelete software free. Scan your drive and view your deleted files. If you can see your deleted files then data recovery is quick and easy.

-

Purchasing an activation key lets you save your file recovery results. When you purchase via our secure server, your activation key is provided to you on a web page at the end of the purchase process and by email.

-

With a deleted file the data content of the file is rarely destroyed. Even if Windows file reference information has been destroyed, Recover My Files scans the data at a low level to locate "Lost Files" by their internal file structure. This allows Recover My Files to recover deleted files that other data recovery software can never know exist. Read frequently asked questions about data recovery.
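To illustrate the general idea behind that kind of low-level scan: "file carving" tools search the raw bytes of a drive for the signatures that file formats begin with, ignoring the file system entirely. Below is a tiny generic sketch of the concept in Python; it is not Recover My Files' actual algorithm, and disk.img is a placeholder for a raw disk image you would have made yourself:

```python
# Scan a raw disk image for JPEG start-of-image signatures (FF D8 FF).
# Real carvers also find end markers and validate the internal structure.
JPEG_SOI = b"\xff\xd8\xff"

with open("disk.img", "rb") as f:  # placeholder: raw image of the drive
    data = f.read()

offset = data.find(JPEG_SOI)
while offset != -1:
    print(f"possible JPEG header at byte offset {offset}")
    offset = data.find(JPEG_SOI, offset + 1)
```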

-

Download and try Recover My Files now. Check the Recover My Files search results screen and use the preview feature to display the file that you are trying to recover. If you find what you are looking for, then purchase a product activation key to save your recovered files to another drive.

-

Do you want a free download of the full version of Recover My Files 6.0 Crack with the latest activation key, keygen, serial number, registration code, license key, kickass torrent, etc. to restore your lost files?
Whether from virus infection, hardware failure, or just improper operation, data loss happens easily, and we need to figure out a way to recover the data. You may search the web for 'Recover My Files', and you will find Recover My Files 6.0 Crack with the latest activation key, keygen, serial number, registration code, license key, kickass torrent, etc.
However, it's better not to use a Recover My Files crack, because the cracked version might be packaged with a virus, Trojan, backdoor, spyware or malware that could destroy your files or system.
Here I strongly recommend you use the full version of Bitwar Data Recovery software instead; it is your best alternative to a Recover My Files crack, and with free data recovery software you don't need to expose your computer to danger.
Bitwar Data Recovery is fast, reliable and free data recovery software with powerful functions. It can recover lost or deleted files, photos, audio and music from any storage device effectively and safely, and it allows you to preview files to make a selective recovery.
Bitwar Data Recovery supported OS: Windows 10/8/7/XP/Vista, Mac OS X (Mac OS X 10.6, 10.7 and 10.8, 10.9, 10.10 Yosemite, 10.10, 10.11 El Capitan, 10.12 Sierra) on iMac, MacBook, Mac Pro, etc.
You can quickly get the free Bitwar Data Recovery by clicking the button below to download the software and install it on your computer:

-

Serial means a unique number or code which identifies the license of the software as being valid. All retail software uses a serial number or key of some form. A serial number can also be referred to as an Activation Code or CD Key.

-


Step 4. After the scan, you can double-click the found files to preview them one by one. You can check the data in the preview window and even play videos, audio, images, etc. This powerful preview function helps you shorten the recovery time.
Then select the ones you want to recover and click the 'Recover' button to get the data back.
(Caution: DO NOT save the files to the partition or device where you lost them.)

-

-


Bitwar Data Recovery can recover lost files from a formatted hard drive, recover files lost due to a software crash, virus infection or other unknown reasons, and recover lost data from a disk with a lost or missing partition; all of these cases can be tackled simply by using this software.

-

If you cannot find a cracked version of Recover My Files, why not give Bitwar Data Recovery software a shot? It's easy to get from https://www.bitwar.net and it's safer for data recovery.

-


-

Recover My Files Key is an application that can recover lost data files, so you will never lose your information again. Recover My Files Full Crack is advanced recovery software for your PC that lets you bring your data back whenever things go wrong. This software can recover deleted files even after the Recycle Bin has been emptied, as well as documents lost due to formatting, virus infection, or any unexpected system failure or shutdown. You can select any file or folder on your device to recover deleted data, and you can store the recovered files in a directory of your choosing. It preserves the valuable information that you deleted, and you can recover all deleted files from the Recycle Bin.

-

It helps you recover documents in a couple of steps: to begin with, scan your device; next, view all deleted documents; and at the end, recover all of them or just those you want. The text filter option helps you search the found data for important text. It also helps restore files deleted by malware or lost to an unexpected shutdown of your system or any other failure of your applications. The verify-files option flags faulty documents and keeps damaged files from being restored, which helps you understand which files can actually be brought back.

-

Recover My Files 2023 Crack is applicable to all kinds of files; it can face every scenario and keep you safe from any kind of loss. It is the best option for recovering files and documents, and very simple steps are provided to bring the files back into storage. While using a system there is always a chance of a mishap: the system's data can be lost and files can be deleted unintentionally. In this type of situation there is only one way to get those files back and recover the lost data. This is fully updated software built to solve problems of exactly this type. You should install this software on your PC; it is helpful in these matters and helps to recover important and essential files.

-

When you are working in an office, you sometimes need to delete files and documents; after a document is deleted, it is moved to the Recycle Bin. Sometimes there is a need to empty the Recycle Bin, and after performing this action all the deleted files in it are permanently removed from the system. You cannot recover these files unless you install Recover My Files Crack on the PC. It is a very useful application: you can create an account to save your files permanently. Once you sign up, your account is created and you can store data in it, accessible anywhere after a successful sign-in. There are highly efficient filters available that search out the data for you and recover it as well. While using a system it sometimes happens that part of your storage gets damaged and becomes unusable. This gives you a way to get the data back from a damaged partition: you can recover all the files on the first attempt.

-


-


-
-
\ No newline at end of file diff --git a/spaces/clarin-pl/datasets-explorer/clarin_datasets/punctuation_restoration_dataset.py b/spaces/clarin-pl/datasets-explorer/clarin_datasets/punctuation_restoration_dataset.py deleted file mode 100644 index 870aea8c6fa51e06ebdcaab095e46f23937ce9bd..0000000000000000000000000000000000000000 --- a/spaces/clarin-pl/datasets-explorer/clarin_datasets/punctuation_restoration_dataset.py +++ /dev/null @@ -1,159 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -import pandas as pd -from datasets import load_dataset -from sklearn.manifold import TSNE -import streamlit as st - -from clarin_datasets.dataset_to_show import DatasetToShow -from clarin_datasets.utils import embed_sentence, PLOT_COLOR_PALETTE - - -class PunctuationRestorationDataset(DatasetToShow): - def __init__(self): - DatasetToShow.__init__(self) - self.data_dict_named = None - self.dataset_name = "clarin-pl/2021-punctuation-restoration" - self.description = [ - f""" - Dataset link: https://huggingface.co/datasets/{self.dataset_name} - - Speech transcripts generated by Automatic Speech Recognition (ASR) systems typically do - not contain any punctuation or capitalization. In longer stretches of automatically recognized speech, - the lack of punctuation affects the general clarity of the output text [1]. The primary purpose of - punctuation (PR) and capitalization restoration (CR) as a distinct natural language processing (NLP) task is - to improve the legibility of ASR-generated text, and possibly other types of texts without punctuation. Aside - from their intrinsic value, PR and CR may improve the performance of other NLP aspects such as Named Entity - Recognition (NER), part-of-speech (POS) and semantic parsing or spoken dialog segmentation [2, 3]. As useful - as it seems, It is hard to systematically evaluate PR on transcripts of conversational language; mainly - because punctuation rules can be ambiguous even for originally written texts, and the very nature of - naturally-occurring spoken language makes it difficult to identify clear phrase and sentence boundaries [4, - 5]. Given these requirements and limitations, a PR task based on a redistributable corpus of read speech was - suggested. 1200 texts included in this collection (totaling over 240,000 words) were selected from two - distinct sources: WikiNews and WikiTalks. Punctuation found in these sources should be approached with some - reservation when used for evaluation: these are original texts and may contain some user-induced errors and - bias. The texts were read out by over a hundred different speakers. Original texts with punctuation were - forced-aligned with recordings and used as the ideal ASR output. The goal of the task is to provide a - solution for restoring punctuation in the test set collated for this task. The test set consists of - time-aligned ASR transcriptions of read texts from the two sources. Participants are encouraged to use both - text-based and speech-derived features to identify punctuation symbols (e.g. multimodal framework [6]). In - addition, the train set is accompanied by reference text corpora of WikiNews and WikiTalks data that can be - used in training and fine-tuning punctuation models. 
- """, - "Task description", - "The purpose of this task is to restore punctuation in the ASR recognition of texts read out loud.", - "clarin_datasets/punctuation_restoration_task.png", - ] - - def load_data(self): - raw_dataset = load_dataset(self.dataset_name) - self.data_dict = { - subset: raw_dataset[subset].to_pandas() for subset in self.subsets - } - self.data_dict_named = {} - for subset in self.subsets: - references = raw_dataset[subset]["tags"] - references_named = [ - [ - raw_dataset[subset].features["tags"].feature.names[label] - for label in labels - ] - for labels in references - ] - self.data_dict_named[subset] = pd.DataFrame( - { - "tokens": self.data_dict[subset]["tokens"], - "tags": references_named, - } - ) - - def show_dataset(self): - header = st.container() - description = st.container() - dataframe_head = st.container() - class_distribution = st.container() - tsne_projection = st.container() - - with header: - st.title(self.dataset_name) - - with description: - st.header("Dataset description") - st.write(self.description[0]) - st.subheader(self.description[1]) - st.write(self.description[2]) - st.image(self.description[3]) - - full_dataframe = pd.concat(self.data_dict.values(), axis="rows") - - with dataframe_head: - st.header("First 10 observations of the chosen subset") - subset_to_show = st.selectbox( - label="Select subset to see", options=self.subsets - ) - df_to_show = self.data_dict[subset_to_show].head(10) - st.dataframe(df_to_show) - st.text_area(label="LaTeX code", value=df_to_show.style.to_latex()) - - class_distribution_dict = {} - for subset in self.subsets: - all_labels_from_subset = self.data_dict_named[subset]["tags"].tolist() - all_labels_from_subset = [ - x for subarray in all_labels_from_subset for x in subarray if x != "O" - ] - all_labels_from_subset = pd.Series(all_labels_from_subset) - class_distribution_dict[subset] = ( - all_labels_from_subset.value_counts(normalize=True) - .sort_index() - .reset_index() - .rename({"index": "class", 0: subset}, axis="columns") - ) - - class_distribution_df = pd.merge( - class_distribution_dict["train"], - class_distribution_dict["test"], - on="class", - ) - - with class_distribution: - st.header("Class distribution in each subset (without 'O')") - st.dataframe(class_distribution_df) - st.text_area( - label="LaTeX code", value=class_distribution_df.style.to_latex() - ) - with tsne_projection: - st.header("t-SNE projection of the dataset") - subset_to_project = st.selectbox( - label="Select subset to project", options=self.subsets - ) - tokens_unzipped = self.data_dict_named[subset_to_project]["tokens"].tolist() - tokens_unzipped = np.array([x for subarray in tokens_unzipped for x in subarray]) - labels_unzipped = self.data_dict_named[subset_to_project]["tags"].tolist() - labels_unzipped = np.array([x for subarray in labels_unzipped for x in subarray]) - df_unzipped = pd.DataFrame( - { - "tokens": tokens_unzipped, - "tags": labels_unzipped, - } - ) - df_unzipped = df_unzipped.loc[df_unzipped["tags"] != "O"] - tokens_unzipped = df_unzipped["tokens"].values - labels_unzipped = df_unzipped["tags"].values - mapping_dict = {name: number for number, name in enumerate(set(labels_unzipped))} - labels_as_ints = [mapping_dict[label] for label in labels_unzipped] - embedded_tokens = np.array( - [embed_sentence(x) for x in tokens_unzipped] - ) - reducer = TSNE( - n_components=2 - ) - transformed_embeddings = reducer.fit_transform(embedded_tokens) - fig, ax = plt.subplots() - ax.scatter( - x=transformed_embeddings[:, 0], - 
y=transformed_embeddings[:, 1], - c=[ - PLOT_COLOR_PALETTE[i] for i in labels_as_ints - ] - ) - st.pyplot(fig) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/os.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/os.py deleted file mode 100644 index 29bc748fa91a6d3de6ec42842416de6af7134f5c..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiofiles/os.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Async executor versions of file functions from the os module.""" -import os - -from . import ospath as path -from .ospath import wrap - -__all__ = [ - "path", - "stat", - "statvfs", - "rename", - "renames", - "replace", - "remove", - "unlink", - "mkdir", - "makedirs", - "rmdir", - "removedirs", - "link", - "symlink", - "readlink", - "listdir", - "scandir", - "access", - "sendfile", - "wrap", -] - - -stat = wrap(os.stat) -rename = wrap(os.rename) -renames = wrap(os.renames) -replace = wrap(os.replace) -remove = wrap(os.remove) -unlink = wrap(os.unlink) -mkdir = wrap(os.mkdir) -makedirs = wrap(os.makedirs) -rmdir = wrap(os.rmdir) -removedirs = wrap(os.removedirs) -link = wrap(os.link) -symlink = wrap(os.symlink) -readlink = wrap(os.readlink) -listdir = wrap(os.listdir) -scandir = wrap(os.scandir) -access = wrap(os.access) - -if hasattr(os, "sendfile"): - sendfile = wrap(os.sendfile) -if hasattr(os, "statvfs"): - statvfs = wrap(os.statvfs) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/asciiTable.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/asciiTable.py deleted file mode 100644 index 6f81c526b372b268b253da47c337715e316ee4d4..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/asciiTable.py +++ /dev/null @@ -1,20 +0,0 @@ -from fontTools.misc.textTools import strjoin, tobytes, tostr -from . import DefaultTable - - -class asciiTable(DefaultTable.DefaultTable): - def toXML(self, writer, ttFont): - data = tostr(self.data) - # removing null bytes. XXX needed?? - data = data.split("\0") - data = strjoin(data) - writer.begintag("source") - writer.newline() - writer.write_noindent(data) - writer.newline() - writer.endtag("source") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - lines = strjoin(content).split("\n") - self.data = tobytes("\n".join(lines[1:-1])) diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h b/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h deleted file mode 100644 index c7408eba007b424194618baa63726657e36875e3..0000000000000000000000000000000000000000 --- a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h +++ /dev/null @@ -1,64 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once - -#include "ms_deform_attn_cpu.h" - -#ifdef WITH_CUDA -#include "ms_deform_attn_cuda.h" -#endif - -namespace groundingdino { - -at::Tensor -ms_deform_attn_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_forward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::vector -ms_deform_attn_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_backward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/__init__.py b/spaces/codelion/Grounding_DINO_demo/groundingdino/models/__init__.py deleted file mode 100644 index e3413961d1d184b99835eb1e919b052d70298bc6..0000000000000000000000000000000000000000 --- a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .GroundingDINO import build_groundingdino - - -def build_model(args): - # we use register to maintain models from catdet6 on. - from .registry import MODULE_BUILD_FUNCS - - assert args.modelname in MODULE_BUILD_FUNCS._module_dict - build_func = MODULE_BUILD_FUNCS.get(args.modelname) - model = build_func(args) - return model diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffjni.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffjni.c deleted file mode 100644 index 154be9ae993e3669668125535189c56a0a79f890..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffjni.c +++ /dev/null @@ -1,419 +0,0 @@ -/* - * JNI utility functions - * - * Copyright (c) 2015-2016 Matthieu Bouron - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include -#include - -#include "libavutil/bprint.h" -#include "libavutil/log.h" -#include "libavutil/mem.h" - -#include "config.h" -#include "jni.h" -#include "ffjni.h" - -static JavaVM *java_vm; -static pthread_key_t current_env; -static pthread_once_t once = PTHREAD_ONCE_INIT; -static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; - -static void jni_detach_env(void *data) -{ - if (java_vm) { - (*java_vm)->DetachCurrentThread(java_vm); - } -} - -static void jni_create_pthread_key(void) -{ - pthread_key_create(¤t_env, jni_detach_env); -} - -JNIEnv *ff_jni_get_env(void *log_ctx) -{ - int ret = 0; - JNIEnv *env = NULL; - - pthread_mutex_lock(&lock); - if (java_vm == NULL) { - java_vm = av_jni_get_java_vm(log_ctx); - } - - if (!java_vm) { - av_log(log_ctx, AV_LOG_ERROR, "No Java virtual machine has been registered\n"); - goto done; - } - - pthread_once(&once, jni_create_pthread_key); - - if ((env = pthread_getspecific(current_env)) != NULL) { - goto done; - } - - ret = (*java_vm)->GetEnv(java_vm, (void **)&env, JNI_VERSION_1_6); - switch(ret) { - case JNI_EDETACHED: - if ((*java_vm)->AttachCurrentThread(java_vm, &env, NULL) != 0) { - av_log(log_ctx, AV_LOG_ERROR, "Failed to attach the JNI environment to the current thread\n"); - env = NULL; - } else { - pthread_setspecific(current_env, env); - } - break; - case JNI_OK: - break; - case JNI_EVERSION: - av_log(log_ctx, AV_LOG_ERROR, "The specified JNI version is not supported\n"); - break; - default: - av_log(log_ctx, AV_LOG_ERROR, "Failed to get the JNI environment attached to this thread\n"); - break; - } - -done: - pthread_mutex_unlock(&lock); - return env; -} - -char *ff_jni_jstring_to_utf_chars(JNIEnv *env, jstring string, void *log_ctx) -{ - char *ret = NULL; - const char *utf_chars = NULL; - - jboolean copy = 0; - - if (!string) { - return NULL; - } - - utf_chars = (*env)->GetStringUTFChars(env, string, ©); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "String.getStringUTFChars() threw an exception\n"); - return NULL; - } - - ret = av_strdup(utf_chars); - - (*env)->ReleaseStringUTFChars(env, string, utf_chars); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "String.releaseStringUTFChars() threw an exception\n"); - return NULL; - } - - return ret; -} - -jstring ff_jni_utf_chars_to_jstring(JNIEnv *env, const char *utf_chars, void *log_ctx) -{ - jstring ret; - - ret = (*env)->NewStringUTF(env, utf_chars); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "NewStringUTF() threw an exception\n"); - return NULL; - } - - return ret; -} - -int ff_jni_exception_get_summary(JNIEnv *env, jthrowable exception, char **error, void *log_ctx) -{ - int ret = 0; - - AVBPrint bp; - - char *name = NULL; - char *message = NULL; - - jclass class_class = NULL; - jmethodID get_name_id = NULL; - - jclass exception_class = NULL; - jmethodID get_message_id = NULL; - - jstring string = NULL; - - av_bprint_init(&bp, 0, 
AV_BPRINT_SIZE_AUTOMATIC); - - exception_class = (*env)->GetObjectClass(env, exception); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Could not find Throwable class\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - class_class = (*env)->GetObjectClass(env, exception_class); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Could not find Throwable class's class\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - get_name_id = (*env)->GetMethodID(env, class_class, "getName", "()Ljava/lang/String;"); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Could not find method Class.getName()\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - string = (*env)->CallObjectMethod(env, exception_class, get_name_id); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Class.getName() threw an exception\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - if (string) { - name = ff_jni_jstring_to_utf_chars(env, string, log_ctx); - (*env)->DeleteLocalRef(env, string); - string = NULL; - } - - get_message_id = (*env)->GetMethodID(env, exception_class, "getMessage", "()Ljava/lang/String;"); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Could not find method java/lang/Throwable.getMessage()\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - string = (*env)->CallObjectMethod(env, exception, get_message_id); - if ((*env)->ExceptionCheck(env)) { - (*env)->ExceptionClear(env); - av_log(log_ctx, AV_LOG_ERROR, "Throwable.getMessage() threw an exception\n"); - ret = AVERROR_EXTERNAL; - goto done; - } - - if (string) { - message = ff_jni_jstring_to_utf_chars(env, string, log_ctx); - (*env)->DeleteLocalRef(env, string); - string = NULL; - } - - if (name && message) { - av_bprintf(&bp, "%s: %s", name, message); - } else if (name && !message) { - av_bprintf(&bp, "%s occurred", name); - } else if (!name && message) { - av_bprintf(&bp, "Exception: %s", message); - } else { - av_log(log_ctx, AV_LOG_WARNING, "Could not retrieve exception name and message\n"); - av_bprintf(&bp, "Exception occurred"); - } - - ret = av_bprint_finalize(&bp, error); -done: - - av_free(name); - av_free(message); - - if (class_class) { - (*env)->DeleteLocalRef(env, class_class); - } - - if (exception_class) { - (*env)->DeleteLocalRef(env, exception_class); - } - - if (string) { - (*env)->DeleteLocalRef(env, string); - } - - return ret; -} - -int ff_jni_exception_check(JNIEnv *env, int log, void *log_ctx) -{ - int ret; - - jthrowable exception; - - char *message = NULL; - - if (!(*(env))->ExceptionCheck((env))) { - return 0; - } - - if (!log) { - (*(env))->ExceptionClear((env)); - return -1; - } - - exception = (*env)->ExceptionOccurred(env); - (*(env))->ExceptionClear((env)); - - if ((ret = ff_jni_exception_get_summary(env, exception, &message, log_ctx)) < 0) { - (*env)->DeleteLocalRef(env, exception); - return ret; - } - - (*env)->DeleteLocalRef(env, exception); - - av_log(log_ctx, AV_LOG_ERROR, "%s\n", message); - av_free(message); - - return -1; -} - -int ff_jni_init_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx) -{ - int i, ret = 0; - jclass last_clazz = NULL; - - for (i = 0; jfields_mapping[i].name; i++) { - int mandatory = jfields_mapping[i].mandatory; - enum FFJniFieldType type = jfields_mapping[i].type; - - if (type == 
FF_JNI_CLASS) { - jclass clazz; - - last_clazz = NULL; - - clazz = (*env)->FindClass(env, jfields_mapping[i].name); - if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { - goto done; - } - - last_clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) = - global ? (*env)->NewGlobalRef(env, clazz) : clazz; - - if (global) { - (*env)->DeleteLocalRef(env, clazz); - } - - } else { - - if (!last_clazz) { - ret = AVERROR_EXTERNAL; - break; - } - - switch(type) { - case FF_JNI_FIELD: { - jfieldID field_id = (*env)->GetFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); - if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { - goto done; - } - - *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id; - break; - } - case FF_JNI_STATIC_FIELD: { - jfieldID field_id = (*env)->GetStaticFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); - if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { - goto done; - } - - *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id; - break; - } - case FF_JNI_METHOD: { - jmethodID method_id = (*env)->GetMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); - if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { - goto done; - } - - *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id; - break; - } - case FF_JNI_STATIC_METHOD: { - jmethodID method_id = (*env)->GetStaticMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature); - if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) { - goto done; - } - - *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id; - break; - } - default: - av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n"); - ret = AVERROR(EINVAL); - goto done; - } - - ret = 0; - } - } - -done: - if (ret < 0) { - /* reset jfields in case of failure so it does not leak references */ - ff_jni_reset_jfields(env, jfields, jfields_mapping, global, log_ctx); - } - - return ret; -} - -int ff_jni_reset_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx) -{ - int i; - - for (i = 0; jfields_mapping[i].name; i++) { - enum FFJniFieldType type = jfields_mapping[i].type; - - switch(type) { - case FF_JNI_CLASS: { - jclass clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset); - if (!clazz) - continue; - - if (global) { - (*env)->DeleteGlobalRef(env, clazz); - } else { - (*env)->DeleteLocalRef(env, clazz); - } - - *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; - break; - } - case FF_JNI_FIELD: { - *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; - break; - } - case FF_JNI_STATIC_FIELD: { - *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; - break; - } - case FF_JNI_METHOD: { - *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; - break; - } - case FF_JNI_STATIC_METHOD: { - *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = NULL; - break; - } - default: - av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n"); - } - } - - return 0; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264qpel_template.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264qpel_template.c deleted file mode 100644 index 
27c5b8f17fea3154b8b44883361693e9645e2977..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264qpel_template.c +++ /dev/null @@ -1,552 +0,0 @@ -/* - * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder - * Copyright (c) 2003-2010 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/common.h" -#include "libavutil/intreadwrite.h" - -#include "bit_depth_template.c" -#include "hpel_template.c" -#include "pel_template.c" - -static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) -{ - int i; - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - src -= 2*srcStride;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - src -= 2*srcStride;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - for(i=0; i>= sizeof(pixel)-1;\ - srcStride >>= sizeof(pixel)-1;\ - src -= 2*srcStride;\ - for(i=0; i>5)+1)>>1) -//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7) -#define op_put(a, b) a = CLIP(((b) + 16)>>5) -#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1) -#define op2_put(a, b) a = CLIP(((b) + 512)>>10) - -H264_LOWPASS(put_ , op_put, op2_put) -H264_LOWPASS(avg_ , op_avg, op2_avg) -H264_MC(put_, 2) -H264_MC(put_, 4) -H264_MC(put_, 8) -H264_MC(put_, 16) -H264_MC(avg_, 4) -H264_MC(avg_, 8) -H264_MC(avg_, 16) - -#undef op_avg -#undef op_put -#undef op2_avg -#undef op2_put diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hevc_parser.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hevc_parser.c deleted file mode 100644 index 59f9a0ff3e57d59fa75106e3a1adef09666b6db3..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hevc_parser.c +++ /dev/null @@ -1,354 +0,0 @@ -/* - * HEVC Annex B format parser - * - * Copyright (C) 2012 - 2013 Guillaume Martres - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/common.h" - -#include "golomb.h" -#include "hevc.h" -#include "hevc_parse.h" -#include "hevc_ps.h" -#include "hevc_sei.h" -#include "h2645_parse.h" -#include "parser.h" - -#define START_CODE 0x000001 ///< start_code_prefix_one_3bytes - -#define IS_IRAP_NAL(nal) (nal->type >= 16 && nal->type <= 23) -#define IS_IDR_NAL(nal) (nal->type == HEVC_NAL_IDR_W_RADL || nal->type == HEVC_NAL_IDR_N_LP) - -typedef struct HEVCParserContext { - ParseContext pc; - - H2645Packet pkt; - HEVCParamSets ps; - HEVCSEI sei; - - int is_avc; - int nal_length_size; - int parsed_extradata; - - int poc; - int pocTid0; -} HEVCParserContext; - -static int hevc_parse_slice_header(AVCodecParserContext *s, H2645NAL *nal, - AVCodecContext *avctx) -{ - HEVCParserContext *ctx = s->priv_data; - HEVCParamSets *ps = &ctx->ps; - HEVCSEI *sei = &ctx->sei; - GetBitContext *gb = &nal->gb; - const HEVCWindow *ow; - int i, num = 0, den = 0; - - unsigned int pps_id, first_slice_in_pic_flag, dependent_slice_segment_flag; - enum HEVCSliceType slice_type; - - first_slice_in_pic_flag = get_bits1(gb); - s->picture_structure = sei->picture_timing.picture_struct; - s->field_order = sei->picture_timing.picture_struct; - - if (IS_IRAP_NAL(nal)) { - s->key_frame = 1; - skip_bits1(gb); // no_output_of_prior_pics_flag - } - - pps_id = get_ue_golomb(gb); - if (pps_id >= HEVC_MAX_PPS_COUNT || !ps->pps_list[pps_id]) { - av_log(avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", pps_id); - return AVERROR_INVALIDDATA; - } - ps->pps = (HEVCPPS*)ps->pps_list[pps_id]->data; - - if (ps->pps->sps_id >= HEVC_MAX_SPS_COUNT || !ps->sps_list[ps->pps->sps_id]) { - av_log(avctx, AV_LOG_ERROR, "SPS id out of range: %d\n", ps->pps->sps_id); - return AVERROR_INVALIDDATA; - } - if (ps->sps != (HEVCSPS*)ps->sps_list[ps->pps->sps_id]->data) { - ps->sps = (HEVCSPS*)ps->sps_list[ps->pps->sps_id]->data; - ps->vps = (HEVCVPS*)ps->vps_list[ps->sps->vps_id]->data; - } - ow = &ps->sps->output_window; - - s->coded_width = ps->sps->width; - s->coded_height = ps->sps->height; - s->width = ps->sps->width - ow->left_offset - ow->right_offset; - s->height = ps->sps->height - ow->top_offset - ow->bottom_offset; - s->format = ps->sps->pix_fmt; - avctx->profile = ps->sps->ptl.general_ptl.profile_idc; - avctx->level = ps->sps->ptl.general_ptl.level_idc; - - if (ps->vps->vps_timing_info_present_flag) { - num = ps->vps->vps_num_units_in_tick; - den = ps->vps->vps_time_scale; - } else if (ps->sps->vui.vui_timing_info_present_flag) { - num = ps->sps->vui.vui_num_units_in_tick; - den = ps->sps->vui.vui_time_scale; - } - - if (num != 0 && den != 0) - av_reduce(&avctx->framerate.den, &avctx->framerate.num, - num, den, 1 << 30); - - if (!first_slice_in_pic_flag) { - unsigned int slice_segment_addr; - int slice_address_length; - - if (ps->pps->dependent_slice_segments_enabled_flag) - dependent_slice_segment_flag = get_bits1(gb); - else - dependent_slice_segment_flag = 0; - - slice_address_length = av_ceil_log2_c(ps->sps->ctb_width * - ps->sps->ctb_height); - slice_segment_addr = get_bitsz(gb, slice_address_length); - if (slice_segment_addr >= ps->sps->ctb_width * ps->sps->ctb_height) { - av_log(avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", - slice_segment_addr); - return 
AVERROR_INVALIDDATA; - } - } else - dependent_slice_segment_flag = 0; - - if (dependent_slice_segment_flag) - return 0; /* break; */ - - for (i = 0; i < ps->pps->num_extra_slice_header_bits; i++) - skip_bits(gb, 1); // slice_reserved_undetermined_flag[] - - slice_type = get_ue_golomb_31(gb); - if (!(slice_type == HEVC_SLICE_I || slice_type == HEVC_SLICE_P || - slice_type == HEVC_SLICE_B)) { - av_log(avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", - slice_type); - return AVERROR_INVALIDDATA; - } - s->pict_type = slice_type == HEVC_SLICE_B ? AV_PICTURE_TYPE_B : - slice_type == HEVC_SLICE_P ? AV_PICTURE_TYPE_P : - AV_PICTURE_TYPE_I; - - if (ps->pps->output_flag_present_flag) - skip_bits1(gb); // pic_output_flag - - if (ps->sps->separate_colour_plane_flag) - skip_bits(gb, 2); // colour_plane_id - - if (!IS_IDR_NAL(nal)) { - int pic_order_cnt_lsb = get_bits(gb, ps->sps->log2_max_poc_lsb); - s->output_picture_number = ctx->poc = - ff_hevc_compute_poc(ps->sps, ctx->pocTid0, pic_order_cnt_lsb, nal->type); - } else - s->output_picture_number = ctx->poc = 0; - - if (nal->temporal_id == 0 && - nal->type != HEVC_NAL_TRAIL_N && - nal->type != HEVC_NAL_TSA_N && - nal->type != HEVC_NAL_STSA_N && - nal->type != HEVC_NAL_RADL_N && - nal->type != HEVC_NAL_RASL_N && - nal->type != HEVC_NAL_RADL_R && - nal->type != HEVC_NAL_RASL_R) - ctx->pocTid0 = ctx->poc; - - return 1; /* no need to evaluate the rest */ -} - -/** - * Parse NAL units of found picture and decode some basic information. - * - * @param s parser context. - * @param avctx codec context. - * @param buf buffer with field/frame data. - * @param buf_size size of the buffer. - */ -static int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf, - int buf_size, AVCodecContext *avctx) -{ - HEVCParserContext *ctx = s->priv_data; - HEVCParamSets *ps = &ctx->ps; - HEVCSEI *sei = &ctx->sei; - int ret, i; - - /* set some sane default values */ - s->pict_type = AV_PICTURE_TYPE_I; - s->key_frame = 0; - s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN; - - ff_hevc_reset_sei(sei); - - ret = ff_h2645_packet_split(&ctx->pkt, buf, buf_size, avctx, ctx->is_avc, - ctx->nal_length_size, AV_CODEC_ID_HEVC, 1, 0); - if (ret < 0) - return ret; - - for (i = 0; i < ctx->pkt.nb_nals; i++) { - H2645NAL *nal = &ctx->pkt.nals[i]; - GetBitContext *gb = &nal->gb; - - if (nal->nuh_layer_id > 0) - continue; - - switch (nal->type) { - case HEVC_NAL_VPS: - ff_hevc_decode_nal_vps(gb, avctx, ps); - break; - case HEVC_NAL_SPS: - ff_hevc_decode_nal_sps(gb, avctx, ps, 1); - break; - case HEVC_NAL_PPS: - ff_hevc_decode_nal_pps(gb, avctx, ps); - break; - case HEVC_NAL_SEI_PREFIX: - case HEVC_NAL_SEI_SUFFIX: - ff_hevc_decode_nal_sei(gb, avctx, sei, ps, nal->type); - break; - case HEVC_NAL_TRAIL_N: - case HEVC_NAL_TRAIL_R: - case HEVC_NAL_TSA_N: - case HEVC_NAL_TSA_R: - case HEVC_NAL_STSA_N: - case HEVC_NAL_STSA_R: - case HEVC_NAL_BLA_W_LP: - case HEVC_NAL_BLA_W_RADL: - case HEVC_NAL_BLA_N_LP: - case HEVC_NAL_IDR_W_RADL: - case HEVC_NAL_IDR_N_LP: - case HEVC_NAL_CRA_NUT: - case HEVC_NAL_RADL_N: - case HEVC_NAL_RADL_R: - case HEVC_NAL_RASL_N: - case HEVC_NAL_RASL_R: - if (ctx->sei.picture_timing.picture_struct == HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING) { - s->repeat_pict = 1; - } else if (ctx->sei.picture_timing.picture_struct == HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING) { - s->repeat_pict = 2; - } - ret = hevc_parse_slice_header(s, nal, avctx); - if (ret) - return ret; - break; - } - } - /* didn't find a picture! 
*/ - av_log(avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size); - return -1; -} - -/** - * Find the end of the current frame in the bitstream. - * @return the position of the first byte of the next frame, or END_NOT_FOUND - */ -static int hevc_find_frame_end(AVCodecParserContext *s, const uint8_t *buf, - int buf_size) -{ - HEVCParserContext *ctx = s->priv_data; - ParseContext *pc = &ctx->pc; - int i; - - for (i = 0; i < buf_size; i++) { - int nut; - - pc->state64 = (pc->state64 << 8) | buf[i]; - - if (((pc->state64 >> 3 * 8) & 0xFFFFFF) != START_CODE) - continue; - - nut = (pc->state64 >> 2 * 8 + 1) & 0x3F; - // Beginning of access unit - if ((nut >= HEVC_NAL_VPS && nut <= HEVC_NAL_EOB_NUT) || nut == HEVC_NAL_SEI_PREFIX || - (nut >= 41 && nut <= 44) || (nut >= 48 && nut <= 55)) { - if (pc->frame_start_found) { - pc->frame_start_found = 0; - return i - 5; - } - } else if (nut <= HEVC_NAL_RASL_R || - (nut >= HEVC_NAL_BLA_W_LP && nut <= HEVC_NAL_CRA_NUT)) { - int first_slice_segment_in_pic_flag = buf[i] >> 7; - if (first_slice_segment_in_pic_flag) { - if (!pc->frame_start_found) { - pc->frame_start_found = 1; - } else { // First slice of next frame found - pc->frame_start_found = 0; - return i - 5; - } - } - } - } - - return END_NOT_FOUND; -} - -static int hevc_parse(AVCodecParserContext *s, AVCodecContext *avctx, - const uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size) -{ - int next; - HEVCParserContext *ctx = s->priv_data; - ParseContext *pc = &ctx->pc; - int is_dummy_buf = !buf_size; - const uint8_t *dummy_buf = buf; - - if (avctx->extradata && !ctx->parsed_extradata) { - ff_hevc_decode_extradata(avctx->extradata, avctx->extradata_size, &ctx->ps, &ctx->sei, - &ctx->is_avc, &ctx->nal_length_size, avctx->err_recognition, - 1, avctx); - ctx->parsed_extradata = 1; - } - - if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { - next = buf_size; - } else { - next = hevc_find_frame_end(s, buf, buf_size); - if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { - *poutbuf = NULL; - *poutbuf_size = 0; - return buf_size; - } - } - - is_dummy_buf &= (dummy_buf == buf); - - if (!is_dummy_buf) - parse_nal_units(s, buf, buf_size, avctx); - - *poutbuf = buf; - *poutbuf_size = buf_size; - return next; -} - -static void hevc_parser_close(AVCodecParserContext *s) -{ - HEVCParserContext *ctx = s->priv_data; - - ff_hevc_ps_uninit(&ctx->ps); - ff_h2645_packet_uninit(&ctx->pkt); - ff_hevc_reset_sei(&ctx->sei); - - av_freep(&ctx->pc.buffer); -} - -const AVCodecParser ff_hevc_parser = { - .codec_ids = { AV_CODEC_ID_HEVC }, - .priv_data_size = sizeof(HEVCParserContext), - .parser_parse = hevc_parse, - .parser_close = hevc_parser_close, -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Roblox para PC and Discover Millions of Immersive Experiences.md b/spaces/congsaPfin/Manga-OCR/logs/Download Roblox para PC and Discover Millions of Immersive Experiences.md deleted file mode 100644 index 19bad01f2562fd1c9e2135029d0d0945b72f83ad..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Roblox para PC and Discover Millions of Immersive Experiences.md +++ /dev/null @@ -1,110 +0,0 @@ - -

Download Roblox Para PC: How to Play Roblox on Your PC

-

Roblox is one of the most popular online platforms for creating and playing games. It allows you to unleash your imagination and explore an infinite variety of immersive, user-generated 3D worlds. But did you know that you can also download Roblox para PC and enjoy it on your computer? In this article, we will show you what Roblox is, why you should download it for your PC, how to do it, and how to install and run it. Let's get started!

-

download roblox para pc


Download File ►►► https://urlca.com/2uOdIt



-

What is Roblox?

-

Roblox is more than just a game. It is a platform that lets you create, share, and play experiences with millions of people across the globe. Here are some of the features that make Roblox unique:

-

A Platform for Creating and Playing Games

-

Roblox gives you the tools and the freedom to create your own games and experiences. You can use the Roblox Studio, a powerful and easy-to-use software, to design, code, and publish your creations. You can also use thousands of free models, scripts, sounds, and textures from the Roblox library. Whether you want to make a first-person shooter, a role-playing adventure, a racing game, or anything else, you can do it on Roblox.

-

A Community of Millions of Users

-

Roblox is not only a platform for creating games, but also for playing them. You can join millions of users who are already enjoying the diverse and dynamic content that Roblox has to offer. You can play games made by other users, or invite your friends to play yours. You can also chat, voice chat, message, and interact with other players in various ways. You can even join groups, clans, teams, or communities that share your interests.

-

A Variety of Genres and Themes

-

Roblox has something for everyone. You can find games and experiences that suit your preferences, mood, or curiosity. You can explore different genres, such as action, adventure, simulation, horror, comedy, sci-fi, fantasy, and more. You can also discover different themes, such as seasonal, festive, historical, educational, or fictional. You can even find games based on your favorite movies, TV shows, books, or characters.

-

Why Download Roblox Para PC?

-

Roblox is available on multiple devices, such as smartphones, tablets, consoles, and computers. However, there are some advantages to downloading Roblox para PC and playing it on your computer. Here are some of them:

-

download roblox for pc free
-download roblox studio for pc
-download roblox apk for pc
-download roblox games for pc
-download roblox app for pc
-download roblox player for pc
-download roblox online for pc
-download roblox offline for pc
-download roblox on pc windows 10
-download roblox on pc windows 7
-download roblox on pc windows 8
-download roblox on pc windows xp
-download roblox on pc mac
-download roblox on pc linux
-download roblox on pc chromebook
-how to download roblox para pc
-how to download roblox para pc gratis
-how to download roblox para pc sin virus
-how to download roblox para pc rapido
-how to download roblox para pc facil
-how to download roblox para pc 2023
-how to download roblox para pc 2022
-how to download roblox para pc 2021
-how to download roblox para pc 2020
-how to download roblox para pc 2019
-como descargar roblox para pc
-como descargar roblox para pc gratis
-como descargar roblox para pc sin virus
-como descargar roblox para pc rapido
-como descargar roblox para pc facil
-como descargar roblox para pc 2023
-como descargar roblox para pc 2022
-como descargar roblox para pc 2021
-como descargar roblox para pc 2020
-como descargar roblox para pc 2019
-baixar roblox para pc
-baixar roblox para pc gratis
-baixar roblox para pc sem virus
-baixar roblox para pc rapido
-baixar roblox para pc facil
-baixar roblox para pc 2023
-baixar roblox para pc 2022
-baixar roblox para pc 2021
-baixar roblox para pc 2020
-baixar roblox para pc 2019

-

Better Performance and Graphics

-

Playing Roblox on your PC can give you a smoother and more immersive experience. You can enjoy faster loading times, higher frame rates, better resolution, and more detail. You can also adjust the graphics settings to suit your preferences and your device's capabilities.

-

More Control and Customization

-

Playing Roblox on your PC can give you more control and customization options. You can use your keyboard and mouse to navigate the interface and play the games. You can also use other devices or accessories that are compatible with your PC. You can also customize your settings, such as audio volume, chat filters, language preferences etc.

-

Access to Exclusive Features and Games

-

Playing Roblox on your PC can give you access to exclusive features and games that are not available on other devices. For example, you can use the Roblox Studio to create and edit your own games on your PC. You can also play some games that are only compatible with PC, such as Phantom Forces, Arsenal, or Jailbreak.

-

How to Download Roblox Para PC?

-

Downloading Roblox para PC is easy and free. You can do it from different sources, depending on your preference. Here are some of the options:

-

From the Official Website

-

The official website of Roblox is the most reliable and secure source to download Roblox para PC. You can follow these steps (a scripted version of the download appears after the list):

-
    -
  1. Go to https://www.roblox.com on your web browser.
  2. -
  3. Click on the "Download" button on the top right corner of the homepage.
  4. -
  5. Select your operating system (Windows or Mac) and click on the "Download Now" button.
  6. -
  7. Save the file to your desired location on your PC.
  8. -
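If you prefer to script this download instead of clicking through the site, a minimal Python sketch like the one below can fetch the Windows installer. The installer URL is an assumption made for illustration; confirm the current link on https://www.roblox.com before relying on it.

import urllib.request

# Assumed installer URL, for illustration only; verify it on the official site first.
INSTALLER_URL = "https://setup.roblox.com/RobloxPlayerLauncher.exe"

def download_installer(dest_path: str) -> None:
    # Stream the file to disk in chunks so the installer never sits fully in memory.
    with urllib.request.urlopen(INSTALLER_URL) as response, open(dest_path, "wb") as out:
        while True:
            chunk = response.read(64 * 1024)
            if not chunk:
                break
            out.write(chunk)

if __name__ == "__main__":
    download_installer("RobloxPlayerLauncher.exe")
    print("Saved RobloxPlayerLauncher.exe")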
-

From the Microsoft Store

-

If you have a Windows 10 device, you can also download Roblox para PC from the Microsoft Store. You can follow these steps:

-
    -
  1. Go to https://www.microsoft.com/en-us/p/roblox/9nblgggzm6wm on your web browser.
  2. -
  3. Click on the "Get" button and sign in with your Microsoft account.
  4. -
  5. Click on the "Install" button and wait for the download to complete.
  6. -
  7. Open the Microsoft Store app on your PC and launch Roblox from there.
  8. -
-

From Other Sources

-

You can also find other sources to download Roblox para PC, such as third-party websites or platforms. However, you should be careful and cautious when using these sources, as they may not be safe or authorized by Roblox. You should always check the reviews, ratings, and reputation of these sources before downloading anything from them. You should also scan the files for viruses or malware before installing them on your PC.

-

How to Install and Run Roblox Para PC?

-

Once you have downloaded Roblox para PC from your preferred source, you need to install and run it on your PC. You can follow these steps:

-

Follow the Instructions on the Screen

-

Double-click on the downloaded file and follow the instructions on the screen to install Roblox para PC. You may need to agree to the terms and conditions, choose a destination folder, and create a shortcut icon. The installation process should take only a few minutes.

-

Launch the Roblox Player or Studio

-

After installing Roblox para PC, you can launch it from your desktop, start menu, or taskbar. You can choose between two options: the Roblox Player or the Roblox Studio. The Roblox Player allows you to play games and experiences created by other users or yourself. The Roblox Studio allows you to create and edit your own games and experiences.

-

Log in or Sign up for an Account

-

To access all the features and content of Roblox, you need to have an account. If you already have one, you can log in with your username and password. If you don't have one, you can sign up for free with your email address, date of birth, and gender. You can also customize your avatar, username, password, and profile settings later.

-

Conclusion

-

In conclusion, downloading Roblox para PC is a great way to enjoy this amazing platform on your computer. You can create and play games with millions of users across the world, with better performance, graphics, control, customization, and access to exclusive features and games. You can download Roblox para PC from different sources, such as the official website, the Microsoft Store, or other platforms. You can also install and run it easily by following some simple steps. What are you waiting for? Download Roblox para PC today and unleash your creativity!

-

Frequently Asked Questions

-
    -
  • Is Roblox para PC free?
  • -
  • Yes, Roblox para PC is free to download, install, and play. However, some games or items may require you to spend real money or virtual currency (Robux) to access them.
  • -
  • Is Roblox para PC safe?
  • -
  • Yes, Roblox para PC is safe if you download it from a reliable and secure source, such as the official website or the Microsoft Store. You should also scan the files for viruses or malware before installing them on your PC. You should also be careful when playing games or interacting with other users, as some of them may be inappropriate or harmful.
  • -
  • Can I play Roblox para PC offline?
  • -
  • No, Roblox para PC requires an internet connection to play. You need to be online to access the games and experiences, as well as your account and settings. However, you can use the Roblox Studio offline to create and edit your own games and experiences.
  • -
  • Can I play Roblox para PC with other devices?
  • -
  • Yes, Roblox para PC is cross-platform compatible, which means you can play with other users who are using different devices, such as smartphones, tablets, consoles, or computers. However, some games or features may not be available or compatible with all devices.
  • -
  • How can I contact Roblox para PC support?
  • -
  • If you have any questions, issues, or feedback regarding Roblox para PC, you can contact the Roblox support team by visiting https://en.help.roblox.com/hc/en-us and submitting a request. You can also find answers to common questions and problems on the help center.
  • -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/God of War 2 APK The Ultimate Guide to Install and Enjoy the Game on Android.md b/spaces/congsaPfin/Manga-OCR/logs/God of War 2 APK The Ultimate Guide to Install and Enjoy the Game on Android.md deleted file mode 100644 index c67488bafb391a5e29c897deaa4ed4f8d00a65fd..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/God of War 2 APK The Ultimate Guide to Install and Enjoy the Game on Android.md +++ /dev/null @@ -1,71 +0,0 @@ -
-

God of War 2 APK: How to Play the Epic Action-Adventure Game on Your Android Device

-

God of War 2 is one of the most acclaimed video games of all time, and for good reasons. It is a thrilling action-adventure game that follows the story of Kratos, a former god of war who seeks revenge against Zeus, the king of the Olympian gods. The game features stunning graphics, epic boss battles, engaging puzzles, and a captivating plot based on Greek mythology.

-

god of war 2 apk


DOWNLOADhttps://urlca.com/2uO7Du



-

But what if you want to play this masterpiece on your Android device? Is it possible? The answer is yes, thanks to God of War 2 APK, a package that lets you run the PlayStation 2 game on your smartphone or tablet through an emulator. In this article, we will show you how to download and install God of War 2 APK on your Android device, and how to play it with some tips and tricks. Let's get started!

-

How to Download and Install God of War 2 APK on Your Android Device

-

To play God of War 2 APK on your Android device, you will need three things: a game ISO file, a PlayStation 2 BIOS file, and a PS2 emulator app. Here are the steps to follow (a quick integrity check you can run on the ISO appears after the list):

-
    -
  1. Download the game ISO file from this link. This is a compressed file that contains the data of the game. You will need to extract it using an app like ZArchiver.
  2. -
  3. Download the PlayStation 2 BIOS file from this link. This is a file that contains the firmware of the PlayStation 2 console. You will need to copy it to the PS2 emulator folder.
  4. -
  5. Download and install Demon PS2 Emulator from the Google Play Store. This is an app that simulates the PlayStation 2 console on your Android device. You will need to grant it some permissions and configure some settings.
  6. -
  7. Open ZArchiver and locate the game ISO file that you downloaded. Tap on it and select "Extract here". You will get a folder named "God Of War II PC Setup [CYBERS-TEAM]". Open it and copy the file named "God Of War II.iso".
  8. -
  9. Open Demon PS2 Emulator and tap on the menu icon at the top left corner. Select "BIOS" and then "Import BIOS". Locate the PlayStation BIOS file that you downloaded and tap on it. You will see a message saying "Import BIOS successfully".
  10. -
  11. Go back to the main screen of Demon PS2 Emulator and tap on "Unsorted". You will see a list of games that are available on your device. Tap on "God Of War II.iso" and select "Run Game". The game will start loading.
  12. -
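Before importing anything into the emulator, it is worth confirming that the extracted ISO is complete and uncorrupted. The short Python sketch below hashes the file with the standard library; the file name comes from the steps above, while the reference checksum is a placeholder you would obtain from wherever you got the image (MD5 is used here for integrity checking, not security).

import hashlib

def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    # Hash the ISO in 1 MiB chunks so a multi-gigabyte image does not exhaust RAM.
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    iso_hash = file_md5("God Of War II.iso")
    print("MD5:", iso_hash)
    # Compare iso_hash against the checksum published by your source (placeholder step).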
-

Congratulations! You have successfully downloaded and installed God of War 2 APK on your Android device. Now you can enjoy this amazing game anytime, anywhere.

-

How to Play God of War 2 APK on Your Android Device

-

Playing God of War 2 APK on your Android device is not very different from playing it on a PlayStation 2 console. You will see a virtual controller on your screen that mimics the buttons and joysticks of the original controller. You can use them to move, attack, dodge, jump, interact, and perform other actions in the game.

-

god of war 2 apk download for android
-god of war 2 apk + obb
-god of war 2 apk mod
-god of war 2 apk offline
-god of war 2 apk highly compressed
-god of war 2 apk and data
-god of war 2 apk free download
-god of war 2 apk ppsspp
-god of war 2 apk ps2 emulator
-god of war 2 apk revdl
-god of war 2 apk android game
-god of war 2 apk demon ps emulator
-god of war 2 apk zarchiver
-god of war 2 apk pinkvilla
-god of war 2 apk india fantasy
-god of war 2 apk combo
-god of war 2 apk play store
-god of war 2 apk full version
-god of war 2 apk no verification
-god of war 2 apk rexdl
-god of war 2 apk latest version
-god of war 2 apk iso file
-god of war 2 apk bios files
-god of war 2 apk game download
-god of war 2 apk how to install
-god of war 2 apk phone configurations
-god of war 2 apk minimum requirements
-god of war 2 apk recommended requirements
-god of war 2 apk gameplay
-god of war 2 apk graphics
-god of war 2 apk features
-god of war 2 apk cheats
-god of war 2 apk tips and tricks
-god of war 2 apk walkthrough
-god of war 2 apk review
-god of war 2 apk rating
-god of war 2 apk trailer
-god of war 2 apk story
-god of war 2 apk characters
-god of war 2 apk weapons
-god of war 2 apk puzzles
-god of war 2 apk monsters
-god of war 2 apk boss battles
-god of war 2 apk greek mythology
-god of war 2 apk olympus
-god of war 2 apk kratos
-god of war 2 apk athena's sword
-god of war 2 apk net energy gain

-

However, there are some tips and tricks that can help you improve your gaming experience and make it more enjoyable. Here are some of them:

-
-

To troubleshoot this problem, you can try the following solutions:

-
    -
  • Try to upgrade your computer system or hardware to meet the minimum system requirements for running Docrepair v3.10 Build 0710 smoothly and efficiently.
  • -
  • Try to free up some memory or disk space on your computer by closing other programs or deleting unnecessary files before using Docrepair v3.10 Build 0710 to recover your corrupted Word documents (a quick way to check your free resources follows this list).
  • -
  • Try to scan and fix any hardware or software issues on your computer by using a system repair tool or a system update tool before using Docrepair v3.10 Build 0710 to recover your corrupted Word documents.
  • -
  • Try to scan and remove any virus or malware on your computer by using an antivirus software or an anti-malware software before using Docrepair v3.10 Build 0710 to recover your corrupted Word documents.
  • -
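If you want to confirm the free-space advice above before relaunching the program, available disk space can be read with Python's standard library and available RAM with the third-party psutil package (pip install psutil). The thresholds below simply reuse the minimum requirements quoted later in this article (32 MB RAM, 2 MB free disk) and are otherwise illustrative.

import shutil
import psutil  # third-party package: pip install psutil

MIN_RAM_BYTES = 32 * 1024 * 1024   # 32 MB, from the stated minimum requirements
MIN_DISK_BYTES = 2 * 1024 * 1024   # 2 MB free disk space, from the same list

def check_resources(path: str = ".") -> None:
    free_disk = shutil.disk_usage(path).free
    free_ram = psutil.virtual_memory().available
    print(f"Free disk: {free_disk / 1e6:.1f} MB, free RAM: {free_ram / 1e6:.1f} MB")
    if free_disk < MIN_DISK_BYTES or free_ram < MIN_RAM_BYTES:
        print("Below the stated minimums; close other programs or free disk space first.")

if __name__ == "__main__":
    check_resources()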
-

Problem 3: Docrepair v3.10 Build 0710 shows an error message or a warning message

-

Sometimes Docrepair v3.10 Build 0710 may show an error message or a warning message during the recovery process due to various reasons, such as:

-
    -
  • Your registration key for Docrepair v3.10 Build 0710 is invalid or expired and Docrepair v3.10 Build 0710 cannot activate its full features.
  • -
  • Your corrupted Word documents are not supported by Docrepair v3.10 Build 0710 and Docrepair v3.10 Build 0710 cannot recover them.
  • -
  • Your recovered Word documents are corrupted or damaged again by other factors and Docrepair v3.10 Build 0710 cannot open them.
  • -
  • Your recovered Word documents are incompatible with your current version of Microsoft Word and Docrepair v3.10 Build 0710 cannot display them correctly.
  • -
-

To troubleshoot this problem, you can try the following solutions:

-
    -
  • Try to check and verify your registration key for Docrepair v3.10 Build 0710 by contacting the customer support of the software at support@jufsoft.com or by visiting the official website of the software at https://www.jufsoft.com/docrepair/.
  • -
  • Try to check and verify the file type and format of your corrupted Word documents by checking their file extensions or opening them with a hex editor before using Docrepair v3.10 Build 0710 to recover them (a small signature-checking script follows this list).
  • -
  • Try to protect your recovered Word documents from being corrupted or damaged again by saving them in a safe location, making backup copies of them, and scanning them with an antivirus software or an anti-malware software before opening them.
  • -
  • Try to update your current version of Microsoft Word to the latest version or use a compatible version of Microsoft Word to open your recovered Word documents.
  • -
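As a concrete version of the hex-editor check suggested above, the first bytes of a Word file reveal its real container regardless of the extension: legacy .doc files begin with the OLE2 compound-file signature D0 CF 11 E0 A1 B1 1A E1, while .docx files are ZIP archives and begin with PK. The sketch below reads just the header; the file name is only an example.

OLE2_MAGIC = bytes.fromhex("d0cf11e0a1b11ae1")  # classic binary Word (.doc) container
ZIP_MAGIC = b"PK\x03\x04"                        # OOXML Word (.docx) is a ZIP archive

def detect_word_container(path: str) -> str:
    with open(path, "rb") as f:
        head = f.read(8)
    if head.startswith(OLE2_MAGIC):
        return "OLE2 compound file (classic .doc)"
    if head.startswith(ZIP_MAGIC):
        return "ZIP container (.docx)"
    return "unknown or damaged header"

if __name__ == "__main__":
    print(detect_word_container("report.doc"))  # example path, not from the article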
-

Conclusion

-

In conclusion, Docrepair v3.10 Build 0710 is a powerful and professional tool that can help you recover corrupted Word documents easily and quickly.

-

It can recover text, formatting, images, tables, charts, hyperlinks, bookmarks, headers, footers, comments, and other elements from your corrupted Word documents in various formats, such as DOC, DOCX, RTF, and TXT.

-

However, in order to use Docrepair v3.10 Build 0710 effectively, you need a valid registration key that can activate the full features of the software.

-

You can get a registration key for Docrepair v3.10 Build 0710 by purchasing a license for the software from its official website at https://www.jufsoft.com/docrepair/order.html.

-

You can also download and install Docrepair v3.10 Build 0710 on your computer from its official website at https://www.jufsoft.com/docrepair/.

-

After activating Docrepair v3.10 Build 0710 with a registration key, you can use it to recover your corrupted Word documents by following these steps:

-
    -
  1. Launch Docrepair v3.10 Build 0710 and select the corrupted Word documents.
  2. -
  3. Choose the recovery mode and options for Docrepair v3.10 Build 0710.
  4. -
  5. Preview and save the recovered Word documents with Docrepair v3.10 Build 0710.
  6. -
-

If you encounter any problems or errors with Docrepair v3.10 Build 0710, you can troubleshoot them by following the solutions we have provided in this article.

-

We hope this article has helped you understand how to use Docrepair v3.10 Build 0710 to recover corrupted Word documents easily and quickly.

-

If you have any questions or feedback about Docrepair v3.10 Build 0710, please feel free to contact us at support@jufsoft.com or leave a comment below.

-

Frequently Asked Questions

-

Here are some frequently asked questions about Docrepair v3.10 Build 0710 and their answers:

Q1: How much does a registration key for Docrepair v3.10 Build 0710 cost?

A1: The price of a registration key for Docrepair v3.10 Build 0710 depends on the type of license you choose. There are different types of licenses available for Docrepair v3.10 Build 0710, such as personal license, business license, site license, etc., depending on your needs and budget. You can check the price and features of each license type at https://www.jufsoft.com/docrepair/order.html.

-

Q2: How long does it take to receive a registration key for Docrepair v3.10 Build 0710 after making a payment?

-

A2: It usually takes only a few minutes to receive a registration key for Docrepair v3.10 Build 0710 after making a payment online via credit card or PayPal. You will receive an email confirmation with your registration key within minutes. However, in some rare cases, it may take up to 24 hours to receive your registration key due to some technical or security issues. If you do not receive your registration key within 24 hours, please contact the customer support of the software at support@jufsoft.com or visit the official website of the software at https://www.jufsoft.com/docrepair/.

-

Q3: How many corrupted Word documents can I recover with Docrepair v3.10 Build 0710?

-

A3: There is no limit to the number of corrupted Word documents that you can recover with Docrepair v3.10 Build 0710. You can recover as many corrupted Word documents as you want with Docrepair v3.10 Build 0710 as long as you have a valid registration key for the software.

-

Q4: What are the system requirements for running Docrepair v3.10 Build 0710?

-

A4: The minimum system requirements for running Docrepair v3.10 Build 0710 are:

-
    -
  • Windows 95/NT/98/2000/XP/Vista/7/8/8.1 (32-bit/64-bit)
  • -
  • 32 MB RAM
  • -
  • 2 MB free disk space
  • -
-

The recommended system requirements for running Docrepair v3.10 Build 0710 are:

-
    -
  • Windows XP/Vista/7/8/8.1 (32-bit/64-bit)
  • -
  • 64 MB RAM or more
  • -
  • 10 MB free disk space or more
  • -
-

Q5: What are the supported file formats and versions for Docrepair v3.10 Build 0710?

-

A5: The supported file formats and versions for Docrepair v3.10 Build 0710 are:

-
    -
  • DOC - Microsoft Word Document (Word 6.0/95/97/2000/XP/2003)
  • -
  • DOCX - Microsoft Word Document (Word 2007/2010/2013)
  • -
  • RTF - Rich Text Format (WordPad)
  • -
  • TXT - Plain Text Format (Notepad)
  • -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Maari Tamil Movie in Utorrent and Enjoy the Thrilling Story of a Local Don and His Love Interest.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Maari Tamil Movie in Utorrent and Enjoy the Thrilling Story of a Local Don and His Love Interest.md deleted file mode 100644 index fbb1eeb218f5520dff42a41facad5f006a0b9792..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Maari Tamil Movie in Utorrent and Enjoy the Thrilling Story of a Local Don and His Love Interest.md +++ /dev/null @@ -1,121 +0,0 @@ -
-

Maari Tamil Movie Download in Utorrent: How to Watch Online for Free

-

If you are a fan of Tamil movies, you might have heard of Maari, a comedy action masala film starring Dhanush and Kajal Aggarwal. The film was released in 2015 and became a hit among the audience and critics. The film has a sequel, Maari 2, which was released in 2018 and also received positive reviews.

-

maari tamil movie download in utorrent


Download Zip https://tinourl.com/2uL3eS



-

But what if you missed watching Maari or Maari 2 in theatres or on TV? Or what if you want to watch them again at your convenience? You might be tempted to download them in utorrent, a popular torrent client that allows you to download files from peer-to-peer networks. But is it safe and legal to do so? And are there any alternatives to downloading Maari in utorrent?

-

In this article, we will answer these questions and more. We will tell you what Maari is, why it is popular, how to download it in utorrent, what are the benefits and risks of doing so, and what are some other ways to watch Maari online for free.

-

Introduction

-

What is Maari?

-

Maari is a Tamil movie directed by Balaji Mohan and produced by Dhanush under his banner Wunderbar Films. The movie stars Dhanush as Maari, a local don who is notorious for his pigeon racing and rowdy activities. Kajal Aggarwal plays Sridevi, an aspiring fashion designer who falls in love with Maari. The movie also features Vijay Yesudas as Arjun Kumar, a police officer who wants to arrest Maari, Robo Shankar as Sanikilamai, Maari's loyal sidekick, and Kaali Venkat as Robert, a rival don.

-

The movie revolves around the conflict between Maari and Arjun Kumar, who tries to expose Maari's illegal activities and bring him to justice. The movie also explores the romance between Maari and Sridevi, who tries to change Maari's ways. The movie has a lot of comedy, action, drama, and music that make it an entertaining watch.

-

Why is Maari popular?

-

Maari is popular for many reasons. Some of them are:

-
    -
  • The movie has a catchy and upbeat soundtrack composed by Anirudh Ravichander, who also sang some of the songs. The songs like "Maari Thara Local", "Donu Donu Donu", and "Bagulu Odayum Dagulu Mari" became chartbusters and were loved by the fans.
  • -
  • The movie has a lot of humor and witty dialogues that make the audience laugh. Dhanush's performance as Maari is hilarious and charismatic. He delivers some punchlines that became viral on social media. Robo Shankar and Kaali Venkat also provide comic relief with their antics.
  • -
  • The movie has some thrilling action sequences that showcase Dhanush's skills as a stuntman. He performs some stunts like jumping from buildings, fighting with goons, and racing pigeons without using any body doubles or special effects.
  • -
  • The movie has a simple and engaging story that keeps the audience hooked. The movie has some twists and turns that surprise the audience. The movie also has some emotional moments that touch the audience's hearts.
  • -
-

How to download Maari in utorrent?

-

If you want to download Maari in utorrent, you need to follow these steps:

-
    -
  1. Download and install utorrent on your device from its official website or any other trusted source.
  2. -
  3. Search for "maari tamil movie download in utorrent" on any torrent search engine like LimeTorrents or Torrentz2.
  4. -
  5. Select a torrent file that has a high number of seeders and leechers. Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet.
  6. -
  7. Click on the torrent file or copy its magnet link. A magnet link is a URL that contains information about the file such as its name, size, hash value, etc. (the general shape of one is sketched after this list).
  8. -
  9. Paste the magnet link or open the torrent file in utorrent. The download will start automatically.
  10. -
  11. Wait for the download to finish. You can check the progress of the download on utorrent's interface.
  12. -
  13. Once the download is complete, you can open the file and watch it on your device using any media player that supports MKV format.
  14. -
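For reference, here is the general shape of the magnet link mentioned in step 7. The fields follow the BitTorrent magnet convention: xt carries the urn:btih infohash, dn a display name, and tr an optional tracker. The 40-hex-character hash and the names below are made-up placeholders, and the small Python sketch just splits a link into those parts.

from urllib.parse import urlparse, parse_qs

# Placeholder link: the infohash, name, and tracker are illustrative only.
magnet = ("magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567"
          "&dn=example-file-name&tr=udp://tracker.example.org:1337")

query = parse_qs(urlparse(magnet).query)
print("infohash:", query["xt"][0])  # urn:btih:<SHA-1 of the torrent's info dictionary>
print("name:", query["dn"][0])
print("tracker:", query["tr"][0])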
-

Benefits of downloading Maari in utorrent

-

High quality and fast speed

-

One of the benefits of downloading Maari in utorrent is that you can get high-quality video and audio files that enhance your viewing experience. You can choose from different resolutions like 1080p or 720p depending on your device's compatibility and preference. You can also get files with Dolby Digital or AAC audio formats that provide clear sound quality.

-

maari tamil movie tc x264 700mb torrent download
-maari tamil hq hdrip 1080p x264 dd+5.1 192kbps
-maari tamil movie songs lyrics download
-maari tamil full movie watch online hotstar
-maari 2 tamil movie yuvan shankar raja songs
-maari tamil movie isaimini dubbed movies
-maari tamil movie bagulu odayum dagulu mari song
-maari tamil movie donu donu donu mp3 download
-maari 2 tamil movie dhanush and sai pallavi
-maari tamil movie thappa dhaan theriyum lyrics
-maari tamil movie oru vidha aasai song download
-maari 2 tamil movie rowdy baby video song
-maari tamil movie maari thara local song mp3
-maari 2 tamil movie release date and trailer
-maari tamil movie review and rating by critics
-maari 2 tamil movie box office collection report
-maari tamil movie cast and crew details
-maari 2 tamil movie director balaji mohan interview
-maari tamil movie online streaming platforms list
-maari 2 tamil movie songs ringtone download
-maari tamil movie comedy scenes download
-maari 2 tamil movie climax fight scene hd
-maari tamil movie subtitles download in english
-maari 2 tamil movie bgm download masstamilan
-maari tamil movie anirudh ravichander music director
-maari 2 tamil movie kajal aggarwal cameo appearance
-maari tamil movie robosankar and kalloori vinoth comedy
-maari 2 tamil movie varalaxmi sarathkumar villain role
-maari tamil movie dhanush hairstyle and sunglasses
-maari 2 tamil movie making video behind the scenes
-maari tamil movie sridevi character actress name
-maari 2 tamil movie vijay yesudas police role
-maari tamil movie theme music download mp3
-maari 2 tamil movie whatsapp status video download
-maari tamil movie memes and trolls on social media
-maari 2 tamil movie awards and nominations list
-maari tamil movie shooting locations and sets
-maari 2 tamil movie deleted scenes and bloopers
-maari tamil movie poster and wallpapers download hd
-maari 2 tamil movie teaser and motion poster youtube

-

Another benefit of downloading Maari in utorrent is that you can get fast download speed that saves your time and bandwidth. Utorrent uses peer-to-peer technology that allows you to download files from multiple sources simultaneously. This increases the speed of the download as well as reduces the load on any single source.

-

No ads or malware

-

Another benefit of downloading Maari in utorrent is that you can avoid annoying ads or malware that might interrupt your viewing experience or harm your device. Utorrent does not show any ads or pop-ups while downloading or playing files. Utorrent also scans files for viruses or malware before downloading them and warns you if any are detected.

-

Save money and time

-

Another benefit of downloading Maari in utorrent is that you can save money and time that you might otherwise spend on buying or renting DVDs or Blu-ray discs or subscribing to streaming platforms or OTT services. Utorrent allows you to download files for free without any registration or subscription fees. Utorrent also allows you to download files at any time without any restrictions or limitations.

-

Risks of downloading Maari in utorrent

-

Legal issues and copyright infringement

-

One of the risks of downloading Maari in utorrent is that you might face legal issues and copyright infringement charges if you are caught by authorities or rights holders. Downloading or sharing copyrighted content without permission or authorization is illegal in many countries and regions. You might face fines, lawsuits, imprisonment, or other penalties if you are found guilty of piracy.

-

Privacy and security threats

-

Another risk of downloading Maari in utorrent is that you might expose your privacy and security to threats from hackers, trackers, spyware, malware, etc. Utorrent does not encrypt your data or hide your IP address while downloading or uploading files. This means that anyone can see what files you are downloading or sharing, where you are located, what device you are using, etc. This information can be used by hackers to steal your identity, data, money, etc., or by trackers to monitor your online activity, behavior, preferences, etc.

-

Ethical and moral concerns

-

Another risk of downloading Maari in utorrent is that you might violate ethical and moral principles by depriving the creators and producers of their rightful earnings and recognition. Downloading or sharing pirated content means that you are not paying for their hard work, creativity, talent, etc., or supporting their livelihoods. This might affect their motivation, quality, quantity, etc., of their future works.

-

Alternatives to downloading Maari in utorrent

-

Streaming platforms and OTT services

-

One of the alternatives to downloading Maari in utorrent is to watch it on streaming platforms and OTT services that offer Tamil movies. Some of them are:

-
    -
  • Netflix: Netflix is a global streaming service that offers a wide range of movies, shows, documentaries, and originals in various languages and genres. You can watch Maari and Maari 2 on Netflix with a subscription plan that starts from Rs 199 per month.
  • -
  • Prime Video: Prime Video is an online video service that offers movies, shows, sports, and originals in various languages and genres. You can watch Maari and Maari 2 on Prime Video with a subscription plan that starts from Rs 129 per month or Rs 999 per year.
  • -
  • Zee5: Zee5 is a digital entertainment platform that offers movies, shows, live TV, music, and originals in various languages and genres. You can watch Maari and Maari 2 on Zee5 with a subscription plan that starts from Rs 99 per month or Rs 499 per year.
  • -
  • Hotstar: Hotstar is an online video service that offers movies, shows, sports, news, and originals in various languages and genres. You can watch Maari and Maari 2 on Hotstar with a subscription plan that starts from Rs 299 per month or Rs 1499 per year.
  • -
  • SonyLIV: SonyLIV is a video-on-demand service that offers movies, shows, sports, live TV, and originals in various languages and genres. You can watch Maari and Maari 2 on SonyLIV with a subscription plan that starts from Rs 299 per month or Rs 999 per year.
  • -
-

Legal torrent sites and magnet links

-

Another alternative to downloading Maari in utorrent is to use legal torrent sites and magnet links that offer Tamil movies. Some of them are:

-
    -
  • Tamilrockers: Tamilrockers is a torrent site that offers Tamil movies, shows, music, and web series for free download. You can find Maari and Maari 2 on Tamilrockers with magnet links that you can paste on utorrent or any other torrent client.
  • -
  • LimeTorrents: LimeTorrents is a torrent site that offers movies, shows, music, games, software, and anime for free download. You can find Maari and Maari 2 on LimeTorrents with torrent files or magnet links that you can open or paste on utorrent or any other torrent client.
  • -
  • Torrentz2: Torrentz2 is a torrent search engine that indexes torrents from various sources. You can search for Maari and Maari 2 on Torrentz2 and find torrent files or magnet links from different torrent sites that you can open or paste on utorrent or any other torrent client.
  • -
-

DVD and Blu-ray discs

-

Another alternative to downloading Maari in utorrent is to buy or rent DVD or Blu-ray discs of the movie. You can find DVD or Blu-ray discs of Maari and Maari 2 on online platforms like Amazon or Flipkart or offline stores like Landmark or Reliance Digital. You can watch the movie on your device using a DVD or Blu-ray player.

-

Conclusion

-

In conclusion, Maari is a Tamil movie that is popular for its comedy, action, music, and story. You can download it in utorrent using torrent files or magnet links from various torrent sites. However, this might involve some risks like legal issues, privacy threats, ethical concerns, etc. Therefore, you might want to consider some alternatives like streaming platforms, OTT services, legal torrent sites, or DVD/Blu-ray discs.

-

FAQs

-
    -
  1. What is the rating of Maari?
  2. -

    Maari has a rating of 6/10 on IMDb and 3/5 on Times of India.

    -
  3. Who composed the music of Maari?
  4. -

    Anirudh Ravichander composed the music of Maari.

    -
  5. Who directed the sequel of Maari?
  6. -

    Balaji Mohan directed the sequel of Maari.

    -
  7. Who played the female lead in Maari 2?
  8. -

    Sai Pallavi played the female lead in Maari 2.

    -
  9. Is there a third part of Maari?
  10. -

    No, there is no official confirmation of a third part of Maari.

    -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/texttospeech.py b/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/texttospeech.py deleted file mode 100644 index 3c88925cac0c56e52d35acfa5d6d7e5ce51329c7..0000000000000000000000000000000000000000 --- a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/texttospeech.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals -from typing import Tuple - -from scipy.io.wavfile import write -from hifi.env import AttrDict -from hifi.models import Generator - -import numpy as np -import os -import json - -import torch -from text import text_to_sequence -import commons -import models -import utils -import sys -from argparse import ArgumentParser - - -def check_directory(dir): - if not os.path.exists(dir): - sys.exit("Error: {} directory does not exist".format(dir)) - - -class TextToMel: - def __init__(self, glow_model_dir, device="cuda"): - self.glow_model_dir = glow_model_dir - check_directory(self.glow_model_dir) - self.device = device - self.hps, self.glow_tts_model = self.load_glow_tts() - pass - - def load_glow_tts(self): - hps = utils.get_hparams_from_dir(self.glow_model_dir) - checkpoint_path = utils.latest_checkpoint_path(self.glow_model_dir) - symbols = list(hps.data.punc) + list(hps.data.chars) - glow_tts_model = models.FlowGenerator( - len(symbols) + getattr(hps.data, "add_blank", False), - out_channels=hps.data.n_mel_channels, - **hps.model - ) # .to(self.device) - - if self.device == "cuda": - glow_tts_model.to("cuda") - - utils.load_checkpoint(checkpoint_path, glow_tts_model) - glow_tts_model.decoder.store_inverse() - _ = glow_tts_model.eval() - - return hps, glow_tts_model - - def generate_mel(self, text, noise_scale=0.667, length_scale=1.0): - symbols = list(self.hps.data.punc) + list(self.hps.data.chars) - cleaner = self.hps.data.text_cleaners - if getattr(self.hps.data, "add_blank", False): - text_norm = text_to_sequence(text, symbols, cleaner) - text_norm = commons.intersperse(text_norm, len(symbols)) - else: # If not using "add_blank" option during training, adding spaces at the beginning and the end of utterance improves quality - text = " " + text.strip() + " " - text_norm = text_to_sequence(text, symbols, cleaner) - - sequence = np.array(text_norm)[None, :] - - if self.device == "cuda": - x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long() - x_tst_lengths = torch.tensor([x_tst.shape[1]]).cuda() - else: - x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).long() - x_tst_lengths = torch.tensor([x_tst.shape[1]]) - - with torch.no_grad(): - (y_gen_tst, *_), *_, (attn_gen, *_) = self.glow_tts_model( - x_tst, - x_tst_lengths, - gen=True, - noise_scale=noise_scale, - length_scale=length_scale, - ) - - return y_gen_tst - #return y_gen_tst.cpu().detach().numpy() - - -class MelToWav: - def __init__(self, hifi_model_dir, device="cuda"): - self.hifi_model_dir = hifi_model_dir - check_directory(self.hifi_model_dir) - self.device = device - self.h, self.hifi_gan_generator = self.load_hifi_gan() - pass - - def load_hifi_gan(self): - checkpoint_path = utils.latest_checkpoint_path(self.hifi_model_dir, regex="g_*") - config_file = os.path.join(self.hifi_model_dir, "config.json") - data = open(config_file).read() - json_config = json.loads(data) - h = AttrDict(json_config) - torch.manual_seed(h.seed) - - generator = Generator(h).to(self.device) - - assert os.path.isfile(checkpoint_path) - print("Loading 
'{}'".format(checkpoint_path)) - state_dict_g = torch.load(checkpoint_path, map_location=self.device) - print("Complete.") - - generator.load_state_dict(state_dict_g["generator"]) - - generator.eval() - generator.remove_weight_norm() - - return h, generator - - def generate_wav(self, mel): - #mel = torch.FloatTensor(mel).to(self.device) - - y_g_hat = self.hifi_gan_generator(mel.to(self.device)) # passing through vocoder - audio = y_g_hat.squeeze() - audio = audio * 32768.0 - audio = audio.cpu().detach().numpy().astype("int16") - - return audio, self.h.sampling_rate - - - - - -if __name__ == "__main__": - - parser = ArgumentParser() - parser.add_argument("-m", "--model", required=True, type=str) - parser.add_argument("-g", "--gan", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-t", "--text", type=str, required=True) - parser.add_argument("-w", "--wav", type=str, required=True) - - args = parser.parse_args() - - text_to_mel = TextToMel(glow_model_dir=args.model, device=args.device) - mel_to_wav = MelToWav(hifi_model_dir=args.gan, device=args.device) - - mel = text_to_mel.generate_mel(args.text) - audio, sr = mel_to_wav.generate_wav(mel) - - write(filename=args.wav, rate=sr, data=audio) \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen Extra Quality.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen Extra Quality.md deleted file mode 100644 index 78f26a0a949a788dbcf0d4eefc4ed598fff786e1..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen Extra Quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen


DOWNLOAD ✪✪✪ https://urlgoal.com/2uCJHy



-
-5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6.2.212.x64.Incl.Keymaker-CORE Serial Key Keygen High Quality. 5 item. ACDSee.Pro.v6. 4fefd39f24
-
-
-

diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/BaibolySyFihiranapdf !!HOT!!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/BaibolySyFihiranapdf !!HOT!!.md deleted file mode 100644 index 1eab3478bd4628299ab2747d79037c46a7f3f775..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/BaibolySyFihiranapdf !!HOT!!.md +++ /dev/null @@ -1,10 +0,0 @@ - -

https://www.slants.com/profiles/baiboly-sy-fihiranapdf https://www.livecoda.com/profile/baibolysyfihiranapdf/ https://slickg.com/user/baibolysyfihiranapdf/ https://indian-landscapes.com/profile/https-www-eelgrea-com.eelgrea-com.arctic.first-monday-5c.html https://www.accounting-planet.com/profile/BaibolySyFihiranapdf/profile.

-

https://www.virtualgoat.com/profile/BaibolySyFihiranapdf. https://www.cy7.com/cyber-cartel/messages/BibolySyFihiranapdf. html https://redstem.com/profile/BaibolySyFihiranapdf https://redstem.com/profile/BaibolySyFihiranapdf. https://www.personalsolutions.com/user/BaibolySyFihiranapdf/profile

-

BaibolySyFihiranapdf


Download Filehttps://urlgoal.com/2uCL2B



-

https://appjabob.com/profile/BaibolySyFihiranapdf/profile/ https://profiles.webhostingtalk.com/kdabodee.en.ftre/profile/http://w3layouts.com/profile/BaibolySyFihiranapdf/profile/ https://epackers.com/w3layouts/profile/BaibolySyFihiranapdf/profile/ https://www.youngmajorette.com/profile/BaibolySyFihiranapdf/profile/ https://faig.com/profile/https-faig-com.blogspot.in.html.fort-fifty-six.html

-

https://cdms.awfc.org/MemberProfile.asp?UserId=bbaibally https://dickbuttler.com/user/profile/BaibolySyFihiranapdf/ https://www.virtualgoat.com/profile/BaibolySyFihiranapdf/profile/ https://ftre.com/profile/BaibolySyFihiranapdf/profile/

-

https://www.bigrockresort.net/profile/Mission-To-Mars-Hindi-Dubbed-Movie-44/profile https://es.bsrschool.org/profile/BaibolySyFihiranapdf/profile

-

https://es.bsrschool.org/profile/BaibolySyFihiranapdf/profile https://www.bigrockresort.net/profile/Mission-To-Mars-Hindi-Dubbed-Movie-44/profile

899543212b
-
-
\ No newline at end of file diff --git a/spaces/reha/Stick_Tech/terms.md b/spaces/reha/Stick_Tech/terms.md deleted file mode 100644 index b3f1803a34d81e98bbdda473086848a84a0ce4b1..0000000000000000000000000000000000000000 --- a/spaces/reha/Stick_Tech/terms.md +++ /dev/null @@ -1,57 +0,0 @@ -Please read the following agreement before using this model; this agreement is adapted from MasterSatori - -AI 粘连科技 Model Usage Agreement 粘连科技 Official https://space.bilibili.com/248582596 - -[Preamble] The owner and trainer of the AI 粘连科技 model, 海龙王kokopelli@bilibili (hereinafter also "I"), uses this AI 粘连科技 Model Usage Agreement (hereinafter "this Agreement") to explain the responsibilities you must fulfill and the permitted scope of use when using the AI 粘连科技 model. - -[Special note] Before using the AI 粘连科技 model, please read this Agreement carefully and make sure you fully understand it; start using the model only after you have confirmed your understanding and agreement. - -This Agreement will help you understand the following: - -1. Disclaimer - -2. What you must do when using the AI 粘连科技 model in non-personal settings - -3. The permitted scope of use of AI 粘连科技 - -4. How to contact me - -(1) Disclaimer: - -Any loss you cause to any other entity (individual or company) through your use of AI 粘连科技 is borne by you alone, and all legal risks and legal disputes arising from your use of the AI 粘连科技 model are likewise borne by you alone. - -(2) What you must do when using the AI 粘连科技 model in non-personal settings: - -1. Credit the soVITS project author: Rcell - -2. Credit me (optional): 海龙王kokopelli@bilibili - -(3) The permitted scope of use of the AI 粘连科技 model: - -1. What you may use it for: - -(1) Personal use - -(2) Using the generated audio in uploads (the uploaded content must not include anything listed under "What you may not use it for") - -(3) Derivative works that comply with the hosting platform's rules and local law - -(4) Works made with this software must state that AI was used - -2. What you may not use it for: - -(1) Commercial use - -(2) Impersonating me - -(3) Use as a voice changer or the like - -(4) Re-uploading the AI 粘连科技 model - -(5) Low-effort content (synthesized audio with excessive popping or electronic artifacts counts as "low-effort content") - -(6) Sensitive content (including but not limited to politics, vulgarity, pornography, violence, etc.) - -3. Supplement: - -In any situation not mentioned above, you should ask for my consent before using the AI 粘连科技 model or the data it produces. 海龙王kokopelli@bilibili. diff --git a/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/__init__.py b/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/__init__.py deleted file mode 100644 index ad70314e8e0ccc18b946ff1317f6415c1892747a..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -SPACY_LANGUAGE_MAPPER = { - "ca": "ca_core_news_sm", - "da": "da_core_news_sm", - "de": "de_core_news_sm", - "el": "el_core_news_sm", - "en": "en_core_web_sm", - "es": "es_core_news_sm", - "fr": "fr_core_news_sm", - "it": "it_core_news_sm", - "ja": "ja_core_news_sm", - "lt": "lt_core_news_sm", - "mk": "mk_core_news_sm", - "nb": "nb_core_news_sm", - "nl": "nl_core_news_sm", - "pl": "pl_core_news_sm", - "pt": "pt_core_news_sm", - "ro": "ro_core_news_sm", - "ru": "ru_core_news_sm", - "xx": "xx_sent_ud_sm", - "zh": "zh_core_web_sm", - "ca_core_news_sm": "ca_core_news_sm", - "ca_core_news_md": "ca_core_news_md", - "ca_core_news_lg": "ca_core_news_lg", - "ca_core_news_trf": "ca_core_news_trf", - "da_core_news_sm": "da_core_news_sm", - "da_core_news_md": "da_core_news_md", - "da_core_news_lg": "da_core_news_lg", - "da_core_news_trf": "da_core_news_trf", - "de_core_news_sm": "de_core_news_sm", - "de_core_news_md": "de_core_news_md", - "de_core_news_lg": "de_core_news_lg", - "de_dep_news_trf": "de_dep_news_trf", - "el_core_news_sm": "el_core_news_sm", - "el_core_news_md": "el_core_news_md", - "el_core_news_lg": "el_core_news_lg", - "en_core_web_sm": "en_core_web_sm", - "en_core_web_md": "en_core_web_md", - "en_core_web_lg": "en_core_web_lg", - "en_core_web_trf": "en_core_web_trf", - "es_core_news_sm": "es_core_news_sm", - "es_core_news_md": "es_core_news_md", - "es_core_news_lg": "es_core_news_lg", - "es_dep_news_trf": "es_dep_news_trf", - "fr_core_news_sm": "fr_core_news_sm", - "fr_core_news_md": "fr_core_news_md", - "fr_core_news_lg": "fr_core_news_lg", - "fr_dep_news_trf": "fr_dep_news_trf", - "it_core_news_sm": "it_core_news_sm", - "it_core_news_md": "it_core_news_md", - "it_core_news_lg": "it_core_news_lg", - "ja_core_news_sm": "ja_core_news_sm", - "ja_core_news_md": "ja_core_news_md", - "ja_core_news_lg": "ja_core_news_lg", - "ja_dep_news_trf": "ja_dep_news_trf", -
"lt_core_news_sm": "lt_core_news_sm", - "lt_core_news_md": "lt_core_news_md", - "lt_core_news_lg": "lt_core_news_lg", - "mk_core_news_sm": "mk_core_news_sm", - "mk_core_news_md": "mk_core_news_md", - "mk_core_news_lg": "mk_core_news_lg", - "nb_core_news_sm": "nb_core_news_sm", - "nb_core_news_md": "nb_core_news_md", - "nb_core_news_lg": "nb_core_news_lg", - "nl_core_news_sm": "nl_core_news_sm", - "nl_core_news_md": "nl_core_news_md", - "nl_core_news_lg": "nl_core_news_lg", - "pl_core_news_sm": "pl_core_news_sm", - "pl_core_news_md": "pl_core_news_md", - "pl_core_news_lg": "pl_core_news_lg", - "pt_core_news_sm": "pt_core_news_sm", - "pt_core_news_md": "pt_core_news_md", - "pt_core_news_lg": "pt_core_news_lg", - "ro_core_news_sm": "ro_core_news_sm", - "ro_core_news_md": "ro_core_news_md", - "ro_core_news_lg": "ro_core_news_lg", - "ru_core_news_sm": "ru_core_news_sm", - "ru_core_news_md": "ru_core_news_md", - "ru_core_news_lg": "ru_core_news_lg", - "xx_ent_wiki_sm": "xx_ent_wiki_sm", - "xx_sent_ud_sm": "xx_sent_ud_sm", - "zh_core_web_sm": "zh_core_web_sm", - "zh_core_web_md": "zh_core_web_md", - "zh_core_web_lg": "zh_core_web_lg", - "zh_core_web_trf": "zh_core_web_trf", -} - -from relik.inference.data.tokenizers.regex_tokenizer import RegexTokenizer -from relik.inference.data.tokenizers.spacy_tokenizer import SpacyTokenizer -from relik.inference.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer diff --git a/spaces/rizam/literature-research-tool/scripts/train/train.py b/spaces/rizam/literature-research-tool/scripts/train/train.py deleted file mode 100644 index 9f0651f473fd4e5f72bab4c17152c2c0bdcbdbe6..0000000000000000000000000000000000000000 --- a/spaces/rizam/literature-research-tool/scripts/train/train.py +++ /dev/null @@ -1,171 +0,0 @@ -def train( - push_to_hub:bool, - num_epoch: int, - train_batch_size: int, - eval_batch_size: int, -): - import torch - import numpy as np - - # 1. Dataset - from datasets import load_dataset - dataset = load_dataset("Adapting/abstract-keyphrases") - - # 2. Model - from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - from lrt.clustering.models import KeyBartAdapter - tokenizer = AutoTokenizer.from_pretrained("Adapting/KeyBartAdapter") - - ''' - Or you can just use the initial model weights from Huggingface: - model = AutoModelForSeq2SeqLM.from_pretrained("Adapting/KeyBartAdapter", - revision='9c3ed39c6ed5c7e141363e892d77cf8f589d5999') - ''' - - model = KeyBartAdapter(256) - - # 3. preprocess dataset - dataset = dataset.shuffle() - - def preprocess_function(examples): - inputs = examples['Abstract'] - targets = examples['Keywords'] - model_inputs = tokenizer(inputs, truncation=True) - - # Set up the tokenizer for targets - with tokenizer.as_target_tokenizer(): - labels = tokenizer(targets, truncation=True) - - model_inputs["labels"] = labels["input_ids"] - return model_inputs - - tokenized_dataset = dataset.map( - preprocess_function, - batched=True, - remove_columns=dataset["train"].column_names, - ) - - # 4. evaluation metrics - def compute_metrics(eval_preds): - preds = eval_preds.predictions - labels = eval_preds.label_ids - if isinstance(preds, tuple): - preds = preds[0] - print(preds.shape) - if len(preds.shape) == 3: - preds = preds.argmax(axis=-1) - - decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Some simple post-processing - decoded_preds = [a.strip().split(';') for a in decoded_preds] - decoded_labels = [a.strip().split(';') for a in decoded_labels] - - precs, recalls, f_scores = [], [], [] - num_match, num_pred, num_gold = [], [], [] - for pred, label in zip(decoded_preds, decoded_labels): - pred_set = set(pred) - label_set = set(label) - match_set = label_set.intersection(pred_set) - p = float(len(match_set)) / float(len(pred_set)) if len(pred_set) > 0 else 0.0 - r = float(len(match_set)) / float(len(label_set)) if len(label_set) > 0 else 0.0 - f1 = float(2 * (p * r)) / (p + r) if (p + r) > 0 else 0.0 - precs.append(p) - recalls.append(r) - f_scores.append(f1) - num_match.append(len(match_set)) - num_pred.append(len(pred_set)) - num_gold.append(len(label_set)) - - # print(f'raw_PRED: {raw_pred}') - print(f'PRED: num={len(pred_set)} - {pred_set}') - print(f'GT: num={len(label_set)} - {label_set}') - print(f'p={p}, r={r}, f1={f1}') - print('-' * 20) - - result = { - 'precision@M': np.mean(precs) * 100.0, - 'recall@M': np.mean(recalls) * 100.0, - 'fscore@M': np.mean(f_scores) * 100.0, - 'num_match': np.mean(num_match), - 'num_pred': np.mean(num_pred), - 'num_gold': np.mean(num_gold), - } - - result = {k: round(v, 2) for k, v in result.items()} - return result - - # 5. train - from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer - - data_collator = DataCollatorForSeq2Seq(tokenizer, model=model) - - model_name = 'KeyBartAdapter' - - args = Seq2SeqTrainingArguments( - model_name, - evaluation_strategy="epoch", - save_strategy="epoch", - learning_rate=2e-5, - per_device_train_batch_size=train_batch_size, - per_device_eval_batch_size=eval_batch_size, - weight_decay=0.01, - save_total_limit=3, - num_train_epochs=num_epoch, - logging_steps=4, - load_best_model_at_end=True, - metric_for_best_model='fscore@M', - predict_with_generate=True, - fp16=torch.cuda.is_available(), # speeds up training on modern GPUs. - # eval_accumulation_steps=10, - ) - - trainer = Seq2SeqTrainer( - model, - args, - train_dataset=tokenized_dataset["train"], - eval_dataset=tokenized_dataset["train"], - data_collator=data_collator, - tokenizer=tokenizer, - compute_metrics=compute_metrics - ) - - trainer.train() - - # 6. 
push - if push_to_hub: - commit_msg = f'{model_name}_{num_epoch}' - tokenizer.push_to_hub(commit_message=commit_msg, repo_id=model_name) - model.push_to_hub(commit_message=commit_msg, repo_id=model_name) - - return model, tokenizer - -if __name__ == '__main__': - import sys - from pathlib import Path - project_root = Path(__file__).parent.parent.parent.absolute() - sys.path.append(project_root.__str__()) - - - # code - import argparse - parser = argparse.ArgumentParser() - - parser.add_argument("--epoch", help="number of epochs", default=30) - parser.add_argument("--train_batch_size", help="training batch size", default=16) - parser.add_argument("--eval_batch_size", help="evaluation batch size", default=16) - parser.add_argument("--push", help="whether push the model to hub", action='store_true') - - args = parser.parse_args() - print(args) - - model, tokenizer = train( - push_to_hub= bool(args.push), - num_epoch= int(args.epoch), - train_batch_size= int(args.train_batch_size), - eval_batch_size= int(args.eval_batch_size) - ) - diff --git a/spaces/rorallitri/biomedical-language-models/logs/Barbie Beauty Boutique Pc Game Iso Free Download.r gottingen gaysex mol Play with Barbie and Friends.md b/spaces/rorallitri/biomedical-language-models/logs/Barbie Beauty Boutique Pc Game Iso Free Download.r gottingen gaysex mol Play with Barbie and Friends.md deleted file mode 100644 index f335d19e620735fc76d85bdbc067c6da8dde2264..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Barbie Beauty Boutique Pc Game Iso Free Download.r gottingen gaysex mol Play with Barbie and Friends.md +++ /dev/null @@ -1,6 +0,0 @@ -
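For reference: the deleted training script above is normally launched through its argparse CLI, but the train() function can also be called directly. A minimal sketch using the script's own default values:

# Hypothetical direct call into the deleted train() function; values mirror the CLI defaults.
model, tokenizer = train(
    push_to_hub=False,      # the --push flag; leave False unless logged in to the HF Hub
    num_epoch=30,           # --epoch default
    train_batch_size=16,    # --train_batch_size default
    eval_batch_size=16,     # --eval_batch_size default
)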

Barbie Beauty Boutique Pc Game Iso Free Download.r gottingen gaysex mol


Download Zip ☆☆☆ https://tinurll.com/2uzlzv



- - aaccfb2cb3
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Kitab Durratun Nasihin Pdf 24 Kumpulan Nasehat Peringatan dan Hikayat.md b/spaces/rorallitri/biomedical-language-models/logs/Download Kitab Durratun Nasihin Pdf 24 Kumpulan Nasehat Peringatan dan Hikayat.md deleted file mode 100644 index d5012af40d4f918cb02535878b179edf8e239ec2..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Kitab Durratun Nasihin Pdf 24 Kumpulan Nasehat Peringatan dan Hikayat.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

yarrai 19191a764c
-kitab-durratun-nasihin-pdf-free
[ -kitab-durratun-nasihin-pdf-free ]
link= -kitab-durratun-nasihin-pdf-free

-

Download Kitab Durratun Nasihin Pdf 24


Download Filehttps://tinurll.com/2uznJP



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/HACK Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage.md b/spaces/rorallitri/biomedical-language-models/logs/HACK Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage.md deleted file mode 100644 index 6cbf24679ea86c05d76de33cefd356fcfe4cfa00..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HACK Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage.md +++ /dev/null @@ -1,12 +0,0 @@ -

HACK Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage


Download Zip ►►►►► https://tinurll.com/2uznLW



-
-HACK Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage Crack. -Adobe Photoshop CC 2014 Activation tool for 32-bit and 64-bit. -Activating Photoshop CC 2014 is the process of sending an email notification of successful activation. -Adobe Photoshop CC 2014 (32 Bit) Download. 8a78ff9644
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/HTML Compiler 2016.18 Serial Key (100 Full Version) !!BETTER!!.md b/spaces/rorallitri/biomedical-language-models/logs/HTML Compiler 2016.18 Serial Key (100 Full Version) !!BETTER!!.md deleted file mode 100644 index 196e17d89f1536d1e0823728fff7d9059e725085..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/HTML Compiler 2016.18 Serial Key (100 Full Version) !!BETTER!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

HTML Compiler 2016.18 serial key (100% Full Version)


Download Zip →→→ https://tinurll.com/2uzlsx



- -by R Gouicem · 2020 — Since then, a large number of scheduling algorithms have been developed. They are designed for different workloads and performance goals. Grants.gov/web/grants/applicants/organization-registration.html. First you need to check if your parameters meet the criteria set for your project. If you are not sure which eligibility criteria you should meet, please contact your organization's grants department or any other grant center. If you have applied for a grant with R Gouicem, you can contact someone in the Grants Office for more information: info@rfgouicem.org or rgouicem.org. 8a78ff9644
-
-
-

diff --git a/spaces/rorallitri/biomedical-language-models/logs/J?????w Pson????? ?? REPACK.md b/spaces/rorallitri/biomedical-language-models/logs/J?????w Pson????? ?? REPACK.md deleted file mode 100644 index 65e97104f62e8179d3529f08a71904f473e98651..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/J?????w Pson????? ?? REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

j?????w pϠson????? ??


DOWNLOAD ……… https://tinurll.com/2uzm1K



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/metrics/__init__.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/metrics/__init__.py deleted file mode 100644 index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/metrics/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/rushic24/Priyanka-Chopra-TTS/training/clean_text.py b/spaces/rushic24/Priyanka-Chopra-TTS/training/clean_text.py deleted file mode 100644 index e6c3aab1cfe0e0f0b0716fec854fdd6b15427fb5..0000000000000000000000000000000000000000 --- a/spaces/rushic24/Priyanka-Chopra-TTS/training/clean_text.py +++ /dev/null @@ -1,113 +0,0 @@ -import argparse -import re - -import inflect -from training import DEFAULT_ALPHABET - -INFLECT_ENGINE = inflect.engine() -COMMA_NUMBER_RE = re.compile(r"([0-9][0-9\,]+[0-9])") -DECIMAL_NUMBER_RE = re.compile(r"([0-9]+\.[0-9]+)") -NUMBER_RE = re.compile(r"[0-9]+") -ORDINALS = re.compile(r"([0-9]+[st|nd|rd|th]+)") -CURRENCY = re.compile(r"([£|$|€]+[0-9]+)") -WHITESPACE_RE = re.compile(r"\s+") -ALLOWED_CHARACTERS_RE = re.compile("[^a-z ,.!?'-]+") -MONETARY_REPLACEMENT = {"$": " dollars", "£": " pounds", "€": " euros"} -ABBREVIATION_REPLACEMENT = { - "mr.": "mister", - "mrs.": "misess", - "dr.": "doctor", - "no.": "number", - "st.": "saint", - "co.": "company", - "jr.": "junior", - "maj.": "major", - "gen.": "general", - "drs.": "doctors", - "rev.": "reverend", - "lt.": "lieutenant", - "hon.": "honorable", - "sgt.": "sergeant", - "capt.": "captain", - "esq.": "esquire", - "ltd.": "limited", - "col.": "colonel", - "ft.": "fort", -} - - -def clean_text(text, symbols=DEFAULT_ALPHABET, remove_invalid_characters=True): - """ - Cleans text. This includes: - - Replacing monetary terms (i.e. $ -> dollars) - - Converting ordinals to full words (i.e. 1st -> first) - - Converting numbers to their full word format (i.e. 100 -> one hundred) - - Replacing abbreviations (i.e. dr. 
-> doctor) - - Removing invalid characters (non utf-8 or invalid punctuation) - - Parameters - ---------- - text : str - Text to clean - symbols : list (optional) - List of valid symbols in text (default is English alphabet & punctuation) - remove_invalid_characters : bool (optional) - Whether to remove characters not in symbols list (default is True) - - Returns - ------- - str - Cleaned text - """ - text = text.strip() - text = text.lower() - # Convert currency to words - money = re.findall(CURRENCY, text) - for amount in money: - for key, value in MONETARY_REPLACEMENT.items(): - if key in amount: - text = text.replace(amount, amount[1:] + value) - # Convert ordinals to words - ordinals = re.findall(ORDINALS, text) - for ordinal in ordinals: - text = text.replace(ordinal, INFLECT_ENGINE.number_to_words(ordinal)) - # Convert comma & decimal numbers to words - numbers = re.findall(COMMA_NUMBER_RE, text) + re.findall(DECIMAL_NUMBER_RE, text) - for number in numbers: - text = text.replace(number, INFLECT_ENGINE.number_to_words(number)) - # Convert standard numbers to words - numbers = re.findall(NUMBER_RE, text) - for number in numbers: - text = text.replace(number, INFLECT_ENGINE.number_to_words(number)) - # Replace abbreviations - for key, value in ABBREVIATION_REPLACEMENT.items(): - text = text.replace(" " + key + " ", " " + value + " ") - # Collapse whitespace - text = re.sub(WHITESPACE_RE, " ", text) - # Remove banned characters - if remove_invalid_characters: - text = "".join([c for c in text if c in symbols]) - return text - - -if __name__ == "__main__": - """Script to clean text for training""" - parser = argparse.ArgumentParser(description="Clean & improve text for training") - parser.add_argument("-f", "--file", help="Text file path", type=str, required=True) - parser.add_argument("-o", "--output", help="Output text file path", type=str, required=True) - args = parser.parse_args() - - with open(args.file) as f: - rows = f.readlines() - - cleaned_text = [] - - for row in rows: - filename, text = row.split("|") - text = clean_text(text) - cleaned_text.append(f"{filename}|{text}") - - with open(args.output, "w") as f: - for line in cleaned_text: - f.write(line) - f.write("\n") diff --git a/spaces/rushic24/Priyanka-Chopra-TTS/training/tacotron2_model/loss.py b/spaces/rushic24/Priyanka-Chopra-TTS/training/tacotron2_model/loss.py deleted file mode 100644 index 0ed6809dca1807e169d8cb1d84ffdf3c4f58f1b1..0000000000000000000000000000000000000000 --- a/spaces/rushic24/Priyanka-Chopra-TTS/training/tacotron2_model/loss.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -BSD 3-Clause License - -Copyright (c) 2018, NVIDIA Corporation -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" -from torch import nn - - -class Tacotron2Loss(nn.Module): - def __init__(self): - super(Tacotron2Loss, self).__init__() - - def forward(self, model_output, targets): - mel_target, gate_target = targets[0], targets[1] - mel_target.requires_grad = False - gate_target.requires_grad = False - gate_target = gate_target.view(-1, 1) - - mel_out, mel_out_postnet, gate_out, _ = model_output - gate_out = gate_out.view(-1, 1) - mel_loss = nn.MSELoss()(mel_out, mel_target) + nn.MSELoss()(mel_out_postnet, mel_target) - gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target) - return mel_loss + gate_loss diff --git a/spaces/sahshd/ChuanhuChatGPT/modules/overwrites.py b/spaces/sahshd/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index 035a4a52722d66ee28af1c05231ad1cea3339ef5..0000000000000000000000000000000000000000 --- a/spaces/sahshd/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html -from gradio_client import utils as client_utils - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. 
Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | Tuple | List | None, message_type: str - ) -> str | Dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - filepath = chat_message[0] - mime_type = client_utils.get_mimetype(filepath) - filepath = self.make_temp_copy_if_needed(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - if message_type == "bot": - if not detect_converted_mark(chat_message): - chat_message = convert_mdtext(chat_message) - elif message_type == "user": - if not detect_converted_mark(chat_message): - chat_message = convert_asis(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/PositionwiseFeedForward.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/PositionwiseFeedForward.py deleted file mode 100644 index 1938b392e631c8c9d4179f2b34557a6b531a0174..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization/IMSToucan/Layers/PositionwiseFeedForward.py +++ /dev/null @@ -1,26 +0,0 @@ -# Written by Shigeki Karita, 2019 -# Published under Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) -# Adapted by Florian Lux, 2021 - - -import torch - - -class PositionwiseFeedForward(torch.nn.Module): - """ - Args: - idim (int): Input dimenstion. - hidden_units (int): The number of hidden units. - dropout_rate (float): Dropout rate. - - """ - - def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()): - super(PositionwiseFeedForward, self).__init__() - self.w_1 = torch.nn.Linear(idim, hidden_units) - self.w_2 = torch.nn.Linear(hidden_units, idim) - self.dropout = torch.nn.Dropout(dropout_rate) - self.activation = activation - - def forward(self, x): - return self.w_2(self.dropout(self.activation(self.w_1(x)))) diff --git a/spaces/scedlatioru/img-to-music/example/Akkor Szakitsunk Leiner Laura Pdf Download [BEST].md b/spaces/scedlatioru/img-to-music/example/Akkor Szakitsunk Leiner Laura Pdf Download [BEST].md deleted file mode 100644 index 4c9e7b30ac88d8188ffe6958999fd3640417423c..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Akkor Szakitsunk Leiner Laura Pdf Download [BEST].md +++ /dev/null @@ -1,6 +0,0 @@ -

akkor szakitsunk leiner laura pdf download


Download Ziphttps://gohhs.com/2uEySh



- -apexvs mathematics answers. DOWNLOAD APEXVS MATHEMATICS ANSWERS epoint edu vn ... Leiner Laura Akkor Szakitsunk · Malayalam Kochupusthakam ... 1fdad05405
-
-
-

diff --git a/spaces/scedlatioru/img-to-music/example/Naruto Shinobi Breakdown Pc Full Version Fixed.md b/spaces/scedlatioru/img-to-music/example/Naruto Shinobi Breakdown Pc Full Version Fixed.md deleted file mode 100644 index 164c65fbd62a846572414ca8a976ef5c0136cd0a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Naruto Shinobi Breakdown Pc Full Version Fixed.md +++ /dev/null @@ -1,6 +0,0 @@ -

Naruto Shinobi Breakdown Pc Full Version


DOWNLOAD ✓✓✓ https://gohhs.com/2uEySe



-
- 4d29de3e1b
-
-
-

diff --git a/spaces/sdhsdhk/bingosjj/src/components/chat-scroll-anchor.tsx b/spaces/sdhsdhk/bingosjj/src/components/chat-scroll-anchor.tsx deleted file mode 100644 index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/components/chat-scroll-anchor.tsx +++ /dev/null @@ -1,29 +0,0 @@ -'use client' - -import * as React from 'react' -import { useInView } from 'react-intersection-observer' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' - -interface ChatScrollAnchorProps { - trackVisibility?: boolean -} - -export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) { - const isAtBottom = useAtBottom() - const { ref, entry, inView } = useInView({ - trackVisibility, - delay: 100, - rootMargin: '0px 0px -150px 0px' - }) - - React.useEffect(() => { - if (isAtBottom && trackVisibility && !inView) { - entry?.target.scrollIntoView({ - block: 'start' - }) - } - }, [inView, entry, isAtBottom, trackVisibility]) - - return
-} diff --git a/spaces/seanghay/KLEA/mel_processing.py b/spaces/seanghay/KLEA/mel_processing.py deleted file mode 100644 index fddf3ca9811448a1c31e0653f3bf38f6104e21e5..0000000000000000000000000000000000000000 --- a/spaces/seanghay/KLEA/mel_processing.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - 
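- # (with return_complex=False the STFT stacks real/imag in the last axis, so pow(2).sum(-1) is the squared magnitude; the 1e-6 floor keeps sqrt differentiable on silent bins)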
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/segments-tobias/conex/espnet2/enh/layers/mask_estimator.py b/spaces/segments-tobias/conex/espnet2/enh/layers/mask_estimator.py deleted file mode 100644 index 9e309b4d9a8af76ca18ed1b094c74eb2862a4496..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/enh/layers/mask_estimator.py +++ /dev/null @@ -1,91 +0,0 @@ -from typing import Tuple - -import numpy as np -import torch -from torch.nn import functional as F -from torch_complex.tensor import ComplexTensor - -from espnet.nets.pytorch_backend.nets_utils import make_pad_mask -from espnet.nets.pytorch_backend.rnn.encoders import RNN -from espnet.nets.pytorch_backend.rnn.encoders import RNNP - - -class MaskEstimator(torch.nn.Module): - def __init__( - self, type, idim, layers, units, projs, dropout, nmask=1, nonlinear="sigmoid" - ): - super().__init__() - subsample = np.ones(layers + 1, dtype=np.int) - - typ = type.lstrip("vgg").rstrip("p") - if type[-1] == "p": - self.brnn = RNNP(idim, layers, units, projs, subsample, dropout, typ=typ) - else: - self.brnn = RNN(idim, layers, units, projs, dropout, typ=typ) - - self.type = type - self.nmask = nmask - self.linears = torch.nn.ModuleList( - [torch.nn.Linear(projs, idim) for _ in range(nmask)] - ) - - if nonlinear not in ("sigmoid", "relu", "tanh", "crelu"): - raise ValueError("Not supporting nonlinear={}".format(nonlinear)) - - self.nonlinear = nonlinear - - def forward( - self, xs: ComplexTensor, ilens: torch.LongTensor - ) -> Tuple[Tuple[torch.Tensor, ...], torch.LongTensor]: - """Mask estimator forward function. - - Args: - xs: (B, F, C, T) - ilens: (B,) - Returns: - hs (torch.Tensor): The hidden vector (B, F, C, T) - masks: A tuple of the masks. 
(B, F, C, T) - ilens: (B,) - """ - assert xs.size(0) == ilens.size(0), (xs.size(0), ilens.size(0)) - _, _, C, input_length = xs.size() - # (B, F, C, T) -> (B, C, T, F) - xs = xs.permute(0, 2, 3, 1) - - # Calculate amplitude: (B, C, T, F) -> (B, C, T, F) - xs = (xs.real ** 2 + xs.imag ** 2) ** 0.5 - # xs: (B, C, T, F) -> xs: (B * C, T, F) - xs = xs.contiguous().view(-1, xs.size(-2), xs.size(-1)) - # ilens: (B,) -> ilens_: (B * C) - ilens_ = ilens[:, None].expand(-1, C).contiguous().view(-1) - - # xs: (B * C, T, F) -> xs: (B * C, T, D) - xs, _, _ = self.brnn(xs, ilens_) - # xs: (B * C, T, D) -> xs: (B, C, T, D) - xs = xs.view(-1, C, xs.size(-2), xs.size(-1)) - - masks = [] - for linear in self.linears: - # xs: (B, C, T, D) -> mask:(B, C, T, F) - mask = linear(xs) - - if self.nonlinear == "sigmoid": - mask = torch.sigmoid(mask) - elif self.nonlinear == "relu": - mask = torch.relu(mask) - elif self.nonlinear == "tanh": - mask = torch.tanh(mask) - elif self.nonlinear == "crelu": - mask = torch.clamp(mask, min=0, max=1) - # Zero padding - mask.masked_fill(make_pad_mask(ilens, mask, length_dim=2), 0) - - # (B, C, T, F) -> (B, F, C, T) - mask = mask.permute(0, 3, 1, 2) - - # Take cares of multi gpu cases: If input_length > max(ilens) - if mask.size(-1) < input_length: - mask = F.pad(mask, [0, input_length - mask.size(-1)], value=0) - masks.append(mask) - - return tuple(masks), ilens diff --git a/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus_models/configuration_utils.py b/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus_models/configuration_utils.py deleted file mode 100644 index 7efc735d4132124cd3d097cc1844f4407551b1db..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus_models/configuration_utils.py +++ /dev/null @@ -1,205 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Configuration base class and utilities.""" - -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import copy -import json -import logging -import os -from io import open - -from .file_utils import cached_path, CONFIG_NAME - -logger = logging.getLogger(__name__) - -class PretrainedConfig(object): - r""" Base class for all configuration classes. - Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. - - Note: - A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. - It only affects the model's configuration. - - Class attributes (overridden by derived classes): - - ``pretrained_config_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained model configurations as values. 
- - Parameters: - ``finetuning_task``: string, default `None`. Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. - ``num_labels``: integer, default `2`. Number of classes to use when the model is a classification model (sequences/tokens) - ``output_attentions``: boolean, default `False`. Should the model returns attentions weights. - ``output_hidden_states``: string, default `False`. Should the model returns all hidden-states. - ``torchscript``: string, default `False`. Is the model used with Torchscript. - """ - pretrained_config_archive_map = {} - - def __init__(self, **kwargs): - self.finetuning_task = kwargs.pop('finetuning_task', None) - self.num_labels = kwargs.pop('num_labels', 2) - self.output_attentions = kwargs.pop('output_attentions', False) - self.output_hidden_states = kwargs.pop('output_hidden_states', False) - self.torchscript = kwargs.pop('torchscript', False) - self.pruned_heads = kwargs.pop('pruned_heads', {}) - - def save_pretrained(self, save_directory): - """ Save a configuration object to the directory `save_directory`, so that it - can be re-loaded using the :func:`~pytorch_transformers.PretrainedConfig.from_pretrained` class method. - """ - assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" - - # If we save using the predefined names, we can load using `from_pretrained` - output_config_file = os.path.join(save_directory, CONFIG_NAME) - - self.to_json_file(output_config_file) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): - r""" Instantiate a :class:`~pytorch_transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration. - - Parameters: - pretrained_model_name_or_path: either: - - - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. - - a path to a `directory` containing a configuration file saved using the :func:`~pytorch_transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. - - a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. - - cache_dir: (`optional`) string: - Path to a directory in which a downloaded pre-trained model - configuration should be cached if the standard cache should not be used. - - kwargs: (`optional`) dict: key/value pairs with which to update the configuration object after loading. - - - The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. - - Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. - - force_download: (`optional`) boolean, default False: - Force to (re-)download the model weights and configuration files and override the cached versions if they exists. - - proxies: (`optional`) dict, default None: - A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. - The proxies are used on each request. - - return_unused_kwargs: (`optional`) bool: - - - If False, then this function returns just the final configuration object. 
- - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored. - - Examples:: - - # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a - # derived class: BertConfig - config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. - config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` - config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') - config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) - assert config.output_attention == True - config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, - foo=False, return_unused_kwargs=True) - assert config.output_attention == True - assert unused_kwargs == {'foo': False} - - """ - cache_dir = kwargs.pop('cache_dir', None) - force_download = kwargs.pop('force_download', False) - proxies = kwargs.pop('proxies', None) - return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) - - if pretrained_model_name_or_path in cls.pretrained_config_archive_map: - config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path] - elif os.path.isdir(pretrained_model_name_or_path): - config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) - else: - config_file = pretrained_model_name_or_path - # redirect to the cache, if necessary - try: - resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) - except EnvironmentError as e: - if pretrained_model_name_or_path in cls.pretrained_config_archive_map: - logger.error( - "Couldn't reach server at '{}' to download pretrained model configuration file.".format( - config_file)) - else: - logger.error( - "Model name '{}' was not found in model name list ({}). 
" - "We assumed '{}' was a path or url but couldn't find any file " - "associated to this path or url.".format( - pretrained_model_name_or_path, - ', '.join(cls.pretrained_config_archive_map.keys()), - config_file)) - raise e - if resolved_config_file == config_file: - logger.info("loading configuration file {}".format(config_file)) - else: - logger.info("loading configuration file {} from cache at {}".format( - config_file, resolved_config_file)) - - # Load config - config = cls.from_json_file(resolved_config_file) - - if hasattr(config, 'pruned_heads'): - config.pruned_heads = dict((int(key), set(value)) for key, value in config.pruned_heads.items()) - - # Update config with kwargs if needed - to_remove = [] - for key, value in kwargs.items(): - if hasattr(config, key): - setattr(config, key, value) - to_remove.append(key) - for key in to_remove: - kwargs.pop(key, None) - - logger.info("Model config %s", config) - if return_unused_kwargs: - return config, kwargs - else: - return config - - @classmethod - def from_dict(cls, json_object): - """Constructs a `Config` from a Python dictionary of parameters.""" - config = cls(vocab_size_or_config_json_file=-1) - for key, value in json_object.items(): - config.__dict__[key] = value - return config - - @classmethod - def from_json_file(cls, json_file): - """Constructs a `BertConfig` from a json file of parameters.""" - with open(json_file, "r", encoding='utf-8') as reader: - text = reader.read() - return cls.from_dict(json.loads(text)) - - def __eq__(self, other): - return self.__dict__ == other.__dict__ - - def __repr__(self): - return str(self.to_json_string()) - - def to_dict(self): - """Serializes this instance to a Python dictionary.""" - output = copy.deepcopy(self.__dict__) - return output - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" - - def to_json_file(self, json_file_path): - """ Save this instance to a json file.""" - with open(json_file_path, "w", encoding='utf-8') as writer: - writer.write(self.to_json_string()) diff --git a/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/training/networks_stylegan.py b/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/training/networks_stylegan.py deleted file mode 100644 index adc4b260f6f94570c793b0086280f757d2e19ad1..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/stylegan/stylegan_tf/training/networks_stylegan.py +++ /dev/null @@ -1,661 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Network architectures used in the StyleGAN paper.""" - -import numpy as np -import tensorflow as tf -import dnnlib -import dnnlib.tflib as tflib - -# NOTE: Do not import any application-specific modules here! -# Specify all network parameters as kwargs. - -#---------------------------------------------------------------------------- -# Primitive ops for manipulating 4D activation tensors. -# The gradients of these are not necessary efficient or even meaningful. 
- -def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(stride, int) and stride >= 1 - - # Finalize filter kernel. - f = np.array(f, dtype=np.float32) - if f.ndim == 1: - f = f[:, np.newaxis] * f[np.newaxis, :] - assert f.ndim == 2 - if normalize: - f /= np.sum(f) - if flip: - f = f[::-1, ::-1] - f = f[:, :, np.newaxis, np.newaxis] - f = np.tile(f, [1, 1, int(x.shape[1]), 1]) - - # No-op => early exit. - if f.shape == (1, 1) and f[0,0] == 1: - return x - - # Convolve using depthwise_conv2d. - orig_dtype = x.dtype - x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16 - f = tf.constant(f, dtype=x.dtype, name='filter') - strides = [1, 1, stride, stride] - x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW') - x = tf.cast(x, orig_dtype) - return x - -def _upscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Upscale using tf.tile(). - s = x.shape - x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) - x = tf.tile(x, [1, 1, 1, factor, 1, factor]) - x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) - return x - -def _downscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # 2x2, float32 => downscale using _blur2d(). - if factor == 2 and x.dtype == tf.float32: - f = [np.sqrt(gain) / factor] * factor - return _blur2d(x, f=f, normalize=False, stride=factor) - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Large factor => downscale using tf.nn.avg_pool(). - # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work. - ksize = [1, 1, factor, factor] - return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') - -#---------------------------------------------------------------------------- -# High-level ops for manipulating 4D activation tensors. -# The gradients of these are meant to be as efficient as possible. - -def blur2d(x, f=[1,2,1], normalize=True): - with tf.variable_scope('Blur2D'): - @tf.custom_gradient - def func(x): - y = _blur2d(x, f, normalize) - @tf.custom_gradient - def grad(dy): - dx = _blur2d(dy, f, normalize, flip=True) - return dx, lambda ddx: _blur2d(ddx, f, normalize) - return y, grad - return func(x) - -def upscale2d(x, factor=2): - with tf.variable_scope('Upscale2D'): - @tf.custom_gradient - def func(x): - y = _upscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _downscale2d(dy, factor, gain=factor**2) - return dx, lambda ddx: _upscale2d(ddx, factor) - return y, grad - return func(x) - -def downscale2d(x, factor=2): - with tf.variable_scope('Downscale2D'): - @tf.custom_gradient - def func(x): - y = _downscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _upscale2d(dy, factor, gain=1/factor**2) - return dx, lambda ddx: _downscale2d(ddx, factor) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Get/create weight tensor for a convolutional or fully-connected layer. 
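- # (use_wscale enables StyleGAN's equalized learning rate: weights are stored at unit variance and scaled by the He constant at runtime, so adaptive optimizers treat all layers uniformly)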
- -def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1): - fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] - he_std = gain / np.sqrt(fan_in) # He init - - # Equalized learning rate and custom learning rate multiplier. - if use_wscale: - init_std = 1.0 / lrmul - runtime_coef = he_std * lrmul - else: - init_std = he_std / lrmul - runtime_coef = lrmul - - # Create variable. - init = tf.initializers.random_normal(0, init_std) - return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef - -#---------------------------------------------------------------------------- -# Fully-connected layer. - -def dense(x, fmaps, **kwargs): - if len(x.shape) > 2: - x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) - w = get_weight([x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.matmul(x, w) - -#---------------------------------------------------------------------------- -# Convolutional layer. - -def conv2d(x, fmaps, kernel, **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Fused convolution + scaling. -# Faster and uses less memory than performing the operations separately. - -def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) * 2 >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return conv2d(upscale2d(x), fmaps, kernel, **kwargs) - - # Fused => perform both ops simultaneously using tf.nn.conv2d_transpose(). - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) - w = tf.cast(w, x.dtype) - os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] - return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return downscale2d(conv2d(x, fmaps, kernel, **kwargs)) - - # Fused => perform both ops simultaneously using tf.nn.conv2d(). - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Apply bias to the given activation tensor. 
- -def apply_bias(x, lrmul=1): - b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul - b = tf.cast(b, x.dtype) - if len(x.shape) == 2: - return x + b - return x + tf.reshape(b, [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16. - -def leaky_relu(x, alpha=0.2): - with tf.variable_scope('LeakyReLU'): - alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') - @tf.custom_gradient - def func(x): - y = tf.maximum(x, x * alpha) - @tf.custom_gradient - def grad(dy): - dx = tf.where(y >= 0, dy, dy * alpha) - return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Pixelwise feature vector normalization. - -def pixel_norm(x, epsilon=1e-8): - with tf.variable_scope('PixelNorm'): - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) - -#---------------------------------------------------------------------------- -# Instance normalization. - -def instance_norm(x, epsilon=1e-8): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('InstanceNorm'): - orig_dtype = x.dtype - x = tf.cast(x, tf.float32) - x -= tf.reduce_mean(x, axis=[2,3], keepdims=True) - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon) - x = tf.cast(x, orig_dtype) - return x - -#---------------------------------------------------------------------------- -# Style modulation. - -def style_mod(x, dlatent, **kwargs): - with tf.variable_scope('StyleMod'): - style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs)) - style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)) - return x * (style[:,0] + 1) + style[:,1] - -#---------------------------------------------------------------------------- -# Noise input. - -def apply_noise(x, noise_var=None, randomize_noise=True): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('Noise'): - if noise_var is None or randomize_noise: - noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype) - else: - noise = tf.cast(noise_var, x.dtype) - weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros()) - return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Minibatch standard deviation. - -def minibatch_stddev_layer(x, group_size=4, num_new_features=1): - with tf.variable_scope('MinibatchStddev'): - group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. - s = x.shape # [NCHW] Input shape. - y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. - y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. - y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. - y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. - y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. - y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. 
- y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups - y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. - y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. - return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. - -#---------------------------------------------------------------------------- -# Style-based generator used in the StyleGAN paper. -# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below. - -def G_style( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable. - truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable. - truncation_psi_val = None, # Value for truncation_psi to use during validation. - truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation. - dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable. - style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable. - is_training = False, # Network is under training? Enables and disables specific features. - is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls. - **kwargs): # Arguments for sub-networks (G_mapping and G_synthesis). - - # Validate arguments. - assert not is_training or not is_validation - assert isinstance(components, dnnlib.EasyDict) - if is_validation: - truncation_psi = truncation_psi_val - truncation_cutoff = truncation_cutoff_val - if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1): - truncation_psi = None - if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0): - truncation_cutoff = None - if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1): - dlatent_avg_beta = None - if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0): - style_mixing_prob = None - - # Setup components. - if 'synthesis' not in components: - components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs) - num_layers = components.synthesis.input_shape[1] - dlatent_size = components.synthesis.input_shape[2] - if 'mapping' not in components: - components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs) - - # Setup variables. - lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False) - dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False) - - # Evaluate mapping network. - dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs) - - # Update moving average of W. 
- if dlatent_avg_beta is not None: - with tf.variable_scope('DlatentAvg'): - batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0) - update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta)) - with tf.control_dependencies([update_op]): - dlatents = tf.identity(dlatents) - - # Perform style mixing regularization. - if style_mixing_prob is not None: - with tf.name_scope('StyleMix'): - latents2 = tf.random_normal(tf.shape(latents_in)) - dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs) - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2 - mixing_cutoff = tf.cond( - tf.random_uniform([], 0.0, 1.0) < style_mixing_prob, - lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32), - lambda: cur_layers) - dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2) - - # Apply truncation trick. - if truncation_psi is not None and truncation_cutoff is not None: - with tf.variable_scope('Truncation'): - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - ones = np.ones(layer_idx.shape, dtype=np.float32) - coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones) - dlatents = tflib.lerp(dlatent_avg, dlatents, coefs) - - # Evaluate synthesis network. - with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]): - images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Mapping network used in the StyleGAN paper. - -def G_mapping( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - latent_size = 512, # Latent vector (Z) dimensionality. - label_size = 0, # Label dimensionality, 0 if no labels. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size]. - mapping_layers = 8, # Number of mapping layers. - mapping_fmaps = 512, # Number of activations in the mapping layers. - mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers. - mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'. - use_wscale = True, # Enable equalized learning rate? - normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers? - dtype = 'float32', # Data type to use for activations and outputs. - **_kwargs): # Ignore unrecognized keyword args. - - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity] - - # Inputs. - latents_in.set_shape([None, latent_size]) - labels_in.set_shape([None, label_size]) - latents_in = tf.cast(latents_in, dtype) - labels_in = tf.cast(labels_in, dtype) - x = latents_in - - # Embed labels and concatenate them with latents. - if label_size: - with tf.variable_scope('LabelConcat'): - w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal()) - y = tf.matmul(labels_in, tf.cast(w, dtype)) - x = tf.concat([x, y], axis=1) - - # Normalize latents. - if normalize_latents: - x = pixel_norm(x) - - # Mapping layers. 
- for layer_idx in range(mapping_layers): - with tf.variable_scope('Dense%d' % layer_idx): - fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps - x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul) - x = apply_bias(x, lrmul=mapping_lrmul) - x = act(x) - - # Broadcast. - if dlatent_broadcast is not None: - with tf.variable_scope('Broadcast'): - x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1]) - - # Output. - assert x.dtype == tf.as_dtype(dtype) - return tf.identity(x, name='dlatents_out') - -#---------------------------------------------------------------------------- -# Synthesis network used in the StyleGAN paper. - -def G_synthesis( - dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - num_channels = 3, # Number of output color channels. - resolution = 1024, # Output resolution. - fmap_base = 8192, # Overall multiplier for the number of feature maps. - fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - use_styles = True, # Enable style inputs? - const_input_layer = True, # First layer is a learned constant? - use_noise = True, # Enable noise inputs? - randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu' - use_wscale = True, # Enable equalized learning rate? - use_pixel_norm = False, # Enable pixelwise feature vector normalization? - use_instance_norm = True, # Enable instance normalization? - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. - structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior. - **_kwargs): # Ignore unrecognized keyword args. - - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if is_template_graph: force_clean_graph = True - if force_clean_graph: randomize_noise = False - if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - num_layers = resolution_log2 * 2 - 2 - num_styles = num_layers if use_styles else 1 - images_out = None - - # Primary inputs. - dlatents_in.set_shape([None, num_styles, dlatent_size]) - dlatents_in = tf.cast(dlatents_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) - - # Noise inputs. 
- noise_inputs = [] - if use_noise: - for layer_idx in range(num_layers): - res = layer_idx // 2 + 2 - shape = [1, use_noise, 2**res, 2**res] - noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False)) - - # Things to do at the end of each layer. - def layer_epilogue(x, layer_idx): - if use_noise: - x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise) - x = apply_bias(x) - x = act(x) - if use_pixel_norm: - x = pixel_norm(x) - if use_instance_norm: - x = instance_norm(x) - if use_styles: - x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale) - return x - - # Early layers. - with tf.variable_scope('4x4'): - if const_input_layer: - with tf.variable_scope('Const'): - x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones()) - x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0) - else: - with tf.variable_scope('Dense'): - x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN - x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0) - with tf.variable_scope('Conv'): - x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1) - - # Building blocks for remaining layers. - def block(res, x): # res = 3..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - with tf.variable_scope('Conv0_up'): - x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4) - with tf.variable_scope('Conv1'): - x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3) - return x - def torgb(res, x): # res = 2..resolution_log2 - lod = resolution_log2 - res - with tf.variable_scope('ToRGB_lod%d' % lod): - return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) - - # Fixed structure: simple and efficient, but does not support progressive growing. - if structure == 'fixed': - for res in range(3, resolution_log2 + 1): - x = block(res, x) - images_out = torgb(resolution_log2, x) - - # Linear structure: simple but inefficient. - if structure == 'linear': - images_out = torgb(2, x) - for res in range(3, resolution_log2 + 1): - lod = resolution_log2 - res - x = block(res, x) - img = torgb(res, x) - images_out = upscale2d(images_out) - with tf.variable_scope('Grow_lod%d' % lod): - images_out = tflib.lerp_clip(img, images_out, lod_in - lod) - - # Recursive structure: complex but efficient. - if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(x, res, lod): - y = block(res, x) - img = lambda: upscale2d(torgb(res, y), 2**lod) - img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod)) - if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) - return img() - images_out = grow(x, 3, resolution_log2 - 3) - - assert images_out.dtype == tf.as_dtype(dtype) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Discriminator used in the StyleGAN paper. - -def D_basic( - images_in, # First input: Images [minibatch, channel, height, width]. - labels_in, # Second input: Labels [minibatch, label_size]. 
- num_channels = 1, # Number of input color channels. Overridden based on dataset. - resolution = 32, # Input resolution. Overridden based on dataset. - label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. - fmap_base = 8192, # Overall multiplier for the number of feature maps. - fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', - use_wscale = True, # Enable equalized learning rate? - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. - mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer. - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. - structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - **_kwargs): # Ignore unrecognized keyword args. - - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - - images_in.set_shape([None, num_channels, resolution, resolution]) - labels_in.set_shape([None, label_size]) - images_in = tf.cast(images_in, dtype) - labels_in = tf.cast(labels_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) - scores_out = None - - # Building blocks. - def fromrgb(x, res): # res = 2..resolution_log2 - with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): - return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale))) - def block(x, res): # res = 2..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - if res >= 3: # 8x8 and up - with tf.variable_scope('Conv0'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Conv1_down'): - x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale))) - else: # 4x4 - if mbstd_group_size > 1: - x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features) - with tf.variable_scope('Conv'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense0'): - x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense1'): - x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale)) - return x - - # Fixed structure: simple and efficient, but does not support progressive growing. 
- if structure == 'fixed': - x = fromrgb(images_in, resolution_log2) - for res in range(resolution_log2, 2, -1): - x = block(x, res) - scores_out = block(x, 2) - - # Linear structure: simple but inefficient. - if structure == 'linear': - img = images_in - x = fromrgb(img, resolution_log2) - for res in range(resolution_log2, 2, -1): - lod = resolution_log2 - res - x = block(x, res) - img = downscale2d(img) - y = fromrgb(img, res - 1) - with tf.variable_scope('Grow_lod%d' % lod): - x = tflib.lerp_clip(x, y, lod_in - lod) - scores_out = block(x, 2) - - # Recursive structure: complex but efficient. - if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(res, lod): - x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) - if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) - x = block(x(), res); y = lambda: x - if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) - return y() - scores_out = grow(2, resolution_log2 - 2) - - # Label conditioning from "Which Training Methods for GANs do actually Converge?" - if label_size: - with tf.variable_scope('LabelSwitch'): - scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True) - - assert scores_out.dtype == tf.as_dtype(dtype) - scores_out = tf.identity(scores_out, name='scores_out') - return scores_out - -#---------------------------------------------------------------------------- diff --git a/spaces/silencewing/server/youyou/.history/math_20230613231821.html b/spaces/silencewing/server/youyou/.history/math_20230613231821.html deleted file mode 100644 index a253fff85df2dd40a25babb6eede6871cc7185f6..0000000000000000000000000000000000000000 --- a/spaces/silencewing/server/youyou/.history/math_20230613231821.html +++ /dev/null @@ -1,234 +0,0 @@ - - - - - - - - - - Document - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - -
Question
Answer
Correct/Incorrect
Score
-
- - - - diff --git a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/other_tools/make_pssm_dict.py b/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/other_tools/make_pssm_dict.py deleted file mode 100644 index c6cf83df6febb2ac9e12da3e127dbc9a7ea08d7f..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/other_tools/make_pssm_dict.py +++ /dev/null @@ -1,64 +0,0 @@ -import pandas as pd -import numpy as np - -import glob -import random -import numpy as np -import json - - -def softmax(x, T): - return np.exp(x/T)/np.sum(np.exp(x/T), -1, keepdims=True) - -def parse_pssm(path): - data = pd.read_csv(path, skiprows=2) - floats_list_list = [] - for i in range(data.values.shape[0]): - str1 = data.values[i][0][4:] - floats_list = [] - for item in str1.split(): - floats_list.append(float(item)) - floats_list_list.append(floats_list) - np_lines = np.array(floats_list_list) - return np_lines - -np_lines = parse_pssm('/home/swang523/RLcage/capsid/monomersfordesign/8-16-21/pssm_rainity_final_8-16-21_int/build_0.2089_0.98_0.4653_19_2.00_0.005745.pssm') - -mpnn_alphabet = 'ACDEFGHIKLMNPQRSTVWYX' -input_alphabet = 'ARNDCQEGHILKMFPSTWYV' - -permutation_matrix = np.zeros([20,21]) -for i in range(20): - letter1 = input_alphabet[i] - for j in range(21): - letter2 = mpnn_alphabet[j] - if letter1 == letter2: - permutation_matrix[i,j]=1. - -pssm_log_odds = np_lines[:,:20] @ permutation_matrix -pssm_probs = np_lines[:,20:40] @ permutation_matrix - -X_mask = np.concatenate([np.zeros([1,20]), np.ones([1,1])], -1) - -def softmax(x, T): - return np.exp(x/T)/np.sum(np.exp(x/T), -1, keepdims=True) - -#Load parsed PDBs: -with open('/home/justas/projects/cages/parsed/test.jsonl', 'r') as json_file: - json_list = list(json_file) - -my_dict = {} -for json_str in json_list: - result = json.loads(json_str) - all_chain_list = [item[-1:] for item in list(result) if item[:9]=='seq_chain'] - pssm_dict = {} - for chain in all_chain_list: - pssm_dict[chain] = {} - pssm_dict[chain]['pssm_coef'] = (np.ones(len(result['seq_chain_A']))).tolist() #a number between 0.0 and 1.0 specifying how much attention put to PSSM, can be adjusted later as a flag - pssm_dict[chain]['pssm_bias'] = (softmax(pssm_log_odds-X_mask*1e8, 1.0)).tolist() #PSSM like, [length, 21] such that sum over the last dimension adds up to 1.0 - pssm_dict[chain]['pssm_log_odds'] = (pssm_log_odds).tolist() - my_dict[result['name']] = pssm_dict - -#Write output to: -with open('/home/justas/projects/lab_github/mpnn/data/pssm_dict.jsonl', 'w') as f: - f.write(json.dumps(my_dict) + '\n') diff --git a/spaces/simonduerr/diffdock/datasets/esm_embedding_preparation.py b/spaces/simonduerr/diffdock/datasets/esm_embedding_preparation.py deleted file mode 100644 index f6162d2fd983c0dc844a61d90f49119a0813d183..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/diffdock/datasets/esm_embedding_preparation.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from argparse import FileType, ArgumentParser - -import numpy as np -import pandas as pd -from Bio.PDB import PDBParser -from Bio.Seq import Seq -from Bio.SeqRecord import SeqRecord -from tqdm import tqdm -from Bio import SeqIO - - - -def esm_embedding_prep(out_file, protein_path): - biopython_parser = PDBParser() - - three_to_one = { - "ALA": "A", - "ARG": "R", - "ASN": "N", - "ASP": "D", - "CYS": "C", - "GLN": "Q", - "GLU": "E", - "GLY": "G", - "HIS": "H", - "ILE": "I", - "LEU": "L", - "LYS": 
"K", - "MET": "M", - "MSE": "M", # MSE this is almost the same AA as MET. The sulfur is just replaced by Selen - "PHE": "F", - "PRO": "P", - "PYL": "O", - "SER": "S", - "SEC": "U", - "THR": "T", - "TRP": "W", - "TYR": "Y", - "VAL": "V", - "ASX": "B", - "GLX": "Z", - "XAA": "X", - "XLE": "J", - } - - file_paths = [protein_path] - sequences = [] - ids = [] - for file_path in tqdm(file_paths): - structure = biopython_parser.get_structure("random_id", file_path) - structure = structure[0] - for i, chain in enumerate(structure): - seq = "" - for res_idx, residue in enumerate(chain): - if residue.get_resname() == "HOH": - continue - residue_coords = [] - c_alpha, n, c = None, None, None - for atom in residue: - if atom.name == "CA": - c_alpha = list(atom.get_vector()) - if atom.name == "N": - n = list(atom.get_vector()) - if atom.name == "C": - c = list(atom.get_vector()) - if ( - c_alpha != None and n != None and c != None - ): # only append residue if it is an amino acid - try: - seq += three_to_one[residue.get_resname()] - except Exception as e: - seq += "-" - print( - "encountered unknown AA: ", - residue.get_resname(), - " in the complex ", - file_path, - ". Replacing it with a dash - .", - ) - sequences.append(seq) - ids.append(f"{os.path.basename(file_path)}_chain_{i}") - records = [] - for (index, seq) in zip(ids, sequences): - record = SeqRecord(Seq(seq), str(index)) - record.description = "" - records.append(record) - SeqIO.write(records, out_file, "fasta") diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Express Your Love with I Love You Ramana Ringtone - Download Now.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Express Your Love with I Love You Ramana Ringtone - Download Now.md deleted file mode 100644 index 11975301372732399cf3789b1e20bb7ae9f34fc7..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Express Your Love with I Love You Ramana Ringtone - Download Now.md +++ /dev/null @@ -1,143 +0,0 @@ - -

I Love You Ramana Ringtone Download: How to Express Your Love with a Customized Sound

-

Do you want to make your phone more personal and unique? Do you want to show your love for Ramana in a creative and fun way? Do you want to surprise him with a special sound every time he calls or texts you? If you answered yes to any of these questions, then you need to download the "I Love You Ramana" ringtone.

-

i love you ramana ringtone download


DOWNLOAD >>>>> https://ssurll.com/2uNZg6



-

What is a ringtone and why do you need one?

-

A ringtone is a sound that plays when your phone receives a call or a notification

-

A ringtone is a short audio clip that plays on your phone when someone calls you or sends you a message. It can be a song, a voice, a sound effect, or anything else that you like. A ringtone helps you identify who is calling or texting you without looking at your phone.

-

A ringtone can reflect your personality, mood, and preferences

-

A ringtone can also be a way of expressing yourself and your style. You can choose a ringtone that matches your personality, mood, and preferences. For example, if you are a fan of rock music, you can choose a rock song as your ringtone. If you are feeling happy, you can choose a cheerful sound as your ringtone. If you are into astrology, you can choose a zodiac sign as your ringtone.

-

A ringtone can also convey a message to the caller or the recipient of the notification

-

A ringtone can also be a way of communicating something to the person who is calling or texting you. For example, if you are busy, you can choose a busy signal as your ringtone. If you are in love, you can choose a romantic sound as your ringtone. If you are angry, you can choose an angry sound as your ringtone.

-

Who is Ramana and why do you love him?

-

Ramana is a popular name in India, especially in the south

-

Ramana is a common name in India, especially in the southern states. It is derived from Rama, the name of one of the most revered Hindu gods. Rama is known as the ideal king, husband, son, and brother. He is also the hero of the epic Ramayana, which narrates his adventures and his devotion to his wife Sita.

-

Ramana can refer to a person, a deity, or a spiritual teacher

-

Ramana can also refer to a deity or a spiritual teacher. For example, Ramana Maharshi was a famous Indian sage who taught the philosophy of self-inquiry and non-duality. He is regarded as one of the greatest sages of the 20th century. Many people visit his ashram in Tiruvannamalai, Tamil Nadu, to seek his guidance and blessings.

-

You love Ramana because he is your partner, your friend, your idol, or your guru

-

You love Ramana for different reasons. Maybe he is your partner, your soulmate, your lover, or your spouse. You love him for his qualities, his actions, his words, and his presence in your life. You want to make him happy and show him how much you care.

-

Maybe he is your friend, your confidant, your companion, or your ally. You love him for his support, his loyalty, his honesty, and his friendship. You want to share your joys and sorrows with him and be there for him.

-

Maybe he is your idol, your role model, your inspiration, or your hero. You love him for his achievements, his talents, his wisdom, and his charisma. You want to learn from him and follow his footsteps.

-

Maybe he is your guru, your teacher, your master, or your guide. You love him for his teachings, his grace, his compassion, and his enlightenment. You want to attain his state of mind and realize your true self.

-

How to download the "I Love You Ramana" ringtone?

-

There are many websites and apps that offer free ringtones for download

-

If you want to download the "I Love You Ramana" ringtone, you have many options to choose from. There are many websites and apps that offer free ringtones for download. Some of them are:

-
    -
  • Zedge: This is one of the most popular platforms for downloading ringtones, wallpapers, stickers, and more. You can browse through thousands of ringtones in different categories and genres. You can also upload your own ringtones and share them with others.
  • Myxer: This is another popular platform for downloading ringtones, music, videos, and more. You can create your own ringtones by uploading audio files or recording your voice. You can also edit and customize your ringtones with effects and filters.
  • Mobile9: This is a platform for downloading ringtones, themes, games, apps, and more. You can find ringtones in various languages and styles. You can also join the community and interact with other users.
-

You can search for "I Love You Ramana" ringtone on these platforms and download it to your device

-

To download the "I Love You Ramana" ringtone from these platforms, you need to follow these steps:

-
    -
  1. Go to the website or app of your choice and search for "I Love You Ramana" ringtone.
  2. Select the ringtone that you like from the results and preview it.
  3. Click on the download button and save the ringtone file to your device.
  4. Transfer the ringtone file to your phone if needed.
-

You can also create your own "I Love You Ramana" ringtone using online tools or software

-

If you want to create your own "I Love You Ramana" ringtone, you can use online tools or software that allow you to make ringtones easily. Some of them are:

-
    -
  • Ringtone Maker: This is an online tool that lets you make ringtones from any audio file. You can upload an audio file from your computer or a URL, or record your voice using a microphone. You can then cut and edit the audio file to make a ringtone.
  • Audacity: This is a free, open-source program that lets you record and edit audio files. You can import an audio file from your computer or record your voice using a microphone. You can then use various tools and effects to edit the audio file into a ringtone.
  • GarageBand: This is an app that lets you create music and ringtones on your Mac or iOS device. You can use different instruments, loops, samples, and effects to create a ringtone. You can also record your voice using a microphone.
To create your own "I Love You Ramana" ringtone using these tools or software, you need to follow these steps:

-
    -
  1. Choose the tool or software that you want to use and open it.
  2. Select an audio file from your computer or a URL, or record your voice using a microphone.
  3. Cut and edit the audio file to make a ringtone. You can add effects, filters, transitions, and more (a scripted sketch follows this list).
  4. Save the ringtone file to your device and transfer it to your phone if needed.
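If you prefer a scripted route for step 3, a few lines of Python can do the cutting. The sketch below assumes the pydub library (which needs ffmpeg for MP3 files); the file names and timestamps are placeholders to adjust to your own audio:

    # Illustrative sketch: trim a 30-second ringtone out of a longer song with pydub.
    from pydub import AudioSegment

    song = AudioSegment.from_file("my_song.mp3")      # placeholder source file
    ringtone = song[15_000:45_000]                    # slicing is in milliseconds (0:15 to 0:45)
    ringtone = ringtone.fade_in(500).fade_out(500)    # soften the start and end
    ringtone.export("i_love_you_ramana.mp3", format="mp3")

Keeping the clip to roughly 30 seconds is a safe choice, since most phones expect short ringtones.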
-

How to set the "I Love You Ramana" ringtone as your default or contact-specific sound?

-

Depending on your device and operating system, you can change your ringtone settings in different ways

-

Once you have downloaded or created your "I Love You Ramana" ringtone, you need to set it as your default or contact-specific sound. The way to do this depends on your device and operating system. Here are some general steps that you can follow:

-
    -
  • Go to the settings of your phone and look for the sound or ringtone option.
  • Select the ringtone option and browse through the available ringtones on your phone.
  • Find and select the "I Love You Ramana" ringtone that you have downloaded or created.
  • Confirm your selection and exit the settings.
-

You can set the "I Love You Ramana" ringtone as your default sound for all calls and notifications

-

If you want to set the "I Love You Ramana" ringtone as your default sound for all calls and notifications, you can do so by following the steps above. This way, whenever you receive a call or a notification, you will hear the "I Love You Ramana" ringtone playing on your phone.

-

You can also set the "I Love You Ramana" ringtone as a contact-specific sound for Ramana or anyone else you love

-

If you want to set the "I Love You Ramana" ringtone as a contact-specific sound for Ramana or anyone else you love, you can do so by following these steps:

-
    -
  • Go to the contacts app on your phone and look for the contact that you want to assign the ringtone to.
  • Select the contact and tap on the edit or customize option.
  • Look for the ringtone option and tap on it.
  • Browse through the available ringtones on your phone and find and select the "I Love You Ramana" ringtone that you have downloaded or created.
  • Confirm your selection and save the changes.
-

This way, whenever you receive a call or a notification from that contact, you will hear the "I Love You Ramana" ringtone playing on your phone.

-

Conclusion: Enjoy your "I Love You Ramana" ringtone and share it with others

-

In this article, we have explained what a ringtone is and why you need one. We have also introduced you to Ramana, who is someone you love and want to express your love to. We have shown you how to download or create the "I Love You Ramana" ringtone and how to set it as your default or contact-specific sound. We hope that you enjoy your "I Love You Ramana" ringtone and share it with others who love Ramana too.

-

If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you. Thank you for reading!

-

Frequently Asked Questions

-

Q: Where can I find more ringtones like "I Love You Ramana"?

-

A: You can find more ringtones like "I Love You Ramana" on various websites and apps that offer free ringtones for download. Some of them are Zedge, Myxer, Mobile9, and more. You can also create your own ringtones using online tools or software like Ringtone Maker, Audacity, GarageBand, and more.

-

Q: How can I make my "I Love You Ramana" ringtone more unique and personal?

-

A: You can make your "I Love You Ramana" ringtone more personal by adding elements that are specific to you and Ramana. For example, you can include his name, his photo, his voice, his favorite song, his favorite quote, his birthday, or his anniversary. You can also use online tools or software to apply effects, filters, and transitions that give the sound a distinctive touch.

-

Q: How can I share my "I Love You Ramana" ringtone with others?

-

A: You can share your "I Love You Ramana" ringtone with others by sending it to them via email, message, Bluetooth, or any other method that your device supports. You can also upload your ringtone to a website or an app that allows you to share ringtones with others. You can also post your ringtone on social media platforms and tag Ramana and your friends who love him too.

-

Q: How can I delete or change my "I Love You Ramana" ringtone?

-

A: You can delete or change your "I Love You Ramana" ringtone by following these steps:

-
    -
  • Go to the settings of your phone and look for the sound or ringtone option.
  • Select the ringtone option and browse through the available ringtones on your phone.
  • Find and select the "I Love You Ramana" ringtone that you want to delete or change.
  • Click on the delete or change button and confirm your action.
  • Select another ringtone that you want to use instead of the "I Love You Ramana" ringtone.
-

Q: What are some other ways to express my love for Ramana?

-

A: Besides using the "I Love You Ramana" ringtone, there are many other ways to express your love for Ramana. Some of them are:

-
    -
  • Tell him how you feel about him in words or in writing.
  • Show him your affection with hugs, kisses, cuddles, and compliments.
  • Surprise him with gifts, flowers, chocolates, cards, or anything else that he likes.
  • Spend quality time with him and do things that he enjoys.
  • Support him in his goals and dreams and encourage him in his challenges.
  • Respect him and his opinions and listen to him attentively.
  • Trust him and be loyal to him and expect the same from him.

-
-
\ No newline at end of file diff --git a/spaces/simsa/Fashion-Image-Captioning-using-BLIP-2/README.md b/spaces/simsa/Fashion-Image-Captioning-using-BLIP-2/README.md deleted file mode 100644 index ffdb06bafcf964f11c6a23716d023a01a3587b7c..0000000000000000000000000000000000000000 --- a/spaces/simsa/Fashion-Image-Captioning-using-BLIP-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fashion Image Captioning Using BLIP 2 -emoji: 🚀 -colorFrom: blue -colorTo: blue -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: Upyaya/Fashion-Image-Captioning-using-BLIP-2 ---- diff --git a/spaces/singhk28/nocodeml/README.md b/spaces/singhk28/nocodeml/README.md deleted file mode 100644 index 9eb326c854872bffaa9b6232352afc495937a705..0000000000000000000000000000000000000000 --- a/spaces/singhk28/nocodeml/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Nocodeml -emoji: 🏢 -colorFrom: blue -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: cc-by-nc-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/skf15963/summary/fengshen/examples/clip_finetune/clip_finetune_flickr.py b/spaces/skf15963/summary/fengshen/examples/clip_finetune/clip_finetune_flickr.py deleted file mode 100644 index 9cac74d87e861cf0ffff64c9ca03330208db90c3..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/clip_finetune/clip_finetune_flickr.py +++ /dev/null @@ -1,259 +0,0 @@ -import sys -sys.path.append('../../') -from data.clip_dataloader.flickr import FlickrDataModule -import pytorch_lightning as pl -import numpy as np -import torch -from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts -import torch.nn.functional as F -import math -import copy -import argparse -from transformers import CLIPModel, BertForSequenceClassification - -class CLIPLightning(pl.LightningModule): - def __init__(self, model_name='ViT-B/32', minibatch_size=2): - """A lightning wrapper for a CLIP model as specified in the paper. - - Args: - model_name (str): A case sensitive visual model name. - config (dict): A dictionary containing the CLIP instantiation parameters. 
- """ - super().__init__() - - self.prepare_data_per_node = True - self.model_name = 'ViT-B/32' - # self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") - self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") # NOTE load from openAI - self.text_encoder = BertForSequenceClassification.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-102M-Chinese") - self.minibatch_size = minibatch_size - self.isViT = 'ViT' in self.model_name - self.automatic_optimization = False - - # Training loss: https://github.com/openai/CLIP/issues/83 - # Mini-batching thanks to https://github.com/crowsonkb / https://twitter.com/RiversHaveWings - # Multi-GPU support: https://github.com/MicPie/clasp - - def training_step(self, train_batch, idx): - # get optimizers and scheduler - optimizer = self.optimizers() - - image, text, labels = train_batch - n = math.ceil(len(image) // self.minibatch_size) - image_mbs = torch.chunk(image, n) - text_mbs = torch.chunk(text, n) - - with torch.no_grad(): - ims = [F.normalize(self.clip_model.get_image_features(im), dim=1) for im in image_mbs] - txt = [F.normalize(self.text_encoder(t).logits, dim=1) for t in text_mbs] - # gather from all GPUs 这里的LOSS要把所有GPU的汇集起来一起算才对 - ims = self.all_gather(torch.cat(ims)) - txt = self.all_gather(torch.cat(txt)) - - if len(ims.shape) == 3: - ims = list(ims) - txt = list(txt) - else: - ims = [ims] - txt = [txt] - - image_logits = torch.cat(ims) @ torch.cat(txt).t() * self.clip_model.logit_scale.exp() - ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device) - loss = (F.cross_entropy(image_logits, ground_truth) + - F.cross_entropy(image_logits.t(), ground_truth)).div(2) - acc_i = (torch.argmax(image_logits, 1) == ground_truth).sum() - acc_t = (torch.argmax(image_logits, 0) == ground_truth).sum() - self.log_dict({'loss': loss / len(ims), 'acc': (acc_i + acc_t) / 2 / len(image) / len(ims)}, prog_bar=True) - - if isinstance(optimizer, list): - optimizer = optimizer[0] - optimizer.zero_grad() - - # image loss - for j, mb in enumerate(image_mbs[:-1]): - # 最后一部分样本舍弃。(对齐的bug) - images_tmp = copy.deepcopy(ims) - images_tmp[self.global_rank][j * self.minibatch_size:(j+1)*self.minibatch_size] = \ - F.normalize(self.clip_model.get_image_features(mb), dim=1) - image_logits = torch.cat(images_tmp) @ torch.cat(txt).t() * self.clip_model.logit_scale.exp() - ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device) - loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(image_logits.t(), ground_truth))/2 - self.manual_backward(loss) - - # text loss - for j, mb in enumerate(text_mbs[:-1]): - text_tmp = copy.deepcopy(txt) - text_tmp[self.global_rank][j * self.minibatch_size:(j+1)*self.minibatch_size] = \ - F.normalize(self.text_encoder(mb).logits, dim=1) - image_logits = torch.cat(ims) @ torch.cat(text_tmp).t() * self.clip_model.logit_scale.exp() - loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(image_logits.t(), ground_truth))/2 - self.manual_backward(loss) - - optimizer.step() - lr_scheduler = self.lr_schedulers() - lr_scheduler.step() - self.clip_model.logit_scale.data.clamp_(-np.log(100), np.log(100)) - - def validation_step(self, val_batch, idx): - image, text, labels = val_batch - img_embed = self.clip_model.get_image_features(image) - txt_embed = self.text_encoder(text).logits - # print(img_embed.shape) - image_norm = F.normalize(img_embed, dim=1) - text_norm = F.normalize(txt_embed, dim=1) - image_logits = image_norm @ text_norm.t() * 
self.clip_model.logit_scale.exp() - text_logits = text_norm @ image_norm.t() * self.clip_model.logit_scale.exp() - # print(image_logits.shape) - # image_logits, text_logits = self.forward(image, text) - ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device) - loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2) - self.log('val_loss', loss, prog_bar=True) - return [image_norm, text_norm, labels] - - def validation_epoch_end(self, outputs): - image_features = torch.cat([x[0] for x in outputs]) - text_features = torch.cat([x[1] for x in outputs]) - labels = [label for x in outputs for label in x[2]] - print(image_features.shape, text_features.shape, len(labels)) - self.get_metrics(image_features, text_features, labels, 100) - - def test_step(self, test_batch, idx): - image, text, labels = test_batch - image_features = self.clip_model.get_image_features(image) - text_features = self.text_encoder(text).logits - image_features = image_features / image_features.norm(dim=1, keepdim=True) - text_features = text_features / text_features.norm(dim=1, keepdim=True) - return [image_features, text_features, labels] - - def test_epoch_end(self, outputs): - image_features = torch.cat([x[0] for x in outputs]) - text_features = torch.cat([x[1] for x in outputs]) - labels = [label for x in outputs for label in x[2]] - print(image_features.shape, text_features.shape, len(labels)) - self.get_metrics(image_features, text_features, labels, 100) - - def get_metrics(self, image_features, text_features, labels, logit_scale): - # 计算相似度,支持多个样本的情况(比如一个图片有多个caption) - # img2txt计算的时候要用到,因为一张图片可能对应多个文本。 - # txt2img计算的时候不需要(一般一个text只有一个对应图片) - # metrics = {} - logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu() - logits_per_text = logits_per_image.t().detach().cpu() - - logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text} - - label2idx = {} # 计算label到idx的映射。 - repeat_id = [] - for i, label in enumerate(labels): - if label not in label2idx: - label2idx[label] = [i] - else: - # 表示该index的标签出现过,记录这个index,后续算txt2img分数的时候,这些index的权值要降低。 - label2idx[label].append(i) - repeat_id.append(i) - # print(label2idx) # 标注了每个label的idx - - # print('repeat_id:', repeat_id) - ground_truth = [label2idx[label] for label in labels] - # print(ground_truth) - - for name, logit in logits.items(): - # print(name, logit.shape) - if name == 'text_to_image': - logit[:, repeat_id] -= 1e8 # 这部分的分数要降低。(重复出现的图片,直接忽略) - r1_stat, r5_stat, r10_stat = [], [], [] - ranking = torch.argsort(logit, descending=True) # index of the largest element to the smallest - # print(name, ranking[:, :10]) - for i, each_query in enumerate(ranking[:, :10]): - for j, q in enumerate(each_query): - if q in ground_truth[i]: - if j == 0: - r1_stat.append(1) - r5_stat.append(1) - r10_stat.append(1) - break - if j < 5: - r5_stat.append(1) - r10_stat.append(1) - break - if j < 10: - r10_stat.append(1) - break - print(f'{name} r1:{sum(r1_stat)/len(logit)}, r5:{sum(r5_stat)/len(logit)}, r10:{sum(r10_stat)/len(logit)}') - - def configure_optimizers(self): - lr = { - "RN50": 5e-4, - "RN101": 5e-4, - "RN50x4": 5e-4, - "RN50x16": 4e-4, - "RN50x64": 3.6e-4, - "ViT-B/32": 5e-4, - "ViT-B/16": 5e-4, - "ViT-L/14": 4e-4, - "ViT-L/14-336px": 2e-5 - }[self.model_name] - - optimizer = torch.optim.AdamW( - [{'params': self.clip_model.parameters()}, {'params': self.text_encoder.parameters()}], - lr=lr, - betas=( - 0.9, - 0.98 if self.isViT else 0.999 - ), - eps=1e-6 if 
self.isViT else 1e-8, - weight_decay=0.2 - ) - - # Source: https://github.com/openai/CLIP/issues/107 - # Use pip install 'git+https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup' - lr_scheduler = CosineAnnealingWarmRestarts( - optimizer, - T_0=2000 - ) - # CosineAnnealingWarmupRestarts - return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler} - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - - # model_name - parser.add_argument('--model', type=str, - default="ViT-B/32", - help='model definition') - - # experiment setting - parser.add_argument('--batch_size', type=int, default=128) - parser.add_argument('--num_epoches', type=int, default=1) - parser.add_argument('--num_gpus', type=int, default=2) - - # dataset - parser.add_argument('--train_filename', type=str, - help='dir or csv file') - parser.add_argument('--train_root', type=str, - help='image root path') - parser.add_argument('--val_filename', type=str, - help='dir or csv file') - parser.add_argument('--val_root', type=str, - help='image root path') - parser.add_argument('--test_filename', type=str, - help='dir or csv file') - parser.add_argument('--test_root', type=str, - help='image root path') - parser.add_argument('--num_workers', type=int, default=0) - - # huggingface pretrain model 定义 - parser.add_argument('--pretrain_model', type=str, - default="openai/clip-vit-base-patch32", - help='defalut load from openai') # "wf-genius/TaiYi-CLIP-ViT-B-32" 是我训好的 NOTE - - args = parser.parse_args() - dm = FlickrDataModule(args) - - model = CLIPLightning(model_name=args.model, minibatch_size=args.batch_size//2) - trainer = pl.Trainer(gpus=args.num_gpus, precision=16, max_epochs=args.num_epoches) - trainer.test(model, dm) # zero-shot test - trainer.fit(model, dm) # finetune on train set - trainer.test(model, dm) # test again - diff --git a/spaces/skyler36237/vits-uma-genshin-honkai/commons.py b/spaces/skyler36237/vits-uma-genshin-honkai/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/skyler36237/vits-uma-genshin-honkai/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/smartinezbragado/reddit-topic-modelling/templates/error.html b/spaces/smartinezbragado/reddit-topic-modelling/templates/error.html deleted file mode 100644 index 1d29010bdadefcbfe589f04497e27dc61c6971d6..0000000000000000000000000000000000000000 --- a/spaces/smartinezbragado/reddit-topic-modelling/templates/error.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - - Reddit Topic Modelling - - - reddit-logo - -
-

Error: {{ type_of_error }}

-
- - - - \ No newline at end of file diff --git a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/mandarin.py b/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/mandarin.py deleted file mode 100644 index 093d8826809aa2681f6088174427337a59e0c882..0000000000000000000000000000000000000000 --- a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - 
('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/checkpoint_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/checkpoint_utils.py deleted file mode 100644 index 
ef5d4c9022c3c35722f0bc9150260c7a65d35e5f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/checkpoint_utils.py +++ /dev/null @@ -1,858 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import ast -import collections -import contextlib -import logging -import numpy as np -import os -import re -import time -import traceback -from collections import OrderedDict -from typing import Any, Dict, Optional, Union - -import torch -from fairseq.data import data_utils -from fairseq.dataclass.configs import CheckpointConfig -from fairseq.dataclass.utils import ( - convert_namespace_to_omegaconf, - overwrite_args_by_name, -) -from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP -from fairseq.file_io import PathManager -from fairseq.models import FairseqDecoder, FairseqEncoder -from omegaconf import DictConfig, open_dict, OmegaConf - - -logger = logging.getLogger(__name__) - - -def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): - from fairseq import meters - - # only one worker should attempt to create the required dir - if trainer.data_parallel_rank == 0: - os.makedirs(cfg.save_dir, exist_ok=True) - - prev_best = getattr(save_checkpoint, "best", val_loss) - if val_loss is not None: - best_function = max if cfg.maximize_best_checkpoint_metric else min - save_checkpoint.best = best_function(val_loss, prev_best) - - if cfg.no_save: - return - - trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state - - if not trainer.should_save_checkpoint_on_current_rank: - if trainer.always_call_state_dict_during_save_checkpoint: - trainer.state_dict() - return - - write_timer = meters.StopwatchMeter() - write_timer.start() - - epoch = epoch_itr.epoch - end_of_epoch = epoch_itr.end_of_epoch() - updates = trainer.get_num_updates() - - logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") - - def is_better(a, b): - return a >= b if cfg.maximize_best_checkpoint_metric else a <= b - - suffix = trainer.checkpoint_suffix - checkpoint_conds = collections.OrderedDict() - checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( - end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 - ) - checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( - not end_of_epoch - and cfg.save_interval_updates > 0 - and updates % cfg.save_interval_updates == 0 - ) - checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( - not hasattr(save_checkpoint, "best") - or is_better(val_loss, save_checkpoint.best) - ) - if val_loss is not None and cfg.keep_best_checkpoints > 0: - worst_best = getattr(save_checkpoint, "best", None) - chkpts = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if len(chkpts) > 0: - p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0] - worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), "")) - # add random digits to resolve ties - with data_utils.numpy_seed(epoch, updates, val_loss): - rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints) - - checkpoint_conds[ - "checkpoint.best_{}_{:.3f}{}{}.pt".format( - cfg.best_checkpoint_metric, - val_loss, - rand_sfx, - suffix - ) - ] = worst_best is None or 
is_better(val_loss, worst_best) - checkpoint_conds[ - "checkpoint_last{}.pt".format(suffix) - ] = not cfg.no_last_checkpoints - - extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} - if hasattr(save_checkpoint, "best"): - extra_state.update({"best": save_checkpoint.best}) - - checkpoints = [ - os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond - ] - if len(checkpoints) > 0: - trainer.save_checkpoint(checkpoints[0], extra_state) - for cp in checkpoints[1:]: - if cfg.write_checkpoints_asynchronously: - # TODO[ioPath]: Need to implement a delayed asynchronous - # file copying/moving feature. - logger.warning( - f"ioPath is not copying {checkpoints[0]} to {cp} " - "since async write mode is on." - ) - else: - assert PathManager.copy( - checkpoints[0], cp, overwrite=True - ), f"Failed to copy {checkpoints[0]} to {cp}" - - write_timer.stop() - logger.info( - "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( - checkpoints[0], epoch, updates, val_loss, write_timer.sum - ) - ) - - if not end_of_epoch and cfg.keep_interval_updates > 0: - # remove old checkpoints; checkpoints are sorted in descending order - if cfg.keep_interval_updates_pattern == -1: - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix) - ) - else: - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix), - keep_match=True, - ) - checkpoints = [ - x[0] - for x in checkpoints - if x[1] % cfg.keep_interval_updates_pattern != 0 - ] - - for old_chk in checkpoints[cfg.keep_interval_updates :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_last_epochs > 0: - # remove old epoch checkpoints; checkpoints are sorted in descending order - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix) - ) - for old_chk in checkpoints[cfg.keep_last_epochs :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_best_checkpoints > 0: - # only keep the best N checkpoints according to validation metric - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if not cfg.maximize_best_checkpoint_metric: - checkpoints = checkpoints[::-1] - for old_chk in checkpoints[cfg.keep_best_checkpoints :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - -def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): - """ - Load a checkpoint and restore the training iterator. - - *passthrough_args* will be passed through to - ``trainer.get_train_iterator``. 
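- 
- Example (illustrative sketch, assuming a populated ``cfg.checkpoint`` and a configured trainer)::
- 
-     extra_state, epoch_itr = load_checkpoint(cfg.checkpoint, trainer)
-     # extra_state is None on a fresh run; otherwise epoch_itr resumes where training stopped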
- """ - - reset_optimizer = cfg.reset_optimizer - reset_lr_scheduler = cfg.reset_lr_scheduler - optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) - reset_meters = cfg.reset_meters - reset_dataloader = cfg.reset_dataloader - - if cfg.finetune_from_model is not None and ( - reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader - ): - raise ValueError( - "--finetune-from-model can not be set together with either --reset-optimizer" - " or reset_lr_scheduler or reset_meters or reset_dataloader" - ) - - suffix = trainer.checkpoint_suffix - if ( - cfg.restore_file == "checkpoint_last.pt" - ): # default value of restore_file is 'checkpoint_last.pt' - checkpoint_path = os.path.join( - cfg.save_dir, "checkpoint_last{}.pt".format(suffix) - ) - first_launch = not PathManager.exists(checkpoint_path) - if cfg.finetune_from_model is not None and first_launch: - # if there is no last checkpoint to restore, start the finetune from pretrained model - # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. - if PathManager.exists(cfg.finetune_from_model): - checkpoint_path = cfg.finetune_from_model - reset_optimizer = True - reset_lr_scheduler = True - reset_meters = True - reset_dataloader = True - logger.info( - f"loading pretrained model from {checkpoint_path}: " - "optimizer, lr scheduler, meters, dataloader will be reset" - ) - else: - raise ValueError( - f"--funetune-from-model {cfg.finetune_from_model} does not exist" - ) - elif suffix is not None: - checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") - else: - checkpoint_path = cfg.restore_file - - if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: - raise ValueError( - "--finetune-from-model and --restore-file (non-default value) " - "can not be specified together: " + str(cfg) - ) - - extra_state = trainer.load_checkpoint( - checkpoint_path, - reset_optimizer, - reset_lr_scheduler, - optimizer_overrides, - reset_meters=reset_meters, - ) - - if ( - extra_state is not None - and "best" in extra_state - and not reset_optimizer - and not reset_meters - ): - save_checkpoint.best = extra_state["best"] - - if extra_state is not None and not reset_dataloader: - # restore iterator from checkpoint - itr_state = extra_state["train_iterator"] - epoch_itr = trainer.get_train_iterator( - epoch=itr_state["epoch"], load_dataset=True, **passthrough_args - ) - epoch_itr.load_state_dict(itr_state) - else: - epoch_itr = trainer.get_train_iterator( - epoch=1, load_dataset=True, **passthrough_args - ) - - trainer.lr_step(epoch_itr.epoch) - - return extra_state, epoch_itr - - -def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): - """Loads a checkpoint to CPU (with upgrading for backward compatibility). - - If doing single-GPU training or if the checkpoint is only being loaded by at - most one process on each node (current default behavior is for only rank 0 - to read the checkpoint from disk), load_on_all_ranks should be False to - avoid errors from torch.distributed not having been initialized or - torch.distributed.barrier() hanging. - - If all processes on each node may be loading the checkpoint - simultaneously, load_on_all_ranks should be set to True to avoid I/O - conflicts. - - There's currently no support for > 1 but < all processes loading the - checkpoint on each node. 
- """ - local_path = PathManager.get_local_path(path) - # The locally cached file returned by get_local_path() may be stale for - # remote files that are periodically updated/overwritten (ex: - # checkpoint_last.pt) - so we remove the local copy, sync across processes - # (if needed), and then download a fresh copy. - if local_path != path and PathManager.path_requires_pathmanager(path): - try: - os.remove(local_path) - except FileNotFoundError: - # With potentially multiple processes removing the same file, the - # file being missing is benign (missing_ok isn't available until - # Python 3.8). - pass - if load_on_all_ranks: - torch.distributed.barrier() - local_path = PathManager.get_local_path(path) - - with open(local_path, "rb") as f: - state = torch.load(f, map_location=torch.device("cpu")) - - if "args" in state and state["args"] is not None and arg_overrides is not None: - args = state["args"] - for arg_name, arg_val in arg_overrides.items(): - setattr(args, arg_name, arg_val) - - if "cfg" in state and state["cfg"] is not None: - - # hack to be able to set Namespace in dict config. this should be removed when we update to newer - # omegaconf version that supports object flags, or when we migrate all existing models - from omegaconf import _utils - - old_primitive = _utils.is_primitive_type - _utils.is_primitive_type = lambda _: True - - state["cfg"] = OmegaConf.create(state["cfg"]) - - _utils.is_primitive_type = old_primitive - OmegaConf.set_struct(state["cfg"], True) - - if arg_overrides is not None: - overwrite_args_by_name(state["cfg"], arg_overrides) - - state = _upgrade_state_dict(state) - return state - - -def load_model_ensemble( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - """Loads an ensemble of models. 
- - Args: - filenames (List[str]): checkpoint files to load - arg_overrides (Dict[str,Any], optional): override model args that - were used during model training - task (fairseq.tasks.FairseqTask, optional): task to use for loading - """ - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble, args, _task = load_model_ensemble_and_task( - filenames, - arg_overrides, - task, - strict, - suffix, - num_shards, - state, - ) - return ensemble, args - - -def get_maybe_sharded_checkpoint_filename( - filename: str, suffix: str, shard_idx: int, num_shards: int -) -> str: - orig_filename = filename - filename = filename.replace(".pt", suffix + ".pt") - fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt" - model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt" - if PathManager.exists(fsdp_filename): - return fsdp_filename - elif num_shards > 1: - return model_parallel_filename - else: - return filename - - -def load_model_ensemble_and_task( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - assert state is None or len(filenames) == 1 - - from fairseq import tasks - - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble = [] - cfg = None - for filename in filenames: - orig_filename = filename - model_shard_state = {"shard_weights": [], "shard_metadata": []} - assert num_shards > 0 - st = time.time() - for shard_idx in range(num_shards): - filename = get_maybe_sharded_checkpoint_filename( - orig_filename, suffix, shard_idx, num_shards - ) - - if not PathManager.exists(filename): - raise IOError("Model file not found: {}".format(filename)) - if state is None: - state = load_checkpoint_to_cpu(filename, arg_overrides) - if "args" in state and state["args"] is not None: - cfg = convert_namespace_to_omegaconf(state["args"]) - elif "cfg" in state and state["cfg"] is not None: - cfg = state["cfg"] - else: - raise RuntimeError( - f"Neither args nor cfg exist in state keys = {state.keys()}" - ) - - if task is None: - task = tasks.setup_task(cfg.task) - - if "task_state" in state: - task.load_state_dict(state["task_state"]) - - if "fsdp_metadata" in state and num_shards > 1: - model_shard_state["shard_weights"].append(state["model"]) - model_shard_state["shard_metadata"].append(state["fsdp_metadata"]) - # check FSDP import before the code goes too far - if not has_FSDP: - raise ImportError( - "Cannot find FullyShardedDataParallel. 
" - "Please install fairscale with: pip install fairscale" - ) - if shard_idx == num_shards - 1: - consolidated_model_state = FSDP.consolidate_shard_weights( - shard_weights=model_shard_state["shard_weights"], - shard_metadata=model_shard_state["shard_metadata"], - ) - model = task.build_model(cfg.model) - model.load_state_dict( - consolidated_model_state, strict=strict, model_cfg=cfg.model - ) - else: - # model parallel checkpoint or unsharded checkpoint - model = task.build_model(cfg.model) - model.load_state_dict( - state["model"], strict=strict, model_cfg=cfg.model - ) - - # reset state so it gets loaded for the next model in ensemble - state = None - if shard_idx % 10 == 0 and shard_idx > 0: - elapsed = time.time() - st - logger.info( - f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard" - ) - - # build model for ensemble - ensemble.append(model) - return ensemble, cfg, task - - -def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False): - """Retrieves all checkpoints found in `path` directory. - - Checkpoints are identified by matching filename to the specified pattern. If - the pattern contains groups, the result will be sorted by the first group in - descending order. - """ - pt_regexp = re.compile(pattern) - files = PathManager.ls(path) - - entries = [] - for i, f in enumerate(files): - m = pt_regexp.fullmatch(f) - if m is not None: - idx = float(m.group(1)) if len(m.groups()) > 0 else i - entries.append((idx, m.group(0))) - if keep_match: - return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)] - else: - return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] - - -def torch_persistent_save(obj, filename, async_write: bool = False): - if async_write: - with PathManager.opena(filename, "wb") as f: - _torch_persistent_save(obj, f) - else: - if PathManager.supports_rename(filename): - # do atomic save - with PathManager.open(filename + ".tmp", "wb") as f: - _torch_persistent_save(obj, f) - PathManager.rename(filename + ".tmp", filename) - else: - # fallback to non-atomic save - with PathManager.open(filename, "wb") as f: - _torch_persistent_save(obj, f) - - -def _torch_persistent_save(obj, f): - if isinstance(f, str): - with PathManager.open(f, "wb") as h: - torch_persistent_save(obj, h) - return - for i in range(3): - try: - return torch.save(obj, f) - except Exception: - if i == 2: - logger.error(traceback.format_exc()) - raise - - -def _upgrade_state_dict(state): - """Helper for upgrading old model checkpoints.""" - - # add optimizer_history - if "optimizer_history" not in state: - state["optimizer_history"] = [ - {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} - ] - state["last_optimizer_state"] = state["optimizer"] - del state["optimizer"] - del state["best_loss"] - # move extra_state into sub-dictionary - if "epoch" in state and "extra_state" not in state: - state["extra_state"] = { - "epoch": state["epoch"], - "batch_offset": state["batch_offset"], - "val_loss": state["val_loss"], - } - del state["epoch"] - del state["batch_offset"] - del state["val_loss"] - # reduce optimizer history's memory usage (only keep the last state) - if "optimizer" in state["optimizer_history"][-1]: - state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] - for optim_hist in state["optimizer_history"]: - del optim_hist["optimizer"] - # record the optimizer class name - if "optimizer_name" not in state["optimizer_history"][-1]: - 
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" - # move best_loss into lr_scheduler_state - if "lr_scheduler_state" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["lr_scheduler_state"] = { - "best": state["optimizer_history"][-1]["best_loss"] - } - del state["optimizer_history"][-1]["best_loss"] - # keep track of number of updates - if "num_updates" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["num_updates"] = 0 - # old model checkpoints may not have separate source/target positions - if ( - "args" in state - and hasattr(state["args"], "max_positions") - and not hasattr(state["args"], "max_source_positions") - ): - state["args"].max_source_positions = state["args"].max_positions - state["args"].max_target_positions = state["args"].max_positions - # use stateful training data iterator - if "train_iterator" not in state["extra_state"]: - state["extra_state"]["train_iterator"] = { - "epoch": state["extra_state"]["epoch"], - "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), - } - - # backward compatibility, cfg updates - if "args" in state and state["args"] is not None: - # default to translation task - if not hasattr(state["args"], "task"): - state["args"].task = "translation" - # --raw-text and --lazy-load are deprecated - if getattr(state["args"], "raw_text", False): - state["args"].dataset_impl = "raw" - elif getattr(state["args"], "lazy_load", False): - state["args"].dataset_impl = "lazy" - # epochs start at 1 - if state["extra_state"]["train_iterator"] is not None: - state["extra_state"]["train_iterator"]["epoch"] = max( - state["extra_state"]["train_iterator"].get("epoch", 1), 1 - ) - # --remove-bpe ==> --postprocess - if hasattr(state["args"], "remove_bpe"): - state["args"].post_process = state["args"].remove_bpe - # --min-lr ==> --stop-min-lr - if hasattr(state["args"], "min_lr"): - state["args"].stop_min_lr = state["args"].min_lr - del state["args"].min_lr - # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion - if ( - hasattr(state["args"], "criterion") - and state["args"].criterion in [ - "binary_cross_entropy", - "kd_binary_cross_entropy", - ] - ): - state["args"].criterion = "wav2vec" - # remove log_keys if it's None (criteria will supply a default value of []) - if hasattr(state["args"], "log_keys") and state["args"].log_keys is None: - delattr(state["args"], "log_keys") - # speech_pretraining => audio pretraining - if ( - hasattr(state["args"], "task") - and state["args"].task == "speech_pretraining" - ): - state["args"].task = "audio_pretraining" - # audio_cpc => wav2vec - if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc": - state["args"].arch = "wav2vec" - # convert legacy float learning rate to List[float] - if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float): - state["args"].lr = [state["args"].lr] - # convert task data arg to a string instead of List[string] - if ( - hasattr(state["args"], "data") - and isinstance(state["args"].data, list) - and len(state["args"].data) > 0 - ): - state["args"].data = state["args"].data[0] - # remove keys in state["args"] related to teacher-student learning - for key in [ - "static_teachers", - "static_teacher_weights", - "dynamic_teachers", - "dynamic_teacher_weights", - ]: - if key in state["args"]: - delattr(state["args"], key) - - state["cfg"] = convert_namespace_to_omegaconf(state["args"]) - - if "cfg" in state and state["cfg"] is not None: - cfg = state["cfg"] - with open_dict(cfg): - # any 
upgrades for Hydra-based configs - if ( - "task" in cfg - and "eval_wer_config" in cfg.task - and isinstance(cfg.task.eval_wer_config.print_alignment, bool) - ): - cfg.task.eval_wer_config.print_alignment = "hard" - if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool): - cfg.generation.print_alignment = "hard" if cfg.generation.print_alignment else None - if ( - "model" in cfg - and "w2v_args" in cfg.model - and cfg.model.w2v_args is not None - and ( - hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args - ) - and hasattr(cfg.model.w2v_args.task, "eval_wer_config") - and cfg.model.w2v_args.task.eval_wer_config is not None - and isinstance( - cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool - ) - ): - cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard" - - return state - - -def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): - """Prune the given state_dict if desired for LayerDrop - (https://arxiv.org/abs/1909.11556). - - Training with LayerDrop allows models to be robust to pruning at inference - time. This function prunes state_dict to allow smaller models to be loaded - from a larger model and re-maps the existing state_dict for this to occur. - - It's called by functions that load models from checkpoints and does not - need to be called directly. - """ - arch = None - if model_cfg is not None: - arch = ( - model_cfg._name - if isinstance(model_cfg, DictConfig) - else getattr(model_cfg, "arch", None) - ) - - if not model_cfg or arch is None or arch == "ptt_transformer": - # args should not be none, but don't crash if it is. - return state_dict - - encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) - decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) - - if not encoder_layers_to_keep and not decoder_layers_to_keep: - return state_dict - - # apply pruning - logger.info( - "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" - ) - - def create_pruning_pass(layers_to_keep, layer_name): - keep_layers = sorted( - int(layer_string) for layer_string in layers_to_keep.split(",") - ) - mapping_dict = {} - for i in range(len(keep_layers)): - mapping_dict[str(keep_layers[i])] = str(i) - - regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) - return {"substitution_regex": regex, "mapping_dict": mapping_dict} - - pruning_passes = [] - if encoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) - if decoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) - - new_state_dict = {} - for layer_name in state_dict.keys(): - match = re.search(r"\.layers\.(\d+)\.", layer_name) - # if layer has no number in it, it is a supporting layer, such as an - # embedding - if not match: - new_state_dict[layer_name] = state_dict[layer_name] - continue - - # otherwise, layer should be pruned. 
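- # e.g. with encoder_layers_to_keep="0,2,4", mapping_dict is {"0": "0", "2": "1", "4": "2"},
- # so "encoder.layers.4.fc1.weight" is renamed to "encoder.layers.2.fc1.weight" below,
- # while keys from dropped layers are simply never copied into new_state_dict.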
- original_layer_number = match.group(1) - # figure out which mapping dict to replace from - for pruning_pass in pruning_passes: - if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ - "substitution_regex" - ].search(layer_name): - new_layer_number = pruning_pass["mapping_dict"][original_layer_number] - substitution_match = pruning_pass["substitution_regex"].search( - layer_name - ) - new_state_key = ( - layer_name[: substitution_match.start(1)] - + new_layer_number - + layer_name[substitution_match.end(1) :] - ) - new_state_dict[new_state_key] = state_dict[layer_name] - - # Since layers are now pruned, *_layers_to_keep are no longer needed. - # This is more of "It would make it work fix" rather than a proper fix. - if isinstance(model_cfg, DictConfig): - context = open_dict(model_cfg) - else: - context = contextlib.ExitStack() - with context: - if hasattr(model_cfg, "encoder_layers_to_keep"): - model_cfg.encoder_layers_to_keep = None - if hasattr(model_cfg, "decoder_layers_to_keep"): - model_cfg.decoder_layers_to_keep = None - - return new_state_dict - - -def load_pretrained_component_from_model( - component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str -): - """ - Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the - provided `component` object. If state_dict fails to load, there may be a - mismatch in the architecture of the corresponding `component` found in the - `checkpoint` file. - """ - if not PathManager.exists(checkpoint): - raise IOError("Model file not found: {}".format(checkpoint)) - state = load_checkpoint_to_cpu(checkpoint) - if isinstance(component, FairseqEncoder): - component_type = "encoder" - elif isinstance(component, FairseqDecoder): - component_type = "decoder" - else: - raise ValueError( - "component to load must be either a FairseqEncoder or " - "FairseqDecoder. Loading other component types are not supported." - ) - component_state_dict = OrderedDict() - for key in state["model"].keys(): - if key.startswith(component_type): - # encoder.input_layers.0.0.weight --> input_layers.0.0.weight - component_subkey = key[len(component_type) + 1 :] - component_state_dict[component_subkey] = state["model"][key] - component.load_state_dict(component_state_dict, strict=True) - return component - - -def verify_checkpoint_directory(save_dir: str) -> None: - if not os.path.exists(save_dir): - os.makedirs(save_dir, exist_ok=True) - temp_file_path = os.path.join(save_dir, "dummy") - try: - with open(temp_file_path, "w"): - pass - except OSError as e: - logger.warning( - "Unable to access checkpoint save directory: {}".format(save_dir) - ) - raise e - else: - os.remove(temp_file_path) - - -def load_ema_from_checkpoint(fpath): - """Loads exponential moving averaged (EMA) checkpoint from input and - returns a model with ema weights. - - Args: - fpath: A string path of checkpoint to load from. - - Returns: - A dict of string keys mapping to various values. The 'model' key - from the returned dict should correspond to an OrderedDict mapping - string parameter names to torch Tensors. 
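- 
-     Example (illustrative sketch; the checkpoint path is a placeholder)::
- 
-         state = load_ema_from_checkpoint("averaged.pt")
-         model.load_state_dict(state["model"])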
- """ - params_dict = collections.OrderedDict() - new_state = None - - with PathManager.open(fpath, 'rb') as f: - new_state = torch.load( - f, - map_location=( - lambda s, _: torch.serialization.default_restore_location(s, 'cpu') - ), - ) - - # EMA model is stored in a separate "extra state" - model_params = new_state['extra_state']['ema'] - - for key in list(model_params.keys()): - p = model_params[key] - if isinstance(p, torch.HalfTensor): - p = p.float() - if key not in params_dict: - params_dict[key] = p.clone() - # NOTE: clone() is needed in case of p is a shared parameter - else: - raise ValueError("Key {} is repeated in EMA model params.".format(key)) - - if len(params_dict) == 0: - raise ValueError( - f"Input checkpoint path '{fpath}' does not contain " - "ema model weights, is this model trained with EMA?" - ) - - new_state['model'] = params_dict - return new_state diff --git a/spaces/stomexserde/gpt4-ui/Examples/Accounting Jobs Available FREE.md b/spaces/stomexserde/gpt4-ui/Examples/Accounting Jobs Available FREE.md deleted file mode 100644 index 84b786e31ef664ee961806c6c811435355244410..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Accounting Jobs Available FREE.md +++ /dev/null @@ -1,25 +0,0 @@ -
-

How to Find the Best Accounting Jobs Available in 2023

-

Accounting is a diverse and rewarding field that offers many opportunities for professional growth and advancement. Whether you are looking for a career change or a new challenge, there are plenty of accounting jobs available in 2023 that can suit your skills, interests and goals.

-

But how do you find the best accounting jobs available in 2023? How do you stand out from the competition and land your dream job? Here are some tips to help you with your accounting job search:

-

accounting jobs available


Download →→→ https://urlgoal.com/2uIbdJ



-
    -
  • Know your niche. Accounting is a broad field that encompasses many specialties and industries. Some common types of accounting jobs include auditor, budget analyst, accounts payable specialist, tax accountant and forensic accountant[^6^]. Each of these roles requires different skills, qualifications and experience. Before you start your job search, identify your niche and focus on the accounting jobs that match your profile.
  • -
  • Update your resume and cover letter. Your resume and cover letter are your first impressions to potential employers. They should highlight your relevant skills, achievements and value proposition for the accounting job you are applying for. Use keywords from the job description and tailor your resume and cover letter to each position. Avoid generic or outdated templates and make sure your documents are clear, concise and error-free.
  • -
  • Expand your network. Networking is one of the most effective ways to find accounting jobs available in 2023. You can use online platforms such as LinkedIn, Indeed or Glassdoor to connect with recruiters, hiring managers and other professionals in your field. You can also attend accounting events, webinars or workshops to meet new contacts and learn about the latest trends and opportunities in the industry. Networking can help you discover hidden job openings, get referrals and recommendations, and gain insights and advice from experts.
  • -
  • Prepare for interviews. Once you get invited for an interview, you need to prepare well to impress the interviewer and showcase your fit for the role. Research the company, its culture and its goals. Review common accounting interview questions and practice your answers. Prepare some questions of your own to demonstrate your interest and enthusiasm. Dress professionally, arrive on time and be confident and courteous during the interview.
  • -
-

Finding the best accounting jobs available in 2023 may seem daunting, but with some planning, research and networking, you can achieve your career aspirations. Accounting is a dynamic and in-demand field that offers many benefits and opportunities for growth. If you are passionate about numbers, problem-solving and helping others, accounting may be the perfect career choice for you.

- -

How to advance your accounting career

-

Once you find an accounting job that suits you, you may want to explore ways to advance your accounting career and reach your full potential. Here are some tips to help you grow as an accountant:

-
    -
  • Keep learning. Accounting is a constantly evolving field that requires you to stay updated on the latest standards, regulations and technologies. You can pursue continuing education courses, certifications or degrees to enhance your knowledge and skills. For example, you can earn a CPA (Certified Public Accountant) designation, which is one of the most recognized and respected credentials in the accounting profession[^1^]. You can also learn new software tools, such as QuickBooks, Excel or SAP, that can help you perform your tasks more efficiently and accurately.
  • -
  • Seek feedback. Feedback is essential for improving your performance and identifying your strengths and weaknesses. You can ask for feedback from your supervisor, colleagues or clients on a regular basis. You can also seek mentorship from a senior accountant who can guide you through your career path and offer you valuable advice and support. Feedback can help you learn from your mistakes, celebrate your achievements and set realistic and attainable goals.
  • -
  • Expand your responsibilities. One of the best ways to advance your accounting career is to take on new challenges and responsibilities that can showcase your abilities and potential. You can volunteer for projects that interest you, offer to help other departments or teams, or propose new ideas or solutions that can benefit your organization. Expanding your responsibilities can help you gain new experiences, skills and perspectives that can make you a more versatile and valuable accountant.
  • -
  • Build your reputation. Your reputation as an accountant can have a significant impact on your career opportunities and prospects. You can build your reputation by delivering high-quality work, meeting deadlines, communicating effectively, following ethical standards and exceeding expectations. You can also network with other professionals in your field, join accounting associations or groups, or contribute to publications or blogs that showcase your expertise and insights. Building your reputation can help you establish trust and credibility, attract new clients or employers, and create new possibilities for growth.
  • -
-

Advancing your accounting career may require some effort and dedication, but it can also be rewarding and fulfilling. Accounting is a diverse and exciting field that offers many paths for success. By following these tips, you can achieve your career goals and become a successful accountant.

-

-
-
\ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bachchan Telugu Movie English Subtitles Download For Movies.md b/spaces/stomexserde/gpt4-ui/Examples/Bachchan Telugu Movie English Subtitles Download For Movies.md deleted file mode 100644 index 1a4688a74cf2b7f9753adf714749b93b1ea92cff..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Bachchan Telugu Movie English Subtitles Download For Movies.md +++ /dev/null @@ -1,18 +0,0 @@ - -

How to Watch Bachchan Pandey with English Subtitles

-

If you are a fan of Akshay Kumar and want to watch his latest action comedy film Bachchan Pandey, you might be wondering how to get English subtitles for it. Bachchan Pandey is a Hindi film that was released on Disney+ Hotstar in January 2023. It is a remake of the 2014 Tamil film Veeram, in which a kind-hearted villager takes down the enemies of his fiancée's father to safeguard their family.

-

Unfortunately, Bachchan Pandey does not have official English subtitles on Disney+ Hotstar. However, there are some ways you can watch the film with English subtitles online. Here are some of them:

-

Bachchan telugu movie english subtitles download for movies


Download Zip ✯✯✯ https://urlgoal.com/2uI8VY



-
    -
• Download subtitles from opensubtitles.com. This is a popular website that offers subtitles for movies and TV shows in many languages. Search for Bachchan Pandey subtitles and download the file that matches your video's quality and format. Then load the subtitles in your video player, such as VLC or MX Player: go to Subtitle > Add Subtitle File and select the downloaded file[^1^] [^2^]. A scripted version of this step is sketched after this list.
  • -
  • Use a subtitle downloader website. There are some websites that can automatically download subtitles for any video you play online. For example, downsub.com can download subtitles from YouTube, Facebook, Vimeo, and other platforms. You just need to copy and paste the video URL and choose the language you want[^3^].
  • -
  • Use a subtitle editor website. If you are not satisfied with the quality or accuracy of the subtitles you downloaded, you can use a website that allows you to edit and sync subtitles online. For example, subtitletools.com can help you adjust the timing, fix spelling errors, translate words, and more[^4^].
  • -
-
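

For readers comfortable with a little scripting, the subtitle step above can be partly automated. The snippet below is only an illustrative sketch: the subtitle URL is a placeholder you would copy from opensubtitles.com, and the video filename is assumed. It saves the .srt next to your video with a matching name so that players like VLC load it automatically.

```python
# Illustrative sketch: fetch a subtitle file and name it after the video so
# players such as VLC auto-load it. Both paths below are placeholders.
import urllib.request
from pathlib import Path

subtitle_url = "https://example.com/bachchan-pandey-english.srt"  # placeholder URL
video_path = Path("Bachchan Pandey.mp4")  # assumed local video file

srt_path = video_path.with_suffix(".srt")  # same basename -> auto-detected by VLC
urllib.request.urlretrieve(subtitle_url, srt_path)
print(f"Saved subtitles to {srt_path}")
```

-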

With these methods, you can enjoy watching Bachchan Pandey with English subtitles and understand the dialogues and jokes better. Bachchan Pandey is a fun and entertaining film that showcases Akshay Kumar's versatility and charisma as an actor. Don't miss it!

- -

Bachchan Pandey is not just a typical action comedy film. It also has a strong emotional core that explores the themes of family, loyalty, and love. The film features a stellar cast of actors, including Kriti Sanon, Jacqueline Fernandez, Arshad Warsi, Pankaj Tripathi, and Prateik Babbar. The film is directed by Farhad Samji, who has previously helmed films like Housefull 4 and Entertainment.

-

The film has received positive reviews from critics and audiences alike. It has been praised for its engaging story, hilarious dialogues, thrilling action sequences, and catchy songs. The film has also performed well at the box office, earning over Rs. 200 crore worldwide. Bachchan Pandey is one of the biggest hits of 2023 and a must-watch for Akshay Kumar fans.

- -

If you are wondering what Bachchan Pandey is about, here is a brief synopsis of the film. Bachchan Pandey (Akshay Kumar) is a gangster who works for his boss Thakur (Pankaj Tripathi) in a small town in Uttar Pradesh. He falls in love with Maya (Kriti Sanon), a journalist who comes to the town to expose Thakur's illegal activities. However, Maya's father (Sharad Kelkar) has already fixed her marriage with Rana (Prateik Babbar), a rich businessman from Mumbai. To stop the marriage, Bachchan Pandey kidnaps Rana and takes him to his village, where he pretends to be his brother. There, he meets Rana's sister Radhika (Jacqueline Fernandez), who also falls in love with him. Meanwhile, Thakur sends his men to kill Bachchan Pandey and Rana, and Maya also arrives at the village to find out the truth. How will Bachchan Pandey save himself and his loved ones from Thakur's wrath? How will he win Maya's heart and convince her father? Watch the film to find out!

-
-
\ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/BarTender Enterprise Automation V10.0 SR1 Build 2845 22 NEW!.md b/spaces/stomexserde/gpt4-ui/Examples/BarTender Enterprise Automation V10.0 SR1 Build 2845 22 NEW!.md deleted file mode 100644 index 5c015bd0a7a6674943db13da5fbd8dca6c2aba39..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/BarTender Enterprise Automation V10.0 SR1 Build 2845 22 NEW!.md +++ /dev/null @@ -1,20 +0,0 @@ - -

How to Install BarTender Enterprise Automation v10.0 SR1 Build 2845 22

-

BarTender Enterprise Automation is powerful label management software that lets you design, print and automate labels and documents from any business system. It offers centralized control, workflow orchestration, revision control, auditing, web and mobile printing, high availability and security features.

-

In this article, we will show you how to install BarTender Enterprise Automation v10.0 SR1 Build 2845 22 on your Windows PC. This version is compatible with Windows 7, 8, 8.1 and 10.

-

BarTender Enterprise Automation v10.0 SR1 Build 2845 22


Download File: https://urlgoal.com/2uI7ax



-

Step 1: Download the Software

-

To download BarTender Enterprise Automation v10.0 SR1 Build 2845 22, you can use this link[^1^] from Google Drive. The file size is about 270 MB and it is a compressed RAR file. You will need a program like WinRAR or 7-Zip to extract it.

-

Step 2: Extract the Software

-

After downloading the file, locate it on your computer and right-click on it. Select "Extract Here" or "Extract to BarTender Enterprise Automation v10.0 SR1 Build 2845 22" depending on your program. You will see a folder with the same name as the file.

-

Step 3: Run the Setup

-

Open the folder and double-click on the file named "Setup.exe". This will launch the installation wizard of BarTender Enterprise Automation. Follow the instructions on the screen to complete the installation process. You will need to accept the license agreement, choose the installation type (Typical or Custom), select the components to install (BarTender Suite, Printer Drivers, Seagull License Server and Integration Builder), choose the installation folder and create a desktop shortcut.

-

Step 4: Activate the Software

-

After installing the software, you will need to activate it with a valid license key. You can use either online or offline activation. For online activation, you will need an internet connection and a license number that you can obtain from Seagull Scientific[^4^]. For offline activation, you will need a product key code that you can generate on another computer with internet access and then enter on your PC.

-

To activate the software, launch BarTender Enterprise Automation from your desktop shortcut or Start menu. On the welcome screen, click on "Activate Now" and choose your preferred method of activation. Follow the instructions on the screen to complete the activation process.

-

Step 5: Enjoy the Software

-

Congratulations! You have successfully installed and activated BarTender Enterprise Automation v10.0 SR1 Build 2845 22 on your PC. You can now start designing, printing and automating labels and documents from any business system with ease.

-

If you want to learn more about BarTender Enterprise Automation features, specifications and resources, you can visit Seagull Scientific's website[^4^] or read their white paper[^6^]. You can also check out their support portal[^2^] for release notes, troubleshooting tips and FAQs.

-
-
\ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent.md b/spaces/stomexserde/gpt4-ui/Examples/Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent.md deleted file mode 100644 index 4348eac02ae4685d1dabf3fdfaceedb876311d96..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent.md +++ /dev/null @@ -1,36 +0,0 @@ - -

How to Boost Your Business Productivity with Excel Templates

-

If you are looking for a way to streamline your business processes, improve your data analysis, and save time and money, you might want to check out the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent. This is a collection of over 50 professional and customizable Excel templates that cover various aspects of business management, such as accounting, finance, inventory, sales, marketing, project management, and more.

-

Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent


Download File: https://urlgoal.com/2uI7EJ



-

The Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent is designed to help you solve common business problems and optimize your workflow. You can use these templates to create invoices, budgets, forecasts, reports, charts, dashboards, and more. You can also modify them to suit your specific needs and preferences. The templates are compatible with all versions of Excel and can be downloaded for free from the Seu Utorrent website.

-

Some of the benefits of using the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent are:

-
    -
  • They save you time and effort by providing ready-made solutions for various business scenarios.
  • -
  • They enhance your data analysis and decision making by providing clear and accurate information.
  • -
  • They improve your business performance and profitability by helping you track and manage your resources, expenses, income, and goals.
  • -
  • They increase your customer satisfaction and loyalty by enabling you to deliver high-quality products and services.
  • -
-

If you want to take your business to the next level with Excel templates, don't miss this opportunity to download the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent today. You will be amazed by how much easier and faster your business operations will become with these powerful tools.

- -

How to Download and Use the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent

-

Downloading and using the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent is very easy and convenient. All you need is a torrent client, such as BitTorrent or uTorrent, and an internet connection. Follow these simple steps to get started:

-

-
    -
  1. Go to the Seu Utorrent website and search for the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent torrent file. You can also use this link[^1^] to access it directly.
  2. -
  3. Click on the download button and open the torrent file with your torrent client software.
  4. -
  5. Select the destination folder where you want to save the Excel templates and start the download process.
  6. -
  7. Once the download is complete, unzip the compressed file and open the folder containing the Excel templates.
  8. -
  9. Choose the template that you want to use and double-click on it to open it in Excel.
  10. -
  11. Enjoy using the Excel templates for your business needs and customize them as you wish.
  12. -
-

Tips and Tricks for Using the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent

-

To make the most out of the Business Spreadsheets Excel Templates Pack [01 July 2018] - Seu Utorrent, here are some tips and tricks that you can use:

-
    -
  • Read the instructions and notes that are provided in each template to understand how to use it properly and what inputs are required.
  • -
  • Use the built-in formulas, functions, charts, and tools that are available in each template to perform calculations, analysis, and visualization of your data.
  • -
  • Adjust the formatting, layout, colors, fonts, and other design elements of each template to match your branding and preferences.
  • -
  • Save your work frequently and create backup copies of your files to avoid losing your data.
  • -
  • Share your files with your colleagues, clients, or stakeholders via email or cloud storage services such as OneDrive or Dropbox.
  • -

-
-
\ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Desi Boyz Movie 720p.md b/spaces/stomexserde/gpt4-ui/Examples/Download Desi Boyz Movie 720p.md deleted file mode 100644 index 40a124dc5f4644530b3ed937b7b42a9e803ff5df..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Desi Boyz Movie 720p.md +++ /dev/null @@ -1,34 +0,0 @@ - -

How to Download Desi Boyz Movie 720p

-

Desi Boyz is a 2011 Bollywood comedy-drama film starring Akshay Kumar, John Abraham, Deepika Padukone and Chitrangda Singh. The film follows two friends who lose their jobs and become male escorts to make ends meet. The film was directed by Rohit Dhawan, son of veteran filmmaker David Dhawan.

-

Download Desi Boyz Movie 720p


Download File ::: https://urlgoal.com/2uI7tI



-

If you are looking for a way to download Desi Boyz movie 720p, you have come to the right place. In this article, we will show you some of the best options to watch or download this movie online legally and safely.

-

Option 1: Eros Now

-

Eros Now is a popular streaming service that offers a large collection of Indian movies, TV shows, music and originals. You can watch Desi Boyz movie 720p on Eros Now with a subscription plan that costs $4.99 per month or $49.99 per year. You can also get a free trial for 14 days to test the service before committing.

-

To watch Desi Boyz movie 720p on Eros Now, follow these steps:

-
    -
  1. Go to https://www.erosnow.com/ and sign up for an account or log in if you already have one.
  2. -
  3. Search for Desi Boyz movie in the search bar or browse the comedy genre.
  4. -
  5. Select the movie and click on the play button.
  6. -
  7. Enjoy watching Desi Boyz movie 720p on your device of choice.
  8. -
-

Option 2: Google Play Movies

-

Google Play Movies is another reliable option to watch or download Desi Boyz movie 720p. You can rent or buy the movie from Google Play Movies and watch it on your Android, iOS, Chromecast, Roku, Smart TV or web browser.

-

To watch or download Desi Boyz movie 720p from Google Play Movies, follow these steps:

-
    -
  1. Go to https://play.google.com/store/movies/details/Desi_Boyz?id=0c4f3f7c-9b6d-4f9b-8f9d-3e8c0a7e5c6f and sign in with your Google account.
  2. -
  3. Choose whether you want to rent or buy the movie. The rental price is $2.99 and the purchase price is $4.99.
  4. -
  5. Click on the buy or rent button and confirm your payment method.
  6. -
  7. Once the transaction is completed, you can watch or download Desi Boyz movie 720p from your library.
  8. -
-

Option 3: YouTube

-

YouTube is another platform where you can watch or download Desi Boyz movie 720p. You can rent or buy the movie from YouTube and watch it on your computer, mobile device, Smart TV or gaming console.

-

To watch or download Desi Boyz movie 720p from YouTube, follow these steps:

-
    -
  1. Go to https://www.youtube.com/watch?v=0c4f3f7c-9b6d-4f9b-8f9d-3e8c0a7e5c6f and sign in with your Google account.
  2. -
  3. Choose whether you want to rent or buy the movie. The rental price is $2.99 and the purchase price is $4.99.
  4. -
  5. Click on the buy or rent button and confirm your payment method.
  6. -
7. Once the transaction is completed, you can watch or download Desi Boyz movie 720p from your library.
8. -

    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl.md b/spaces/stomexserde/gpt4-ui/Examples/Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl.md deleted file mode 100644 index e563b5d28f446a89a246638885f5b576509fc8b3..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl.md +++ /dev/null @@ -1,13 +0,0 @@ - -

Harry Potter E O Prisioneiro De Azkaban: A Magical Film in High Definition

    -

If you are a fan of the Harry Potter saga, you cannot miss the chance to watch the third film in the series in high definition. Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl is a Brazilian Portuguese dubbed version of the film, which follows the adventures of Harry, Ron and Hermione during their third year at Hogwarts.

    -

In this film, Harry discovers that Sirius Black, a dangerous murderer who escaped from the prison of Azkaban, is after him. He also has to deal with the Dementors, the dark creatures that guard the prison and can drain people's happiness. To protect himself, Harry learns to conjure the Patronus charm, an advanced form of magic that requires great concentration and courage.

    -

    Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl


Download Zip: https://urlgoal.com/2uI80r



    -

Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl is a film full of action, mystery and fantasy that will draw you in from beginning to end. You will be thrilled by the flight of the hippogriff Buckbeak, Harry's meeting with his godfather Sirius, and the journey back in time with the Time-Turner. You will also be amused by the antics of Professor Lupin, the new Defence Against the Dark Arts teacher, and by the Marauder's Map, a magical map that reveals all of Hogwarts' secrets.

    -

To watch the film in high definition, you only need to visit the Filmesl website and download or stream the file. You will be surprised by the film's picture and sound quality, which will make you feel as if you were inside the story. Don't waste time: take this chance to see Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl, a film that will enchant you and make you dream.

    - -

Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl is a film that cannot be missing from your collection of Harry Potter films. The film was released in 2004 and directed by Alfonso Cuarón, who brought a darker, more mature tone to the story. It was a critical and commercial success, and many fans consider it the best film in the series.

    -

The film also earned recognition in several categories, such as the Oscar for Best Visual Effects, the BAFTA for Best British Film and the MTV Movie Award for Best Film. The cast includes Daniel Radcliffe as Harry Potter, Rupert Grint as Ron Weasley, Emma Watson as Hermione Granger, Gary Oldman as Sirius Black, David Thewlis as Remus Lupin, Michael Gambon as Albus Dumbledore, Alan Rickman as Severus Snape and Emma Thompson as Sybill Trelawney, among others.

    -

    Se você quer reviver as emoções do filme ou se você ainda não assistiu a essa obra-prima do cinema, não deixe de conferir Harry Potter E O Prisioneiro De Azkaban 720p Dublado Filmesl. Você vai se maravilhar com as cenas de magia, os efeitos especiais, a trilha sonora e a atuação dos atores. Você vai se sentir parte do mundo mágico de Harry Potter e vai querer assistir ao filme novamente. Acesse já o site Filmesl e garanta o seu entretenimento.

 
    -
    -
    \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/examples/search_kb.py b/spaces/sub314xxl/MetaGPT/examples/search_kb.py deleted file mode 100644 index 449099380b4f8c1704fbd9358ef45c80f218d02f..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/examples/search_kb.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@File : search_kb.py -@Modified By: mashenquan, 2023-8-9, fix-bug: cannot find metagpt module. -""" -import asyncio -from pathlib import Path -import sys -sys.path.append(str(Path(__file__).resolve().parent.parent)) -from metagpt.const import DATA_PATH -from metagpt.document_store import FaissStore -from metagpt.logs import logger -from metagpt.roles import Sales - - -async def search(): - store = FaissStore(DATA_PATH / 'example.json') - role = Sales(profile="Sales", store=store) - - queries = ["Which facial cleanser is good for oily skin?", "Is L'Oreal good to use?"] - for query in queries: - logger.info(f"User: {query}") - result = await role.run(query) - logger.info(result) - - -if __name__ == '__main__': - asyncio.run(search()) diff --git a/spaces/sub314xxl/MusicGen/audiocraft/modules/seanet.py b/spaces/sub314xxl/MusicGen/audiocraft/modules/seanet.py deleted file mode 100644 index 3e5998e9153afb6e68ea410d565e00ea835db248..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MusicGen/audiocraft/modules/seanet.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import numpy as np -import torch.nn as nn - -from .conv import StreamableConv1d, StreamableConvTranspose1d -from .lstm import StreamableLSTM - - -class SEANetResnetBlock(nn.Module): - """Residual block from SEANet model. - - Args: - dim (int): Dimension of the input/output. - kernel_sizes (list): List of kernel sizes for the convolutions. - dilations (list): List of dilations for the convolutions. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection. 
- """ - def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1], - activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False, - pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True): - super().__init__() - assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations' - act = getattr(nn, activation) - hidden = dim // compress - block = [] - for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): - in_chs = dim if i == 0 else hidden - out_chs = dim if i == len(kernel_sizes) - 1 else hidden - block += [ - act(**activation_params), - StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation, - norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - self.block = nn.Sequential(*block) - self.shortcut: nn.Module - if true_skip: - self.shortcut = nn.Identity() - else: - self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode) - - def forward(self, x): - return self.shortcut(x) + self.block(x) - - -class SEANetEncoder(nn.Module): - """SEANet encoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of - upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here - that must match the decoder order. We use the decoder order as some models may only employ the decoder. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. - residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the encoder, it corresponds to the N first blocks. 
- """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0): - super().__init__() - self.channels = channels - self.dimension = dimension - self.n_filters = n_filters - self.ratios = list(reversed(ratios)) - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." - - act = getattr(nn, activation) - mult = 1 - model: tp.List[nn.Module] = [ - StreamableConv1d(channels, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Downsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - norm=block_norm, norm_params=norm_params, - activation=activation, activation_params=activation_params, - causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - # Add downsampling layers - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, mult * n_filters * 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - mult *= 2 - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, dimension, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - self.model = nn.Sequential(*model) - - def forward(self, x): - return self.model(x) - - -class SEANetDecoder(nn.Module): - """SEANet decoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - final_activation (str): Final activation function after all convolutions. - final_activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. 
- residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple. - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the decoder, it corresponds to the N last blocks. - trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. - If equal to 1.0, it means that all the trimming is done at the right. - """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0): - super().__init__() - self.dimension = dimension - self.channels = channels - self.n_filters = n_filters - self.ratios = ratios - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." 
- - act = getattr(nn, activation) - mult = int(2 ** len(self.ratios)) - model: tp.List[nn.Module] = [ - StreamableConv1d(dimension, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - # Upsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm - # Add upsampling layers - model += [ - act(**activation_params), - StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, trim_right_ratio=trim_right_ratio), - ] - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - activation=activation, activation_params=activation_params, - norm=block_norm, norm_params=norm_params, causal=causal, - pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - mult //= 2 - - # Add final layers - model += [ - act(**activation_params), - StreamableConv1d(n_filters, channels, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Add optional final activation to decoder (eg. tanh) - if final_activation is not None: - final_act = getattr(nn, final_activation) - final_activation_params = final_activation_params or {} - model += [ - final_act(**final_activation_params) - ] - self.model = nn.Sequential(*model) - - def forward(self, z): - y = self.model(z) - return y diff --git a/spaces/sub314xxl/saiga2_13b_ggml/app.py b/spaces/sub314xxl/saiga2_13b_ggml/app.py deleted file mode 100644 index 883ccb3c59fcb2dda2c31a00ef1e9dd5ed276828..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/saiga2_13b_ggml/app.py +++ /dev/null @@ -1,210 +0,0 @@ -import gradio as gr - -import copy -import random -import os -import requests -import time -import sys - -from huggingface_hub import snapshot_download -from llama_cpp import Llama - - -SYSTEM_PROMPT = "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им." 
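 # Editor's note, not part of the original file: a hedged sketch of the prompt layout # that the constants below implement. get_message_tokens() tokenizes the content # (llama_cpp prepends BOS by default), then splices the role marker in, so each turn # is framed as # [BOS] [role token] [LINEBREAK_TOKEN] ...content tokens... [EOS] # and bot() ends the prompt with [BOS, BOT_TOKEN, LINEBREAK_TOKEN] so that generation # starts inside an open bot turn. The ids 1788/1404/9225/13 are specific to the # saiga2 tokenizer; they are not general llama.cpp constants. 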
-SYSTEM_TOKEN = 1788 -USER_TOKEN = 1404 -BOT_TOKEN = 9225 -LINEBREAK_TOKEN = 13 - - -ROLE_TOKENS = { - "user": USER_TOKEN, - "bot": BOT_TOKEN, - "system": SYSTEM_TOKEN -} - - -def get_message_tokens(model, role, content): - message_tokens = model.tokenize(content.encode("utf-8")) - message_tokens.insert(1, ROLE_TOKENS[role]) - message_tokens.insert(2, LINEBREAK_TOKEN) - message_tokens.append(model.token_eos()) - return message_tokens - - -def get_system_tokens(model): - system_message = {"role": "system", "content": SYSTEM_PROMPT} - return get_message_tokens(model, **system_message) - - -repo_name = "IlyaGusev/saiga2_13b_ggml" -model_name = "ggml-model-q4_1.bin" - -snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name) - -model = Llama( - model_path=model_name, - n_ctx=2000, - n_parts=1, -) - -max_new_tokens = 1500 - -def user(message, history): - new_history = history + [[message, None]] - return "", new_history - - -def bot( - history, - system_prompt, - top_p, - top_k, - temp -): - tokens = get_system_tokens(model)[:] - tokens.append(LINEBREAK_TOKEN) - - for user_message, bot_message in history[:-1]: - message_tokens = get_message_tokens(model=model, role="user", content=user_message) - tokens.extend(message_tokens) - if bot_message: - message_tokens = get_message_tokens(model=model, role="bot", content=bot_message) - tokens.extend(message_tokens) - - last_user_message = history[-1][0] - message_tokens = get_message_tokens(model=model, role="user", content=last_user_message) - tokens.extend(message_tokens) - - role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN] - tokens.extend(role_tokens) - generator = model.generate( - tokens, - top_k=top_k, - top_p=top_p, - temp=temp - ) - - partial_text = "" - for i, token in enumerate(generator): - if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens): - break - partial_text += model.detokenize([token]).decode("utf-8", "ignore") - history[-1][1] = partial_text - yield history - - -with gr.Blocks( - theme=gr.themes.Soft() -) as demo: - favicon = '' - gr.Markdown( - f"""

    {favicon}Saiga2 13B GGML Q4_1

    - - This is a demo of a **Russian**-speaking LLaMA2-based model. If you are interested in other languages, please check other models, such as [MPT-7B-Chat](https://huggingface.co/spaces/mosaicml/mpt-7b-chat). - - Это демонстрационная версия [квантованной Сайги-2 с 13 миллиардами параметров](https://huggingface.co/IlyaGusev/saiga2_13b_ggml), работающая на CPU. - - Сайга-2 — это разговорная языковая модель, которая основана на [LLaMA-2](https://ai.meta.com/llama/) и дообучена на корпусах, сгенерированных ChatGPT, таких как [ru_turbo_alpaca](https://huggingface.co/datasets/IlyaGusev/ru_turbo_alpaca), [ru_turbo_saiga](https://huggingface.co/datasets/IlyaGusev/ru_turbo_saiga) и [gpt_roleplay_realm](https://huggingface.co/datasets/IlyaGusev/gpt_roleplay_realm). - """ - ) - with gr.Row(): - with gr.Column(scale=5): - system_prompt = gr.Textbox(label="Системный промпт", placeholder="", value=SYSTEM_PROMPT, interactive=False) - chatbot = gr.Chatbot(label="Диалог").style(height=400) - with gr.Column(min_width=80, scale=1): - with gr.Tab(label="Параметры генерации"): - top_p = gr.Slider( - minimum=0.0, - maximum=1.0, - value=0.9, - step=0.05, - interactive=True, - label="Top-p", - ) - top_k = gr.Slider( - minimum=10, - maximum=100, - value=30, - step=5, - interactive=True, - label="Top-k", - ) - temp = gr.Slider( - minimum=0.0, - maximum=2.0, - value=0.1, - step=0.1, - interactive=True, - label="Temp" - ) - with gr.Row(): - with gr.Column(): - msg = gr.Textbox( - label="Отправить сообщение", - placeholder="Отправить сообщение", - show_label=False, - ).style(container=False) - with gr.Column(): - with gr.Row(): - submit = gr.Button("Отправить") - stop = gr.Button("Остановить") - clear = gr.Button("Очистить") - with gr.Row(): - gr.Markdown( - """ПРЕДУПРЕЖДЕНИЕ: Модель может генерировать фактически или этически некорректные тексты. Мы не несём за это ответственность.""" - ) - - # Pressing Enter - submit_event = msg.submit( - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=False, - ).success( - fn=bot, - inputs=[ - chatbot, - system_prompt, - top_p, - top_k, - temp - ], - outputs=chatbot, - queue=True, - ) - - # Pressing the button - submit_click_event = submit.click( - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=False, - ).success( - fn=bot, - inputs=[ - chatbot, - system_prompt, - top_p, - top_k, - temp - ], - outputs=chatbot, - queue=True, - ) - - # Stop generation - stop.click( - fn=None, - inputs=None, - outputs=None, - cancels=[submit_event, submit_click_event], - queue=False, - ) - - # Clear history - clear.click(lambda: None, None, chatbot, queue=False) - -demo.queue(max_size=128, concurrency_count=1) -demo.launch() diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dbf Viewer 2000 Crack 39.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dbf Viewer 2000 Crack 39.md deleted file mode 100644 index 29694a87f90007aaa2d701d5a6863565a3fe229a..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dbf Viewer 2000 Crack 39.md +++ /dev/null @@ -1,8 +0,0 @@ -
    -

    Now one of the best tools for easy file transfer is Windows Explorer. With TeamViewer Host, you can easily access your Windows machines from anywhere. Drag and drop a file into the TeamViewer window and a notification will appear with details about the file and its location. Save any changes to the PC using the keyboard.

    -

    The settings automatically optimize the file type for maximum viewing performance. For example, images, PDFs, and videos are much easier to view in a PDF reader. Select the File menu to access the viewer's support settings and create new views of the data, among other things. dbf files are a bit more complex than other file types, so you might want to make minor tweaks to reduce the file size.

    -

    dbf viewer 2000 crack 39


    Download Filehttps://cinurl.com/2uEYJG



    -

    DbF Viewer2000 is like crack for WordPress enthusiasts who joined together on Twitter and eagerly counted down the minutes until sessions began. The event took place over the weekend, kicked off by the DradCast podcast which introduced a catchy new WordSesh rap. In case you missed it, WordSesh presenters cranked out an impressive 24 hours of free WordPress knowledge and each session is now available on YouTube.

    -

    How to work with or view someone else's.dbf file, dbf files and wordpad. Dbf file viewer 2000 crack 39, DbF Viewer 2000 Crack 39, DbF Viewer 2000 Crack 39, DbF Viewer 2000 Crack 39, Dbf Viewer2000 Crack. Dbf Viewer: DbF Viewer: New! February 21st, Dbf Viewer 2D using Dbf Viewer 2000. Dbf Viewer2000 Crack - 97.6 MB. Dbf Viewer 2000 Crack is a database viewer & editor designed for working with Microsofts dbf.dbf files. Want to be able to open, view, edit and export dbf files? Dbf Viewer 2000 Crack. Dbf Viewer2000.

 
    -
    -
    \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Veer Zaara Movie Torrent 1080p Fix.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Veer Zaara Movie Torrent 1080p Fix.md deleted file mode 100644 index 5d5db2821d5a338922003cd6bb67c28c80ea126d..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Veer Zaara Movie Torrent 1080p Fix.md +++ /dev/null @@ -1,6 +0,0 @@ -

    download Veer Zaara movie torrent 1080p


    Downloadhttps://cinurl.com/2uEXSe



 - -720p BRRip CharmeLeon Silver 46 -> DOWNLOAD.. veer Zaara HD .. Uploaded: ... Veer Zaara Hindi Movie Dvdrip With English Subtitles Torrent. 
    -
    -
    -

    diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/lr_updater.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/lr_updater.py deleted file mode 100644 index 6365908ddf6070086de2ffc0afada46ed2f32256..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +++ /dev/null @@ -1,670 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numbers -from math import cos, pi - -import annotator.uniformer.mmcv as mmcv -from .hook import HOOKS, Hook - - -class LrUpdaterHook(Hook): - """LR Scheduler in MMCV. - - Args: - by_epoch (bool): LR changes epoch by epoch - warmup (string): Type of warmup used. It can be None(use no warmup), - 'constant', 'linear' or 'exp' - warmup_iters (int): The number of iterations or epochs that warmup - lasts - warmup_ratio (float): LR used at the beginning of warmup equals to - warmup_ratio * initial_lr - warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters - means the number of epochs that warmup lasts, otherwise means the - number of iteration that warmup lasts - """ - - def __init__(self, - by_epoch=True, - warmup=None, - warmup_iters=0, - warmup_ratio=0.1, - warmup_by_epoch=False): - # validate the "warmup" argument - if warmup is not None: - if warmup not in ['constant', 'linear', 'exp']: - raise ValueError( - f'"{warmup}" is not a supported type for warming up, valid' - ' types are "constant" and "linear"') - if warmup is not None: - assert warmup_iters > 0, \ - '"warmup_iters" must be a positive integer' - assert 0 < warmup_ratio <= 1.0, \ - '"warmup_ratio" must be in range (0,1]' - - self.by_epoch = by_epoch - self.warmup = warmup - self.warmup_iters = warmup_iters - self.warmup_ratio = warmup_ratio - self.warmup_by_epoch = warmup_by_epoch - - if self.warmup_by_epoch: - self.warmup_epochs = self.warmup_iters - self.warmup_iters = None - else: - self.warmup_epochs = None - - self.base_lr = [] # initial lr for all param groups - self.regular_lr = [] # expected lr if no warming up is performed - - def _set_lr(self, runner, lr_groups): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - for param_group, lr in zip(optim.param_groups, lr_groups[k]): - param_group['lr'] = lr - else: - for param_group, lr in zip(runner.optimizer.param_groups, - lr_groups): - param_group['lr'] = lr - - def get_lr(self, runner, base_lr): - raise NotImplementedError - - def get_regular_lr(self, runner): - if isinstance(runner.optimizer, dict): - lr_groups = {} - for k in runner.optimizer.keys(): - _lr_group = [ - self.get_lr(runner, _base_lr) - for _base_lr in self.base_lr[k] - ] - lr_groups.update({k: _lr_group}) - - return lr_groups - else: - return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr] - - def get_warmup_lr(self, cur_iters): - - def _get_warmup_lr(cur_iters, regular_lr): - if self.warmup == 'constant': - warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr] - elif self.warmup == 'linear': - k = (1 - cur_iters / self.warmup_iters) * (1 - - self.warmup_ratio) - warmup_lr = [_lr * (1 - k) for _lr in regular_lr] - elif self.warmup == 'exp': - k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) - warmup_lr = [_lr * k for _lr in regular_lr] - return warmup_lr - - if isinstance(self.regular_lr, dict): - lr_groups = {} - for key, regular_lr in self.regular_lr.items(): - lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr) - return lr_groups - else: - 
return _get_warmup_lr(cur_iters, self.regular_lr) - - def before_run(self, runner): - # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved, - # it will be set according to the optimizer params - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - for group in optim.param_groups: - group.setdefault('initial_lr', group['lr']) - _base_lr = [ - group['initial_lr'] for group in optim.param_groups - ] - self.base_lr.update({k: _base_lr}) - else: - for group in runner.optimizer.param_groups: - group.setdefault('initial_lr', group['lr']) - self.base_lr = [ - group['initial_lr'] for group in runner.optimizer.param_groups - ] - - def before_train_epoch(self, runner): - if self.warmup_iters is None: - epoch_len = len(runner.data_loader) - self.warmup_iters = self.warmup_epochs * epoch_len - - if not self.by_epoch: - return - - self.regular_lr = self.get_regular_lr(runner) - self._set_lr(runner, self.regular_lr) - - def before_train_iter(self, runner): - cur_iter = runner.iter - if not self.by_epoch: - self.regular_lr = self.get_regular_lr(runner) - if self.warmup is None or cur_iter >= self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - elif self.by_epoch: - if self.warmup is None or cur_iter > self.warmup_iters: - return - elif cur_iter == self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - - -@HOOKS.register_module() -class FixedLrUpdaterHook(LrUpdaterHook): - - def __init__(self, **kwargs): - super(FixedLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - return base_lr - - -@HOOKS.register_module() -class StepLrUpdaterHook(LrUpdaterHook): - """Step LR scheduler with min_lr clipping. - - Args: - step (int | list[int]): Step to decay the LR. If an int value is given, - regard it as the decay interval. If a list is given, decay LR at - these steps. - gamma (float, optional): Decay LR ratio. Default: 0.1. - min_lr (float, optional): Minimum LR value to keep. If LR after decay - is lower than `min_lr`, it will be clipped to this value. If None - is given, we don't perform lr clipping. Default: None. 
- """ - - def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): - if isinstance(step, list): - assert mmcv.is_list_of(step, int) - assert all([s > 0 for s in step]) - elif isinstance(step, int): - assert step > 0 - else: - raise TypeError('"step" must be a list or integer') - self.step = step - self.gamma = gamma - self.min_lr = min_lr - super(StepLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - - # calculate exponential term - if isinstance(self.step, int): - exp = progress // self.step - else: - exp = len(self.step) - for i, s in enumerate(self.step): - if progress < s: - exp = i - break - - lr = base_lr * (self.gamma**exp) - if self.min_lr is not None: - # clip to a minimum value - lr = max(lr, self.min_lr) - return lr - - -@HOOKS.register_module() -class ExpLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, **kwargs): - self.gamma = gamma - super(ExpLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * self.gamma**progress - - -@HOOKS.register_module() -class PolyLrUpdaterHook(LrUpdaterHook): - - def __init__(self, power=1., min_lr=0., **kwargs): - self.power = power - self.min_lr = min_lr - super(PolyLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - coeff = (1 - progress / max_progress)**self.power - return (base_lr - self.min_lr) * coeff + self.min_lr - - -@HOOKS.register_module() -class InvLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, power=1., **kwargs): - self.gamma = gamma - self.power = power - super(InvLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * (1 + self.gamma * progress)**(-self.power) - - -@HOOKS.register_module() -class CosineAnnealingLrUpdaterHook(LrUpdaterHook): - - def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): - """Flat + Cosine lr schedule. - - Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501 - - Args: - start_percent (float): When to start annealing the learning rate - after the percentage of the total training steps. - The value should be in range [0, 1). - Default: 0.75 - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. 
- """ - - def __init__(self, - start_percent=0.75, - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - if start_percent < 0 or start_percent > 1 or not isinstance( - start_percent, float): - raise ValueError( - 'expected float between 0 and 1 start_percent, but ' - f'got {start_percent}') - self.start_percent = start_percent - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - start = round(runner.max_epochs * self.start_percent) - progress = runner.epoch - start - max_progress = runner.max_epochs - start - else: - start = round(runner.max_iters * self.start_percent) - progress = runner.iter - start - max_progress = runner.max_iters - start - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - if progress < 0: - return base_lr - else: - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class CosineRestartLrUpdaterHook(LrUpdaterHook): - """Cosine annealing with restarts learning rate scheme. - - Args: - periods (list[int]): Periods for each cosine anneling cycle. - restart_weights (list[float], optional): Restart weights at each - restart iteration. Default: [1]. - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. - """ - - def __init__(self, - periods, - restart_weights=[1], - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.periods = periods - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - self.restart_weights = restart_weights - assert (len(self.periods) == len(self.restart_weights) - ), 'periods and restart_weights should have the same length.' - super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) - - self.cumulative_periods = [ - sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) - ] - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - else: - progress = runner.iter - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - idx = get_position_from_periods(progress, self.cumulative_periods) - current_weight = self.restart_weights[idx] - nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] - current_periods = self.periods[idx] - - alpha = min((progress - nearest_restart) / current_periods, 1) - return annealing_cos(base_lr, target_lr, alpha, current_weight) - - -def get_position_from_periods(iteration, cumulative_periods): - """Get the position from a period list. - - It will return the index of the right-closest number in the period list. - For example, the cumulative_periods = [100, 200, 300, 400], - if iteration == 50, return 0; - if iteration == 210, return 2; - if iteration == 300, return 3. - - Args: - iteration (int): Current iteration. - cumulative_periods (list[int]): Cumulative period list. - - Returns: - int: The position of the right-closest number in the period list. 
- """ - for i, period in enumerate(cumulative_periods): - if iteration < period: - return i - raise ValueError(f'Current iteration {iteration} exceeds ' - f'cumulative_periods {cumulative_periods}') - - -@HOOKS.register_module() -class CyclicLrUpdaterHook(LrUpdaterHook): - """Cyclic LR Scheduler. - - Implement the cyclical learning rate policy (CLR) described in - https://arxiv.org/pdf/1506.01186.pdf - - Different from the original paper, we use cosine annealing rather than - triangular policy inside a cycle. This improves the performance in the - 3D detection area. - - Args: - by_epoch (bool): Whether to update LR by epoch. - target_ratio (tuple[float]): Relative ratio of the highest LR and the - lowest LR to the initial LR. - cyclic_times (int): Number of cycles during training - step_ratio_up (float): The ratio of the increasing process of LR in - the total cycle. - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. Default: 'cos'. - """ - - def __init__(self, - by_epoch=False, - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, - anneal_strategy='cos', - **kwargs): - if isinstance(target_ratio, float): - target_ratio = (target_ratio, target_ratio / 1e5) - elif isinstance(target_ratio, tuple): - target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ - if len(target_ratio) == 1 else target_ratio - else: - raise ValueError('target_ratio should be either float ' - f'or tuple, got {type(target_ratio)}') - - assert len(target_ratio) == 2, \ - '"target_ratio" must be list or tuple of two floats' - assert 0 <= step_ratio_up < 1.0, \ - '"step_ratio_up" must be in range [0,1)' - - self.target_ratio = target_ratio - self.cyclic_times = cyclic_times - self.step_ratio_up = step_ratio_up - self.lr_phases = [] # init lr_phases - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - - assert not by_epoch, \ - 'currently only support "by_epoch" = False' - super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) - - def before_run(self, runner): - super(CyclicLrUpdaterHook, self).before_run(runner) - # initiate lr_phases - # total lr_phases are separated as up and down - max_iter_per_phase = runner.max_iters // self.cyclic_times - iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) - self.lr_phases.append( - [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) - self.lr_phases.append([ - iter_up_phase, max_iter_per_phase, max_iter_per_phase, - self.target_ratio[0], self.target_ratio[1] - ]) - - def get_lr(self, runner, base_lr): - curr_iter = runner.iter - for (start_iter, end_iter, max_iter_per_phase, start_ratio, - end_ratio) in self.lr_phases: - curr_iter %= max_iter_per_phase - if start_iter <= curr_iter < end_iter: - progress = curr_iter - start_iter - return self.anneal_func(base_lr * start_ratio, - base_lr * end_ratio, - progress / (end_iter - start_iter)) - - -@HOOKS.register_module() -class OneCycleLrUpdaterHook(LrUpdaterHook): - """One Cycle LR Scheduler. - - The 1cycle learning rate policy changes the learning rate after every - batch. 
The one cycle learning rate policy is described in - https://arxiv.org/pdf/1708.07120.pdf - - Args: - max_lr (float or list): Upper learning rate boundaries in the cycle - for each parameter group. - total_steps (int, optional): The total number of steps in the cycle. - Note that if a value is not provided here, it will be the max_iter - of runner. Default: None. - pct_start (float): The percentage of the cycle (in number of steps) - spent increasing the learning rate. - Default: 0.3 - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. - Default: 'cos' - div_factor (float): Determines the initial learning rate via - initial_lr = max_lr/div_factor - Default: 25 - final_div_factor (float): Determines the minimum learning rate via - min_lr = initial_lr/final_div_factor - Default: 1e4 - three_phase (bool): If three_phase is True, use a third phase of the - schedule to annihilate the learning rate according to - final_div_factor instead of modifying the second phase (the first - two phases will be symmetrical about the step indicated by - pct_start). - Default: False - """ - - def __init__(self, - max_lr, - total_steps=None, - pct_start=0.3, - anneal_strategy='cos', - div_factor=25, - final_div_factor=1e4, - three_phase=False, - **kwargs): - # validate by_epoch, currently only support by_epoch = False - if 'by_epoch' not in kwargs: - kwargs['by_epoch'] = False - else: - assert not kwargs['by_epoch'], \ - 'currently only support "by_epoch" = False' - if not isinstance(max_lr, (numbers.Number, list, dict)): - raise ValueError('the type of max_lr must be the one of list or ' - f'dict, but got {type(max_lr)}') - self._max_lr = max_lr - if total_steps is not None: - if not isinstance(total_steps, int): - raise ValueError('the type of total_steps must be int, but' - f'got {type(total_steps)}') - self.total_steps = total_steps - # validate pct_start - if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): - raise ValueError('expected float between 0 and 1 pct_start, but ' - f'got {pct_start}') - self.pct_start = pct_start - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - self.div_factor = div_factor - self.final_div_factor = final_div_factor - self.three_phase = three_phase - self.lr_phases = [] # init lr_phases - super(OneCycleLrUpdaterHook, self).__init__(**kwargs) - - def before_run(self, runner): - if hasattr(self, 'total_steps'): - total_steps = self.total_steps - else: - total_steps = runner.max_iters - if total_steps < runner.max_iters: - raise ValueError( - 'The total steps must be greater than or equal to max ' - f'iterations {runner.max_iters} of runner, but total steps ' - f'is {total_steps}.') - - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - _max_lr = format_param(k, optim, self._max_lr) - self.base_lr[k] = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(optim.param_groups, self.base_lr[k]): - group.setdefault('initial_lr', lr) - else: - k = type(runner.optimizer).__name__ - _max_lr = format_param(k, runner.optimizer, self._max_lr) - self.base_lr = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(runner.optimizer.param_groups, 
self.base_lr): - group.setdefault('initial_lr', lr) - - if self.three_phase: - self.lr_phases.append( - [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) - self.lr_phases.append([ - float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1 - ]) - self.lr_phases.append( - [total_steps - 1, 1, 1 / self.final_div_factor]) - else: - self.lr_phases.append( - [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) - self.lr_phases.append( - [total_steps - 1, self.div_factor, 1 / self.final_div_factor]) - - def get_lr(self, runner, base_lr): - curr_iter = runner.iter - start_iter = 0 - for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases): - if curr_iter <= end_iter: - pct = (curr_iter - start_iter) / (end_iter - start_iter) - lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr, - pct) - break - start_iter = end_iter - return lr - - -def annealing_cos(start, end, factor, weight=1): - """Calculate annealing cos learning rate. - - Cosine anneal from `weight * start + (1 - weight) * end` to `end` as - percentage goes from 0.0 to 1.0. - - Args: - start (float): The starting learning rate of the cosine annealing. - end (float): The ending learing rate of the cosine annealing. - factor (float): The coefficient of `pi` when calculating the current - percentage. Range from 0.0 to 1.0. - weight (float, optional): The combination factor of `start` and `end` - when calculating the actual starting learning rate. Default to 1. - """ - cos_out = cos(pi * factor) + 1 - return end + 0.5 * weight * (start - end) * cos_out - - -def annealing_linear(start, end, factor): - """Calculate annealing linear learning rate. - - Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0. - - Args: - start (float): The starting learning rate of the linear annealing. - end (float): The ending learing rate of the linear annealing. - factor (float): The coefficient of `pi` when calculating the current - percentage. Range from 0.0 to 1.0. 
- """ - return start + (end - start) * factor - - -def format_param(name, optim, param): - if isinstance(param, numbers.Number): - return [param] * len(optim.param_groups) - elif isinstance(param, (list, tuple)): # multi param groups - if len(param) != len(optim.param_groups): - raise ValueError(f'expected {len(optim.param_groups)} ' - f'values for {name}, got {len(param)}') - return param - else: # multi optimizers - if name not in param: - raise KeyError(f'{name} is not found in {param.keys()}') - return param[name] diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/decoders/__init__.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/decoders/__init__.py deleted file mode 100644 index e0c5cd1071cb78f184336d70addb70374649f9c5..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/decoders/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from tencentpretrain.decoders.transformer_decoder import TransformerDecoder - - -str2decoder = {"transformer": TransformerDecoder} - -__all__ = ["TransformerDecoder", "str2decoder"] diff --git a/spaces/t13718236382/bingoGPT4/src/pages/api/create.ts b/spaces/t13718236382/bingoGPT4/src/pages/api/create.ts deleted file mode 100644 index 30d47d2ea34d72b669e01d04281302fd6105f764..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/src/pages/api/create.ts +++ /dev/null @@ -1,50 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders, randomIP } from '@/lib/utils' -import { sleep } from '@/lib/bots/bing/utils' - -const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -// const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - let count = 0 - let { BING_IP, ...cookies } = req.cookies - do { - const headers = createHeaders({ - ...cookies, - BING_IP: BING_IP || randomIP(), - }) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - if (response.status === 200) { - res.setHeader('set-cookie', [headers.cookie, `BING_IP=${headers['x-forwarded-for']}`] - .map(cookie => `${cookie}; Max-Age=${86400 * 30}; Path=/; SameSite=None; Secure`)) - debug('headers', headers) - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - res.end(await response.text()) - return - } - BING_IP = '' - await sleep(2000) - debug('loop', count) - } while(count++ < 10) - res.end(JSON.stringify({ - result: { - value: 'TryLater', - message: `Please try again after a while` - } - })) - } catch (e) { - console.log('error', e) - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/tabeina/bingo1/src/components/user-menu.tsx b/spaces/tabeina/bingo1/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, 
IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
    - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
    版本信息 {pkg.version}
    -
    - - -
    站点域名
    -
    copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/taesiri/CLIPSeg2/README.md b/spaces/taesiri/CLIPSeg2/README.md deleted file mode 100644 index 33eb49d67d60e7bd345c43d75962b2b12f248575..0000000000000000000000000000000000000000 --- a/spaces/taesiri/CLIPSeg2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CLIPSeg -emoji: 🦀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -duplicated_from: taesiri/CLIPSeg ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tappyness1/error-analysis-cv-segmentations/app.py b/spaces/tappyness1/error-analysis-cv-segmentations/app.py deleted file mode 100644 index 57b207cc4248a1d780b9e09767afa3103e8b9ada..0000000000000000000000000000000000000000 --- a/spaces/tappyness1/error-analysis-cv-segmentations/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import streamlit as st -from src.st_image_tools import ImageTool - -def call_in_image_tool(cfg_path): - image_tool = ImageTool(cfg_path) - return image_tool - -def main(cfg_path="cfg/cfg.yml"): - """_summary_ - - Args: - cfg_path (str, optional): _description_. Defaults to "cfg/cfg.yml". - - Returns: - _type_: _description_ - """ - st.set_page_config(layout="wide") - - st.markdown( - """ """, - unsafe_allow_html=True, - ) - - image_tool = call_in_image_tool(cfg_path) - - # Select Plot Option - # st.sidebar.markdown("Checkboxes") - # checkbox_one = st.sidebar.checkbox("Show Image", value=True) # rename as necessary - checkbox_two = st.sidebar.checkbox("Show Inference", value=True) - checkbox_three = st.sidebar.checkbox("Show Ground Truth", value=True) - checkbox_four = st.sidebar.checkbox("Show Side by Side (GT and Pred)", value=True) - - option = st.sidebar.selectbox("Select Image", image_tool.all_img) - - if checkbox_two: - - if checkbox_three: - if checkbox_four: - image_tool.plot_with_preds_gt(option=option, side_by_side=True) - else: - image_tool.plot_with_preds_gt(option=option, plot_type="all") - - else: - image_tool.plot_with_preds_gt(option=option, plot_type="pred") - - elif checkbox_three: - - if checkbox_two: - if checkbox_four: - image_tool.plot_with_preds_gt(option=option, side_by_side=True) - else: - image_tool.plot_with_preds_gt(option=option, plot_type="all") - - else: - image_tool.plot_with_preds_gt(option=option, plot_type="gt") - - else: - image_tool.plot_with_preds_gt(option=option) - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/templates/fastapi-uvicorn/README.md b/spaces/templates/fastapi-uvicorn/README.md deleted file mode 100644 index c1a9e0272433019be29772f819b6b5ad4846a623..0000000000000000000000000000000000000000 --- a/spaces/templates/fastapi-uvicorn/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Fast API + Uvicorn -emoji: ⚡ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 2.9.1 -python_version: 3.10.4 -app_file: start.py -models: [osanseviero/BigGAN-deep-128, t5-small] -datasets: [emotion] -license: mit -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/teralomaniac/chatbing/Dockerfile b/spaces/teralomaniac/chatbing/Dockerfile deleted file mode 100644 index 6f5a46200fc9ff69f3d0904ed3009efa6bd4d41d..0000000000000000000000000000000000000000 --- a/spaces/teralomaniac/chatbing/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM python:3.11 - -WORKDIR /app - -ADD requirements.txt requirements.txt -RUN pip install -r requirements.txt --upgrade - -ADD . . 
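 # Editor's note, not part of the original Dockerfile: 7860 in the CMD below is the # default port a Hugging Face Space is expected to listen on, and binding 0.0.0.0 # makes the server reachable from outside the container; EXPOSE is presumably left # commented out because Spaces routes traffic to the declared port without it. 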
-# EXPOSE 65432 - -CMD ["python","-m","main","-H","0.0.0.0:7860"] diff --git a/spaces/teralomaniac/chatbing/public/index.html b/spaces/teralomaniac/chatbing/public/index.html deleted file mode 100644 index 6f86ebf09796bbcf73018571a0ffc6e5b1e94412..0000000000000000000000000000000000000000 --- a/spaces/teralomaniac/chatbing/public/index.html +++ /dev/null @@ -1,660 +0,0 @@ - - - - - - ChatSydney - - - - -
 - - - - - - diff --git a/spaces/terapyon/gh-issue-search/gh_issue_loader.py deleted file mode 100644 index 06027b2599ba3e5630da7bbc022ddcc6543156d2..0000000000000000000000000000000000000000 --- a/spaces/terapyon/gh-issue-search/gh_issue_loader.py +++ /dev/null @@ -1,61 +0,0 @@ -from dataclasses import asdict -import json -from typing import Iterator -from dateutil.parser import parse -from langchain.docstore.document import Document -from langchain.document_loaders.base import BaseLoader -from gh_issue_loader import Issue # self-import: Issue is never defined in this module, see note below - - -def date_to_int(dt_str: str) -> int: - dt = parse(dt_str) - return int(dt.timestamp()) - - -def get_contents(repo_name: str, filename: str) -> Iterator[tuple[Issue, str]]: - with open(filename, "r") as f: - obj = [json.loads(line) for line in f] - for data in obj: - title = data["title"] - body = data["body"] - issue = Issue( - repo_name=repo_name, - id=data["number"], - title=title, - created_at=date_to_int(data["created_at"]), - user=data["user.login"], - url=data["html_url"], - labels=data["labels_"], - type_="issue", - ) - text = title - if body: - text += "\n\n" + body - yield issue, text - comments = data["comments_"] - for comment in comments: - issue = Issue( - repo_name=repo_name, - id=comment["id"], - title=data["title"], - created_at=date_to_int(comment["created_at"]), - user=comment["user.login"], - url=comment["html_url"], - labels=data["labels_"], - type_="comment", - ) - yield issue, comment["body"] - - -class GHLoader(BaseLoader): - def __init__(self, repo_name: str, filename: str): - self.repo_name = repo_name - self.filename = filename - - def lazy_load(self) -> Iterator[Document]: - for issue, text in get_contents(self.repo_name, self.filename): - metadata = asdict(issue) - yield Document(page_content=text, metadata=metadata) - - def load(self) -> list[Document]: - return list(self.lazy_load()) diff --git a/spaces/terfces0erbo/CollegeProjectV2/AbleBits Ultimate Suite For Excel 2018.5.485.1319 Utorrent.md deleted file mode 100644 index cc10e5c025ee0272876e64a1cf6f51f1a8a26f06..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/AbleBits Ultimate Suite For Excel 2018.5.485.1319 Utorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -<br /> 
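 Editor's note on gh_issue_loader.py above: the module imports Issue from itself, so as written it cannot be imported at runtime. A minimal sketch of the missing dataclass and a typical call site — the field types are inferred from how get_contents() populates them, and the repository name and file path are illustrative assumptions, not values from the original space: from dataclasses import dataclass @dataclass class Issue: repo_name: str # repository the issue belongs to id: int # issue number, or comment id when type_ == "comment" title: str created_at: int # unix timestamp produced by date_to_int() user: str # GitHub login of the author url: str # html_url of the issue or comment labels: list[str] type_: str # "issue" or "comment" # Hypothetical usage: turn a JSON-lines dump of issues into LangChain documents. # loader = GHLoader("terapyon/sample-repo", "issues.jsonl") # docs = loader.load() # one Document per issue plus one per comment Because GHLoader implements lazy_load(), callers that only iterate once can stream documents instead of materializing the full list that load() builds. 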

    AbleBits Ultimate Suite For Excel 2018.5.485.1319 Utorrent


    Download ❤❤❤ https://bytlly.com/2uGlxI



    -
 - 
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/Descargar Nutrimind 2012 Gratis Taringa.md b/spaces/terfces0erbo/CollegeProjectV2/Descargar Nutrimind 2012 Gratis Taringa.md deleted file mode 100644 index 83445e0cf0dee8e36080325db030e6cf4a610ee5..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Descargar Nutrimind 2012 Gratis Taringa.md +++ /dev/null @@ -1,19 +0,0 @@ - -

    ¿Cómo Descargar Nutrimind 2012 Gratis Taringa?

    -

    Nutrimind es un software de nutrición que te permite crear planes alimenticios personalizados, calcular el valor nutricional de los alimentos, generar informes y gráficos, y mucho más. Es una herramienta muy útil para nutricionistas, dietistas, médicos y cualquier persona interesada en mejorar su salud y bienestar.

    -

    Si quieres descargar Nutrimind 2012 gratis Taringa, hay varias opciones disponibles. Una de ellas es usar el sitio web de npm[^1^], donde puedes encontrar el paquete descargar_nutrimind_2012_gratis_taringa_aw4hy[^1^], que contiene el archivo de instalación del programa. Para usar este método, necesitas tener instalado npm en tu computadora, que es un gestor de paquetes para Node.js. Luego, solo tienes que ejecutar el siguiente comando en tu terminal:

    -

    Descargar Nutrimind 2012 Gratis Taringa


    Download File > https://bytlly.com/2uGlk2



    -npm install descargar_nutrimind_2012_gratis_taringa_aw4hy -

    Otra opción es usar el sitio web de SoundCloud[^2^], donde puedes encontrar el audio Descargar Nutrimind 2012 Gratis Taringa![^2^] de Piplanaudee, que contiene un enlace para descargar el programa. Para usar este método, solo tienes que reproducir el audio y hacer clic en el enlace que aparece en la descripción. Te llevará a una página donde podrás descargar el archivo zip del programa.

    -

    Una tercera opción es usar el sitio web de Manchuela Wine[^3^], donde puedes encontrar el documento Descargar Nutrimind 2012 Gratis Taringa |WORK|[^3^], que contiene un enlace para descargar el programa. Para usar este método, solo tienes que abrir el documento y hacer clic en el enlace que aparece al final. Te llevará a una página donde podrás descargar el archivo zip del programa.

    -

    Estas son algunas de las formas de descargar Nutrimind 2012 gratis Taringa. Recuerda que este programa es una versión antigua y puede no ser compatible con los sistemas operativos más recientes. Además, al descargarlo de fuentes no oficiales, corres el riesgo de infectar tu computadora con virus o malware. Por eso, te recomendamos que siempre descargues los programas de sus sitios web oficiales o de plataformas confiables.

    -

If you want to know more about Nutrimind and its features, you can visit its official website, where you will find detailed information, tutorials, testimonials, and purchase options. Nutrimind is quality software that will help you improve your diet and your health.

    -

References

    -
1. https://www.npmjs.com/package/descargar_nutrimind_2012_gratis_taringa_aw4hy
2. https://soundcloud.com/piplanaudee/descargar-nutrimind-2012-gratis-taringa
3. https://www.manchuela.wine/wp-content/uploads/2022/11/marcwenz.pdf
4. https://nutrimind.net/

    -
    -
    \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (aarambam 1080p Full Hd Movie Telugu ) [PATCHED].md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (aarambam 1080p Full Hd Movie Telugu ) [PATCHED].md deleted file mode 100644 index 26e72123c25e0da969d18712dc809af1f09ca50c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (aarambam 1080p Full Hd Movie Telugu ) [PATCHED].md +++ /dev/null @@ -1,7 +0,0 @@ - -


    HD Online Player (aarambam 1080p full hd movie telugu )


    Download File --->>> https://bytlly.com/2uGl2R



    -

    -
    -
\ No newline at end of file diff --git a/spaces/theblocknoob/hugging-face-space/style.css b/spaces/theblocknoob/hugging-face-space/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/theblocknoob/hugging-face-space/style.css +++ /dev/null @@ -1,28 +0,0 @@
-body {
-  padding: 2rem;
-  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
-  font-size: 16px;
-  margin-top: 0;
-}
-
-p {
-  color: rgb(107, 114, 128);
-  font-size: 15px;
-  margin-bottom: 10px;
-  margin-top: 5px;
-}
-
-.card {
-  max-width: 620px;
-  margin: 0 auto;
-  padding: 16px;
-  border: 1px solid lightgray;
-  border-radius: 16px;
-}
-
-.card p:last-child {
-  margin-bottom: 0;
-}
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/2004 Ford F250 Wiring Diagram Download __EXCLUSIVE__.md b/spaces/tialenAdioni/chat-gpt-api/logs/2004 Ford F250 Wiring Diagram Download __EXCLUSIVE__.md deleted file mode 100644 index ffd6a845e22da1ad2de6ccd2611e45cdc52bc628..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/2004 Ford F250 Wiring Diagram Download __EXCLUSIVE__.md +++ /dev/null @@ -1,20 +0,0 @@
    -

    How to Download a 2004 Ford F250 Wiring Diagram

    -

    If you are looking for a wiring diagram for your 2004 Ford F250 truck, you may have a hard time finding it online. Many websites offer wiring diagrams for various models and years of Ford trucks, but they are often low-quality, incomplete, or outdated. In this article, we will show you how to download a high-resolution, single-page wiring diagram for your 2004 Ford F250 truck from a reliable source.

    -

    2004 Ford F250 Wiring Diagram Download


    DOWNLOAD ===== https://urlcod.com/2uKa0w



    -

    A wiring diagram is a schematic representation of the electrical system of a vehicle. It shows the components, connections, wires, colors, and functions of the electrical circuits. A wiring diagram can help you troubleshoot, repair, or modify your vehicle's electrical system.

    -

    There are different types of wiring diagrams for different parts of the vehicle. For example, there are wiring diagrams for the engine, transmission, brakes, lights, radio, etc. Some wiring diagrams are specific to certain options or features of the vehicle, such as single or dual alternator, navigation system, parking assistant, etc.

    -

For your 2004 Ford F250 truck, you will need a wiring diagram that matches your engine type (6.0L Powerstroke Diesel or 5.4L Triton Gasoline), your alternator type (single or dual), and your vehicle series (F-Series or Econoline). You can find this information on the driver's side door jamb label or on the Vehicle Identification Number (VIN) plate.

    -

    Step 1: Find the Right Wiring Diagram

    -

    The best source for wiring diagrams for your 2004 Ford F250 truck is the Ford Powerstroke Diesel Forum. This is an online community of Ford truck owners and enthusiasts who share their knowledge and experience with each other. On this forum, you can find high-resolution, single-page PDF files of wiring diagrams for various models and years of Ford trucks.

    -

    -

    To find the right wiring diagram for your 2004 Ford F250 truck, you need to go to the thread titled "Hi-res Single Page 6.0L Wiring Diagram PDFs" by user djmaguire. This thread contains links to download eight different wiring diagrams for 2003-2005 Ford trucks with 6.0L Powerstroke Diesel engines. You need to select the one that matches your alternator type and vehicle series.

    -

For example, if you have a 2004 Ford F250 truck with a single alternator in the F-Series, you need to download the file named "Wiring Diagram - 2004 Powerstroke 6L - F-Series - Single Alt - Full.pdf". This file has a size of 2.3 MB and a resolution of 300 dpi. It shows the complete wiring diagram for your truck on one page.

    -

    Step 2: Download the Wiring Diagram

    -

    To download the wiring diagram for your 2004 Ford F250 truck from the Ford Powerstroke Diesel Forum, you need to register as a member first. Registration is free and only takes a few minutes. You need to provide a valid email address and create a username and password.

    -

    Once you are registered and logged in, you can go to the thread titled "Hi-res Single Page 6.0L Wiring Diagram PDFs" by user djmaguire and click on the link that corresponds to your wiring diagram. This will open a new tab or window with the PDF file of the wiring diagram.

    -

    To save the PDF file to your computer, you can right-click on it and select "Save as" or "Save link as". Choose a location on your computer where you want to save the file and give it a name. You can also use the keyboard shortcut Ctrl+S or Command+S to save the file.

    -

    Step 3: View and Print the Wiring Diagram

    -

To view the wiring diagram for your 2004 Ford F250 truck on your computer, you need a PDF reader program such as Adobe Acrobat Reader or Foxit Reader. These programs are free and easy to use.

    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Chahat Hindi Dubbed Free Download Mp4 tutti i dettagli sul cast la trama e le curiosit del film.md b/spaces/tialenAdioni/chat-gpt-api/logs/Chahat Hindi Dubbed Free Download Mp4 tutti i dettagli sul cast la trama e le curiosit del film.md deleted file mode 100644 index 2fef3e70ed327db9dcb99b014a46d5e80955c689..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Chahat Hindi Dubbed Free Download Mp4 tutti i dettagli sul cast la trama e le curiosit del film.md +++ /dev/null @@ -1,65 +0,0 @@ - -

    How to Watch Chahat Hindi Dubbed Movie Online for Free

    -

    Chahat is a 1996 Bollywood romantic drama film starring Shah Rukh Khan, Pooja Bhatt, Naseeruddin Shah and Ramya Krishnan. The film was directed by Mahesh Bhatt and produced by Viral Lakhia. The film tells the story of Roop (Khan), a young singer who falls in love with Reshma (Bhatt), the daughter of a wealthy businessman (Shah). However, their love faces many obstacles as Reshma's father disapproves of their relationship and tries to separate them.

    -

    Chahat Hindi Dubbed Free Download Mp4


    Download Zip ✏ ✏ ✏ https://urlcod.com/2uK7nC



    -

If you are a fan of Chahat and want to watch it online for free, you are in luck. There are many websites that offer the Chahat Hindi dubbed movie as a free mp4 download. You can easily download the movie on your device and enjoy it anytime you want. However, you should be careful about the quality and safety of these websites, as some of them may contain viruses or malware that can harm your device or steal your personal information.

    -

    To help you find the best and safest websites to watch Chahat Hindi dubbed movie online for free, we have compiled a list of some of the most popular and reliable ones. These websites have high-quality video and audio, fast download speed, and no registration or subscription required. You can also stream the movie online if you don't want to download it. Here are the top 5 websites to watch Chahat Hindi dubbed free download mp4:

    -
      -
    1. SoundCloud: SoundCloud is one of the most popular platforms for streaming and downloading music, podcasts, audiobooks, and more. You can also find Chahat Hindi dubbed free download mp4 on SoundCloud by searching for it or following the links provided by some users [^3^] [^4^] [^5^]. You can listen to the movie online or download it on your device for offline access. SoundCloud is free to use and has a user-friendly interface.
    2. -
    3. Wixsite: Wixsite is a website builder that allows anyone to create their own website for free. Some users have created websites that offer Chahat Hindi dubbed free download mp4 format. You can visit these websites [^1^] [^2^] and click on the download button to get the movie on your device. However, you should be careful about the quality and safety of these websites as they may not be verified or secure.
    4. -
    5. YouTube: YouTube is the most popular video-sharing platform in the world. You can find almost any video on YouTube, including Chahat Hindi dubbed movie. However, you may not be able to download the movie directly from YouTube as it may violate its terms of service. You can use a third-party tool or app to download YouTube videos on your device. Alternatively, you can watch the movie online on YouTube without any hassle.
    6. -
    7. Filmywap: Filmywap is a website that offers Bollywood, Hollywood, South Indian, and other regional movies for free download. You can find Chahat Hindi dubbed free download mp4 on Filmywap by searching for it or browsing through its categories. You can download the movie in different qualities and sizes depending on your preference. Filmywap is easy to use and has a large collection of movies.
    8. -
    9. Moviesflix: Moviesflix is another website that provides movies and web series for free download. You can watch Chahat Hindi dubbed movie online for free on Moviesflix by searching for it or following its link. You can also download the movie in various formats and resolutions on your device. Moviesflix has a simple and attractive design and updates its content regularly.
    10. -
    -

    These are some of the best websites to watch Chahat Hindi dubbed free download mp4 online for free. However, you should always be careful about the legality and safety of these websites as they may not have the permission or license to distribute the movie. You should also use a VPN service or proxy server to protect your identity and privacy while accessing these websites. We hope you enjoy watching Chahat Hindi dubbed movie online for free.

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Ip Man 3 HD Movie Dubbed in Hindi by Torrent for Free Enjoy the Epic Finale of the Ip Man Trilogy.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Ip Man 3 HD Movie Dubbed in Hindi by Torrent for Free Enjoy the Epic Finale of the Ip Man Trilogy.md deleted file mode 100644 index 6a3b128fd2bf74ba25fa2aa5209435368b17493d..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Ip Man 3 HD Movie Dubbed in Hindi by Torrent for Free Enjoy the Epic Finale of the Ip Man Trilogy.md +++ /dev/null @@ -1,75 +0,0 @@ - -

    Ip Man 3 HD Movie Dubbed in Hindi Free Download by Torrent

    -

    If you are a fan of martial arts movies, you might have heard of Ip Man, the legendary Wing Chun master who trained Bruce Lee. Ip Man is also the protagonist of a series of biographical films that depict his life and achievements. The third installment of the series, Ip Man 3, was released in 2015 and features Ip Man's final years in Hong Kong, where he faces a new challenge from a ruthless property developer and his gang of thugs.

    -

    ip man 3 hd movie dubbed in hindi free download by torrent


    Download ✵✵✵ https://urlcod.com/2uK8BM



    -

    Ip Man 3 is a thrilling and action-packed movie that showcases the skills and philosophy of Wing Chun, as well as the values of loyalty, courage and justice. The movie also stars Mike Tyson as the main antagonist, and features a cameo appearance by Bruce Lee. Ip Man 3 was a huge success at the box office, earning more than $170 million worldwide. It also received positive reviews from critics and audiences alike, who praised the performances, the choreography, the cinematography and the story.

    -

    How to Download Ip Man 3 HD Movie Dubbed in Hindi by Torrent

    -

    If you want to watch Ip Man 3 in Hindi, you might be wondering how to download it for free by torrent. Torrenting is a popular method of sharing files online, especially large ones like movies. However, torrenting also comes with some risks and challenges, such as legal issues, malware infections, slow downloads and poor quality. Therefore, you need to be careful and follow some steps to ensure a safe and smooth torrenting experience.

    -

    Here are some tips on how to download Ip Man 3 HD movie dubbed in Hindi by torrent:

    -
      -
    • First, you need to find a reliable torrent site that offers Ip Man 3 in Hindi. You can use a search engine or a torrent aggregator to look for the best options. Some of the most popular torrent sites are The Pirate Bay, RARBG, 1337x and YTS.
    • -
    • Second, you need to download a torrent client that will allow you to open and download the torrent file. A torrent client is a software that connects you to other users who have the same file and enables you to download it in pieces. Some of the most popular torrent clients are uTorrent, BitTorrent, qBittorrent and Vuze.
    • -
    • Third, you need to download a VPN (virtual private network) that will protect your identity and privacy while torrenting. A VPN is a service that encrypts your internet traffic and hides your IP address from your ISP (internet service provider) and other parties. This way, you can avoid legal troubles, cyberattacks, bandwidth throttling and geo-restrictions. Some of the most popular VPNs are ExpressVPN, NordVPN, Surfshark and CyberGhost.
    • -
    • Fourth, you need to launch your VPN and connect to a server in a country where torrenting is not illegal or monitored. Then, you need to launch your torrent client and open the torrent file or magnet link of Ip Man 3 in Hindi. You can then choose the destination folder and start downloading the movie.
    • -
    • Fifth, you need to wait until the download is complete and then enjoy watching Ip Man 3 in Hindi on your device. You can also use a media player like VLC or MX Player to play the movie with subtitles if needed.
    • -
    -

    Conclusion

    -

    Ip Man 3 is an amazing movie that showcases the life and legacy of Ip Man, the master of Wing Chun. If you want to watch it in Hindi, you can download it for free by torrent using the tips above. However, you should also be aware of the risks and challenges of torrenting and use a VPN to protect yourself. We hope this article was helpful and informative for you. Happy watching!

    -

    Why You Should Watch Ip Man 3 HD Movie Dubbed in Hindi

    -

    Ip Man 3 is not only a great movie for martial arts fans, but also for anyone who enjoys a good story with compelling characters and themes. Here are some reasons why you should watch Ip Man 3 HD movie dubbed in Hindi:

    -
      -
    • You will learn more about Ip Man's life and legacy, as well as the history and culture of Hong Kong in the 1950s. You will see how Ip Man faced various challenges and adversaries, both personal and professional, and how he overcame them with his wisdom and courage.
    • -
    • You will witness some of the most spectacular and realistic fight scenes ever filmed, featuring the authentic and graceful Wing Chun style. You will also see how Ip Man used his skills to defend his family, his students and his community from the evil forces that threatened them.
    • -
    • You will enjoy the performances of some of the best actors in the industry, such as Donnie Yen, who reprised his role as Ip Man for the third time, Mike Tyson, who played the ruthless property developer Frank, and Danny Chan, who portrayed a young Bruce Lee. You will also see some familiar faces from the previous movies, such as Lynn Hung, Simon Yam and Zhang Jin.
    • -
    • You will appreciate the quality and beauty of the movie, which was shot with stunning cinematography, sound design and music. You will also notice the attention to detail and accuracy that went into the production design, costumes and props.
    • -
    • You will have fun watching the movie in Hindi, which adds a new dimension and flavor to the dialogue and narration. You will also be able to understand the movie better with subtitles if needed.
    • -
    -

    Where to Find Ip Man 3 HD Movie Dubbed in Hindi Free Download by Torrent

    -

    Now that you know why you should watch Ip Man 3 HD movie dubbed in Hindi, you might be wondering where to find it for free download by torrent. As we mentioned before, torrenting is a popular but risky way of getting movies online, so you need to be careful and use a VPN to protect yourself. However, if you are willing to take the risk, here are some of the best torrent sites where you can find Ip Man 3 HD movie dubbed in Hindi:

    -
      -
    • The Pirate Bay: This is one of the oldest and most popular torrent sites in the world, where you can find almost anything you want. However, it is also one of the most blocked and banned sites by many governments and ISPs, so you might need to use a proxy or a mirror site to access it.
    • -
    • RARBG: This is another well-known torrent site that offers high-quality torrents for movies, TV shows, games, music and more. It has a simple and user-friendly interface that makes it easy to navigate and search. However, it is also blocked in many countries, so you might need to use a VPN or a proxy to access it.
    • -
    • 1337x: This is a torrent site that has a large and diverse collection of torrents for various categories and genres. It has a modern and attractive design that makes it appealing and convenient to use. However, it is also subject to censorship and legal issues, so you might need to use a VPN or an alternative domain to access it.
    • -
    • YTS: This is a torrent site that specializes in movies, especially HD ones. It has a sleek and minimalist design that focuses on the movie posters and ratings. It also offers smaller file sizes that save bandwidth and storage space. However, it is also notorious for being sued by movie studios and distributors, so you might need to use a VPN or a proxy to access it.
    • -
    -

    What to Expect from Ip Man 3 HD Movie Dubbed in Hindi

    -

    Ip Man 3 HD movie dubbed in Hindi is a treat for the eyes and the ears, as it delivers a stunning visual and audio experience. Here are some of the things you can expect from Ip Man 3 HD movie dubbed in Hindi:

    -
      -
    • You can expect to see some of the most realistic and impressive martial arts scenes ever filmed, as Ip Man and his opponents use various styles and techniques to fight each other. You can also see how Wing Chun is applied in different situations and scenarios, such as against multiple attackers, weapons, boxing and even Tai Chi.
    • -
    • You can expect to hear some of the best voice actors in the industry, who have dubbed the movie in Hindi with accuracy and emotion. You can also hear the original soundtrack composed by Kenji Kawai, who has created a beautiful and powerful score that matches the mood and tone of the movie.
    • -
    • You can expect to feel a range of emotions, as Ip Man 3 HD movie dubbed in Hindi takes you on a roller coaster ride of drama, action, humor and romance. You can also feel inspired by Ip Man's character and values, as he shows his dedication, humility, compassion and integrity throughout the movie.
    • -
    -

    How to Enjoy Ip Man 3 HD Movie Dubbed in Hindi to the Fullest

    -

    Ip Man 3 HD movie dubbed in Hindi is a movie that deserves your full attention and appreciation, as it offers a lot of entertainment and enlightenment. Here are some tips on how to enjoy Ip Man 3 HD movie dubbed in Hindi to the fullest:

    -
      -
    • Watch it on a big screen with high resolution and sound quality, so you can appreciate the details and effects of the movie. You can also use headphones or speakers to enhance the audio experience.
    • -
    • Watch it with your friends or family, so you can share your opinions and reactions with them. You can also discuss the movie afterwards and learn more about Ip Man's life and legacy.
    • -
    • Watch it with an open mind and heart, so you can understand and appreciate the message and meaning of the movie. You can also learn more about Wing Chun and its principles and benefits.
    • -

    -


    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Excel 2016 Odbc Driver Download 64-bit BEST.md b/spaces/tialenAdioni/chat-gpt-api/logs/Excel 2016 Odbc Driver Download 64-bit BEST.md deleted file mode 100644 index 6eaa5962cefe1692fd211f3ed27945548049dc6f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Excel 2016 Odbc Driver Download 64-bit BEST.md +++ /dev/null @@ -1,28 +0,0 @@ - -

    How to Download and Install Excel 2016 ODBC Driver for 64-bit Windows

    -

    If you want to connect your Excel 2016 files to other data sources such as SQL Server or Access, you need to install the appropriate ODBC driver for your system. ODBC stands for Open Database Connectivity, which is an API that uses SQL as the database access language. With ODBC, you can access various database management systems with the same source code.

    -

    In this article, we will show you how to download and install the Excel 2016 ODBC driver for 64-bit Windows. This driver will allow you to open, query, and update your Excel files through the ODBC interface.

    -

    excel 2016 odbc driver download 64-bit


    DOWNLOADhttps://urlcod.com/2uK1sL



    -

    Step 1: Download the ODBC Driver

    -

    The first step is to download the ODBC driver that matches your system and Excel version. The Microsoft ODBC Desktop Database Drivers are a set of drivers that are based on the Microsoft Jet engine. They provide access to Excel, Access, Paradox, dBASE, and text files.

    -

    The latest version of these drivers is 4.0, which supports only 32-bit systems. If you have a 64-bit system, you need to download the previous version, which is 3.0. This version includes both 16-bit and 32-bit drivers that work on Windows 95 or later, Windows NT Workstation or Server version 4.0, Windows 2000 Professional, or Windows 2000 Server.

    -

    You can download the ODBC Desktop Database Drivers 3.0 from this link: https://www.microsoft.com/en-us/download/details.aspx?id=54920

    -

    Step 2: Install the ODBC Driver

    -

    After you have downloaded the installer file, run it and follow the instructions on the screen. You will need an internet connection to activate your software and access online services. The installation process will create a folder called "ODBC" in your Program Files directory.

    -

    Once the installation is complete, you can launch the ODBC Data Source Administrator from the Control Panel or the Start menu. This tool will allow you to configure and manage your ODBC data sources.

    -

    Step 3: Configure the ODBC Data Source

    -

    To connect your Excel file to an ODBC data source, you need to create a DSN (Data Source Name) that specifies the driver and other connection parameters. A DSN can be either user-specific or system-wide.

    -

    To create a DSN, follow these steps:

    -
      -
    1. Open the ODBC Data Source Administrator and click on the "User DSN" or "System DSN" tab.
    2. -
    3. Click on the "Add" button and select "Microsoft Excel Driver (*.xls)" from the list of drivers.
    4. -
    5. Click on "Finish" and enter a name and description for your DSN.
    6. -
    7. Click on "Select Workbook" and browse to your Excel file location.
    8. -
    9. Click on "OK" and test your connection.
    10. -
    -

    You have now created a DSN for your Excel file. You can use this DSN in your applications or tools that support ODBC connectivity.
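If you want to check the new DSN programmatically, here is a minimal Python sketch using the pyodbc package (an illustrative sketch, not part of the driver itself). The DSN name ExcelDSN and the sheet name Sheet1 are placeholder assumptions; substitute the values you chose in the steps above.

import pyodbc

# Connect through the DSN created in the ODBC Data Source Administrator.
# "ExcelDSN" is a placeholder; use the name you entered when creating the DSN.
conn = pyodbc.connect("DSN=ExcelDSN", autocommit=True)
cursor = conn.cursor()

# The Excel ODBC driver exposes each worksheet as a table named [SheetName$].
# "Sheet1" is assumed here.
cursor.execute("SELECT * FROM [Sheet1$]")
for row in cursor.fetchall():
    print(row)

conn.close()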

    -

    -

    Conclusion

    -

    In this article, we have shown you how to download and install the Excel 2016 ODBC driver for 64-bit Windows. We have also explained how to create a DSN for your Excel file using the ODBC Data Source Administrator. With these steps, you can connect your Excel file to other data sources such as SQL Server or Access using ODBC.

    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install Office 2016 Professional Plus Latest Version.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install Office 2016 Professional Plus Latest Version.md deleted file mode 100644 index f4d0838abedb2ee0f7c21fdb757a7f70eb47d4b1..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download and Install Office 2016 Professional Plus Latest Version.md +++ /dev/null @@ -1,32 +0,0 @@ - -

    Office 2016 Professional Plus Latest Version

    -

    Office 2016 Professional Plus is a version of Microsoft Office that includes applications such as Word, Excel, PowerPoint, Outlook, OneNote, Access, Publisher, and Skype for Business. It is designed for businesses and organizations that need advanced features and functionality. Office 2016 Professional Plus is available as a one-time purchase through a volume licensing agreement or as a retail product.

    -

    office 2016 professional plus latest version


    Download File ———>>> https://urlcod.com/2uK9kl



    -

    In this article, we will review the latest version of Office 2016 Professional Plus, how to download and install it, and what are the benefits of using it.

    -

    What is the latest version of Office 2016 Professional Plus?

    -

    The latest version of Office 2016 Professional Plus is Version 1808 (Build 10397.20021), which was released on April 11, 2023. This version includes security and quality updates for the Office applications, as well as some new features and improvements. Some of the highlights are:

    -
      -
    • Word: You can now insert icons and SVG graphics into your documents, use the Resume Assistant to create a compelling resume based on LinkedIn data, and translate text into different languages with the Translator feature.
    • -
    • Excel: You can now create and edit custom functions with JavaScript using the Excel Scripting feature, use new functions such as TEXTJOIN and CONCAT to manipulate text strings, and create maps with geographic data using the Maps chart type.
    • -
    • PowerPoint: You can now use the Morph transition to create smooth animations between slides, insert 3D models and animations into your presentations, and record your screen with the Screen Recording feature.
    • -
    • Outlook: You can now use the Focused Inbox to prioritize your most important emails, schedule meetings across different time zones with the Time Zone feature, and share your calendar availability with others using the FindTime add-in.
    • -
    • OneNote: You can now use the OneNote app for Windows 10 instead of OneNote 2016, which has more features and syncs across all your devices. You can also use the Class Notebook and Staff Notebook tools to create collaborative notebooks for education and work.
    • -
    • Access: You can now use the Large Number data type to store numeric values up to 15 decimal places, use the new charts to visualize your data, and connect to SharePoint lists with improved performance and reliability.
    • -
    • Publisher: You can now use the new templates and layouts to create professional-looking publications, insert online pictures from sources such as Bing and Flickr, and save your publications as PDF or XPS files.
    • -
    • Skype for Business: You can now use the Skype Meetings App to join online meetings without installing the Skype for Business client, use the Call Analytics dashboard to troubleshoot call quality issues, and use the Skype Room Systems to enhance your meeting experience.
    • -
    -

    How to download and install Office 2016 Professional Plus?

    -

    To download and install Office 2016 Professional Plus, you need to have a valid product key and an account associated with Microsoft 365. You can follow these steps:

    -

    -
      -
    1. Go to your Microsoft account dashboard and sign in with the Microsoft account you associated with this version of Microsoft 365.
    2. -
    3. Select Services and subscriptions and find the Microsoft 365 product you want to install. Select Install.
    4. -
    5. To install Office 2016 Professional Plus in a different language or the 64-bit version, select Other options. Choose the language and bit version you want, and then select Install.
    6. -
    7. Select Run (in Edge or Internet Explorer), Setup (in Chrome), or Save File (in Firefox) depending on your browser. If you see a User Account Control prompt that says Do you want to allow this app to make changes to your device? select Yes.
    8. -
    9. The installation begins. Your install is finished when you see the phrase You're all set! Office is installed now and an animation plays to show you where to find Office applications on your computer. Select Close.
    10. -
    11. To activate Office 2016 Professional Plus, start any Office application such as Word or Excel. Agree to the License terms by selecting Accept. Office might activate automatically or prompt you to enter your product key.
    12. -
    -

    What are the benefits of using Office 2016 Professional Plus?

    -

Office 2016 Professional Plus offers many benefits for users who need advanced features and functionality in their day-to-day work, from the full suite of desktop applications to regular security and quality updates.

    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Service Release 7 Rhino Download for Windows and Mac.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Service Release 7 Rhino Download for Windows and Mac.md deleted file mode 100644 index f60ace667533fa597defc324115a23b72042b7f8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Service Release 7 Rhino Download for Windows and Mac.md +++ /dev/null @@ -1,171 +0,0 @@ -
    -

    What is Rhino 7 Service Release and How to Download It?

    - -

    Rhino 7 is a powerful 3D modeling and rendering software that can handle complex geometry and create stunning visuals. Rhino 7 Service Release is a series of updates that improve the performance, stability, and functionality of Rhino 7. In this article, we will explain what Rhino 7 Service Release is, why you should download it, and how to do it.

    -

    service release 7 rhino download


    Download ✒ ✒ ✒ https://urlcod.com/2uK3c2



    - -

    What is Rhino 7 Service Release?

    - -

    Rhino 7 Service Release is a collection of bug fixes, enhancements, and new features that are released periodically for Rhino 7 users. These updates are designed to make Rhino 7 more reliable, faster, and easier to use. Some of the benefits of Rhino 7 Service Release include:

    - -
      -
    • Improved annotation tools, such as hatch, leader, text, and dimension.
    • -
    • Better display modes, such as rendered, raytraced, technical, and PBR.
    • -
    • More options for texture mapping, decals, and materials.
    • -
    • New commands for extruding SubD objects, matching surfaces, and editing blocks.
    • -
    • Support for PDF export, SketchUp import, and KMZ export.
    • -
    • Enhanced Grasshopper integration, with new components and features.
    • -
    - -

    Rhino 7 Service Release is available for both Windows and Mac users. You can check the latest version number and release notes on the Rhino website.

    - -

    Why should you download Rhino 7 Service Release?

    - -

    Downloading Rhino 7 Service Release is highly recommended for all Rhino 7 users. By installing the latest updates, you can enjoy the following advantages:

    - -
      -
    • Get access to the newest features and improvements that can boost your productivity and creativity.
    • -
    • Fix any issues or errors that may affect your workflow or results.
    • -
    • Ensure compatibility with other software and formats that you may use or export.
    • -
    • Optimize your system performance and security.
    • -
    - -

    Rhino 7 Service Release is free for all Rhino 7 users. You do not need to buy a new license key or pay any fees to download it.

    - -

    How to download Rhino 7 Service Release?

    - -

    Downloading Rhino 7 Service Release is easy and fast. You can follow these steps:

    -


    - -
      -
    1. Open Rhino 7 on your computer.
    2. -
    3. Go to Help > Check for Updates.
    4. -
    5. If there is a new update available, you will see a message with the version number and a link to download it.
    6. -
    7. Click on the link and follow the instructions to install the update.
    8. -
    9. Restart Rhino 7 after the installation is complete.
    10. -
    - -

    You can also download Rhino 7 Service Release manually from the Rhino website. Just go to the Download page and choose the appropriate version for your operating system. Then follow the same steps as above to install it.

    - -


    -

    How to troubleshoot Rhino 7 Service Release issues?

    - -

    Sometimes, you may encounter some problems or errors when downloading or installing Rhino 7 Service Release. For example, you may see a message that says "Rhino 7.1 RC refuses to license after installing" or "Windows Rhino locks up when opening color dialog". Don't worry, these issues can be easily solved by following these steps:

    - -
      -
    1. Make sure you have a valid Rhino 7 license key. You can check your license status by going to Help > About Rhinoceros.
    2. -
    3. Make sure you have a stable internet connection and enough disk space to download and install the update.
    4. -
    5. Make sure you have closed all other applications and processes that may interfere with Rhino 7.
    6. -
    7. Make sure you have the latest version of your operating system and drivers.
    8. -
    9. If you still have problems, you can contact the Rhino support team or visit the Rhino forum for help.
    10. -
    - -

    Rhino 7 Service Release is designed to be compatible with most systems and software. However, if you have any doubts or questions, you can always check the system requirements and compatibility notes on the Rhino website.

    - -


    -

    How to use Rhino 7 Service Release features?

    - -

    Rhino 7 Service Release offers many new features and improvements that can enhance your 3D modeling and rendering experience. Some of the most notable features include:

    - -
      -
    • SubD objects: SubD objects are a new type of geometry that can create smooth and organic shapes with ease. You can convert any mesh or NURBS object to a SubD object, or create one from scratch using the SubD commands. You can also edit SubD objects using the Gumball, control points, or sub-object selection.
    • -
    • QuadRemesh: QuadRemesh is a new command that can create a quad-dominant mesh from any input geometry. This can help you optimize your mesh topology, reduce polygon count, or prepare your model for SubD conversion.
    • -
    • Grasshopper Player: Grasshopper Player is a new feature that allows you to run Grasshopper definitions without opening Grasshopper. You can access Grasshopper Player from the Rhino toolbar, command line, or context menu. You can also customize the inputs and outputs of your Grasshopper definitions using the MetaHopper plug-in.
    • -
    • PBR materials: PBR materials are a new type of materials that use physically based rendering (PBR) to create realistic and consistent appearance across different lighting conditions and render engines. You can create PBR materials from scratch, import them from other sources, or convert existing materials to PBR.
    • -
    • Decals: Decals are a new feature that allows you to apply images or textures to specific parts of your model without affecting the underlying material. You can use decals to add logos, labels, stickers, or details to your objects. You can also adjust the size, position, rotation, and transparency of your decals.
    • -
    - -

    To learn more about how to use these features and more, you can check out the Rhino 7 documentation, tutorials, videos, and webinars on the Rhino website.
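As a small illustration of how these tools can be driven from a script, here is a minimal RhinoPython sketch (an assumption-laden example, not official Rhino sample code) that prompts for an object and launches the QuadRemesh command with its default options. It assumes you run it inside Rhino 7, for example via the EditPythonScript command.

import rhinoscriptsyntax as rs

# Ask the user to pick a mesh or surface to remesh.
obj = rs.GetObject("Select a mesh or surface to remesh")
if obj:
    rs.SelectObject(obj)        # make the picked object the current selection
    rs.Command("_QuadRemesh")   # launch QuadRemesh on the selection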

    - -


    -

    How to learn more about Rhino 7 Service Release features?

    - -

    If you want to learn more about how to use the new features and improvements of Rhino 7 Service Release, you can find many resources online that can help you. Some of the best sources include:

    - -
      -
    • The Rhino website: The official website of Rhino has a lot of information and documentation about Rhino 7 Service Release. You can find the release notes, system requirements, compatibility notes, download links, and more. You can also access the online help, tutorials, videos, webinars, and blogs that cover various topics and tips about Rhino 7 Service Release.
    • -
    • The Rhino forum: The Rhino forum is a great place to interact with other Rhino users and experts. You can ask questions, share your work, get feedback, and learn from others. You can also find announcements, news, events, and challenges related to Rhino 7 Service Release.
    • -
    • The Rhino YouTube channel: The Rhino YouTube channel has many videos that demonstrate and explain the new features and improvements of Rhino 7 Service Release. You can watch them at your own pace and follow along with the examples. You can also subscribe to the channel to get notified of new videos.
    • -
    • The Rhino newsletter: The Rhino newsletter is a monthly email that delivers the latest news and updates about Rhino 7 Service Release. You can sign up for free and get exclusive content, tips, tricks, and offers.
    • -
    - -

    By using these resources, you can learn more about how to use Rhino 7 Service Release effectively and efficiently.

    - -


    -

    How to get feedback and support for Rhino 7 Service Release?

    - -

    If you have any feedback or suggestions about Rhino 7 Service Release, you can share them with the Rhino development team and the Rhino community. You can also get help and support if you encounter any issues or difficulties with Rhino 7 Service Release. Some of the ways you can do this include:

    - -
      -
    • The Rhino feedback form: The Rhino feedback form is a simple and quick way to send your comments and ideas to the Rhino development team. You can access it from Help > Send Feedback in Rhino 7. You can also attach screenshots or files to illustrate your feedback.
    • -
    • The Rhino bug tracker: The Rhino bug tracker is a tool that allows you to report any bugs or errors that you find in Rhino 7 Service Release. You can access it from Help > Report a Bug in Rhino 7. You can also view the status of your reports and other reports from other users.
    • -
    • The Rhino support email: The Rhino support email is a direct way to contact the Rhino technical support team if you need any assistance or guidance with Rhino 7 Service Release. You can email them at tech@mcneel.com. You can also attach screenshots or files to explain your problem.
    • -
    - -

    By using these methods, you can communicate with the Rhino development team and the Rhino community, and get feedback and support for Rhino 7 Service Release.

    - -


    -

    Conclusion

    - -

    Rhino 7 Service Release is a must-have for all Rhino 7 users. It offers many new features and improvements that can help you create amazing 3D models and renderings. It also fixes any bugs or errors that may affect your workflow or results. By downloading Rhino 7 Service Release, you can ensure that you are using the most up-to-date and reliable version of Rhino 7. You can also learn more about how to use Rhino 7 Service Release features, get feedback and support from the Rhino development team and the Rhino community, and access various resources online that can enhance your 3D modeling and rendering skills. To download Rhino 7 Service Release, just go to Help > Check for Updates or visit the Rhino website. Happy modeling!

    -
    -
    \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Disney on Google Play Store How to Access New Releases Classics and More.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Disney on Google Play Store How to Access New Releases Classics and More.md deleted file mode 100644 index f00ecf099e8e1e8bac950d6d0b3b46c59e50bd60..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Disney on Google Play Store How to Access New Releases Classics and More.md +++ /dev/null @@ -1,101 +0,0 @@ -
    -

    How to Download Disney Plus on Google Play Store

    -

    Disney Plus is one of the best streaming services available, offering a huge library of content from Disney, Pixar, Marvel, Star Wars, National Geographic, and more. Whether you want to watch the latest releases, timeless classics, or exclusive originals, there's something for everyone on Disney Plus. In this article, we'll show you how to download and install Disney Plus on your Android device using the Google Play Store, and how to enjoy all the features and benefits of this amazing service.

    -

    download disney plus on google play store


    Download Zip ——— https://bltlly.com/2uOnGp



    -

    What is Disney Plus and why you should get it

    -

    Disney Plus is a streaming service that lets you access unlimited entertainment from some of the most popular brands in the world. You can watch movies, shows, documentaries, shorts, specials, and more, all in one place. Here are some of the reasons why you should get Disney Plus:

    -

    Disney Plus features and benefits

    -
      -
    • You can create up to seven user profiles per subscription, each with their own preferences and recommendations.
    • -
    • You can use a special kids profile option that restricts streaming to titles that are rated TV-7FV and G.
    • -
    • You can stream on up to four different devices at the same time.
    • -
    • You can watch in up to 4K resolution with high dynamic range (HDR), using the HDR10 or Dolby Vision formats.
    • -
    • You can enjoy IMAX Enhanced playback for select movies, which lets you see even more picture on screen.
    • -
    • You can access special features for many of the movies and shows, such as trailers, deleted scenes, gag reels, and behind-the-scenes featurettes.
    • -
    • You can download any movie or series and watch it offline via the Disney Plus app on up to 10 different smartphones and tablets.
    • -
    • You can host virtual movie nights with GroupWatch feature that lets you watch Disney Plus with up to six friends.
    • -
    -

    Disney Plus subscription plans and prices

    -

    Disney Plus offers two subscription plans for you to choose from:

    - - - - -
Plan | Price | Features
Disney+ Basic | $7.99/month or $79.99/year | Disney+ (With Ads)
Disney+ Premium | $10.99/month or $109.99/year | Disney+ (No Ads)
    -

You can also get the Disney Bundle that includes Disney+, Hulu, and ESPN+ for $12.99/month (Basic With Ads) or $19.99/month (Premium No Ads). This gives you access to even more content, such as live sports, TV episodes, and more.

    How to download and install Disney Plus on your Android device

    -

    Now that you know what Disney Plus is and why you should get it, let's see how you can download and install it on your Android device. It's very easy and only takes a few minutes. Just follow these steps:

    -

    Step 1: Open the Google Play Store app on your device

    -

    The Google Play Store is the official app store for Android devices, where you can find and download millions of apps, games, books, movies, and more. To open it, just tap on the icon that looks like a colorful triangle on your home screen or app drawer.

    -

    Step 2: Search for Disney Plus or use this link

    -

    Once you open the Google Play Store, you can search for Disney Plus by typing its name in the search bar at the top of the screen. Alternatively, you can use this link to go directly to the Disney Plus app page on the Google Play Store.

    -


    -

    Step 3: Tap on Install and wait for the app to download

    -

    When you find the Disney Plus app, tap on the green Install button to start downloading it to your device. You will see a progress bar that shows you how much of the app has been downloaded. Depending on your internet speed and device storage, this may take a few seconds or minutes.

    -

    Step 4: Open the app and sign in with your Disney Plus account or create a new one

    -

    After the app has been downloaded and installed, you can open it by tapping on the Open button on the Google Play Store or by tapping on the Disney Plus icon on your home screen or app drawer. The first time you open the app, you will be asked to sign in with your Disney Plus account or create a new one if you don't have one yet. To sign in, enter your email address and password and tap on Continue. To create a new account, tap on Start Free Trial and follow the instructions to choose a subscription plan and enter your payment details.

    -

    How to enjoy Disney Plus on your Android device

    -

    Congratulations! You have successfully downloaded and installed Disney Plus on your Android device. Now you can enjoy all the amazing content that this service has to offer. Here are some of the things you can do with Disney Plus on your Android device:

    -

    Access thousands of movies and shows from Disney, Pixar, Marvel, Star Wars, and more

    -

    Disney Plus has a huge library of movies and shows from some of the most popular brands in the world. You can browse by category, genre, or franchise, or use the search function to find what you're looking for. You can also discover new and exclusive content that is only available on Disney Plus, such as The Mandalorian, WandaVision, Loki, Soul, Hamilton, and more.

    -

    Stream in up to 4K resolution with HDR and Dolby Atmos sound on compatible devices

    -

    If you have a compatible device and a fast internet connection, you can stream Disney Plus content in up to 4K resolution with high dynamic range (HDR) and Dolby Atmos sound. This means that you can enjoy stunning picture quality and immersive sound that will make you feel like you're in the middle of the action. To stream in 4K HDR with Dolby Atmos, make sure that your device supports these formats and that you have enabled them in your device settings.

    -

    Download any movie or series and watch it offline

    -

    If you want to watch Disney Plus content without an internet connection, you can download any movie or series and watch it offline via the Disney Plus app. This is great for when you're traveling, commuting, or just want to save some data. To download a movie or series, just tap on the download icon next to the title and choose the quality level you want. You can then access your downloads from the Downloads tab at the bottom of the screen.

    -

    Host virtual movie nights with GroupWatch feature

    -

    If you want to watch Disney Plus content with your friends or family who are not physically with you, you can use the GroupWatch feature that lets you watch together online. GroupWatch allows up to seven people to join a virtual viewing party where they can watch the same movie or show at the same time and react with emojis. To start a GroupWatch session, just tap on the GroupWatch icon next to the title and invite your friends or family via a link.

    -

    Use parental controls to create a safe environment for kids

    -

If you have kids who use Disney Plus, you can use parental controls to create a safe environment for them. Parental controls allow you to restrict the streaming to titles that are rated TV-Y7-FV and G. To use parental controls, you need to create a kids profile and set a PIN code that will prevent them from switching to other profiles. To create a kids profile, go to the Profile tab at the bottom of the screen and tap on Add Profile. Then, choose the Kids option and enter a name and an avatar for the profile. To set a PIN code, go to the Settings tab at the bottom of the screen and tap on Parental Controls. Then, enter a four-digit PIN code and confirm it.

    -

    Conclusion and FAQs

    -

    Disney Plus is a great streaming service for the whole family, offering a wide range of content from some of the most popular brands in the world. You can download and install Disney Plus on your Android device using the Google Play Store, and enjoy all the features and benefits that it has to offer. Here are some of the frequently asked questions about Disney Plus:

    -

    FAQs:

    -

    How much does Disney Plus cost?

    -

    Disney Plus costs $7.99/month or $79.99/year for the Basic plan, or $10.99/month or $109.99/year for the Premium plan. You can also get the Disney Bundle that includes Disney+, Hulu, and ESPN+ for $12.99/month (Basic With Ads) or $19.99/month (Premium No Ads).

    -

    What devices can I watch Disney Plus on?

    -

You can watch Disney Plus on a variety of devices, such as smartphones, tablets, computers, smart TVs, streaming devices, game consoles, and more. For a full list of compatible devices, check the official Disney Plus help center.

    -

    How can I update the Disney Plus app?

    -

To update the Disney Plus app on your Android device, you need to go to the Google Play Store and check if there is a new version available. If there is, tap on Update and wait for the app to download and install. You can also enable automatic updates by opening the app's page in the Google Play Store, tapping the three-dot menu in the top-right corner, and checking Enable auto update.

    -

    How can I disable autoplay on Disney Plus?

    -

    To disable autoplay on Disney Plus, you need to go to the Settings tab at the bottom of the screen and tap on App Settings. Then, toggle off Autoplay Next Episode and Autoplay Previews.

    -

    How can I unlock more content on Disney Plus?

    -

    To unlock more content on Disney Plus, you need to use a VPN service that lets you change your location and access different regions of Disney Plus. This way, you can watch movies and shows that are not available in your country. However, be aware that using a VPN may violate Disney Plus's terms of service and result in account suspension or termination.

    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/Battle Of The Year 2013 Torrent.md b/spaces/tioseFevbu/cartoon-converter/Battle Of The Year 2013 Torrent.md deleted file mode 100644 index 445f09c43d3650035e96d5662479ca43d0eeb534..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/Battle Of The Year 2013 Torrent.md +++ /dev/null @@ -1,55 +0,0 @@ -## Battle Of The Year 2013 Torrent - - - -**Battle Of The Year 2013 Torrent === [https://vercupalo.blogspot.com/?d=2tvYpD](https://vercupalo.blogspot.com/?d=2tvYpD)** - - - -# Battle of the Year 2013 Torrent: How to Download the Best Dance Movie Ever - - - -If you are a fan of dance movies, you probably have heard of Battle of the Year, the 2013 film that showcases some of the most talented dancers from around the world competing in an international tournament. The movie features amazing choreography, stunning visuals, and a catchy soundtrack that will make you want to get up and dance. - - - -But what if you missed the chance to watch Battle of the Year in theaters or on streaming platforms? Don't worry, there is still a way to enjoy this awesome movie: by downloading it from a torrent site. Torrents are files that contain data that can be shared and downloaded by users through a peer-to-peer network. By using a torrent client, such as BitTorrent or uTorrent, you can download Battle of the Year 2013 torrent and watch it on your computer or any device that supports video playback. - - - -However, before you start downloading Battle of the Year 2013 torrent, there are some things you need to know. First, you need to make sure that the torrent file you choose is safe and reliable. There are many fake or malicious torrents out there that can harm your device or expose your personal information. To avoid this, you should always check the comments and ratings of other users who have downloaded the same torrent. You should also use a VPN (virtual private network) to hide your IP address and encrypt your traffic, so that your online activity cannot be traced or monitored by anyone. - - - -Second, you need to respect the copyright laws of your country and the rights of the creators of Battle of the Year. Downloading and sharing copyrighted content without permission is illegal and unethical. You should only download Battle of the Year 2013 torrent if you own a legitimate copy of the movie or if you have paid for a subscription to a streaming service that offers it. You should also delete the torrent file after you have watched the movie and not share it with others. - - - -By following these tips, you can download Battle of the Year 2013 torrent safely and legally and enjoy one of the best dance movies ever made. You will be amazed by the skills and creativity of the dancers and inspired by their passion and dedication. Battle of the Year is not just a movie, it's a celebration of dance culture and a tribute to its history and diversity. - - - -So, how can you find Battle of the Year 2013 torrent? There are many torrent sites that offer this movie, but not all of them are trustworthy or updated. Some of the most popular and reliable torrent sites are The Pirate Bay, RARBG, 1337x, and YTS. These sites have a large collection of movies, TV shows, music, games, and other content that you can download for free. They also have user-friendly interfaces and advanced search features that allow you to find what you are looking for easily. 
- - - -To download Battle of the Year 2013 torrent from any of these sites, you need to follow these steps: - - - -1. Go to the torrent site of your choice and type "Battle of the Year 2013" in the search bar. - -2. Choose the torrent file that has the best quality and the most seeders. Seeders are users who have the complete file and are sharing it with others. The more seeders a torrent has, the faster it will download. - -3. Click on the download button or the magnet link to open the torrent file with your torrent client. - -4. Select the destination folder where you want to save the movie and start the download. - -5. Wait until the download is complete and enjoy watching Battle of the Year. - - - -Downloading Battle of the Year 2013 torrent is a great way to experience this amazing dance movie. You will be able to watch it anytime and anywhere you want, without any interruptions or ads. You will also be able to share it with your friends and family who love dance as much as you do. However, remember to be careful and responsible when using torrents and respect the law and the artists who made this movie possible. - - 1b8d091108 \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Flash Memory Toolkit V1.20 Pro.rar.md b/spaces/tioseFevbu/cartoon-converter/scripts/Flash Memory Toolkit V1.20 Pro.rar.md deleted file mode 100644 index 6756f6c1ee7b4882fcb3f93f077109b9a81fbf23..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Flash Memory Toolkit V1.20 Pro.rar.md +++ /dev/null @@ -1,147 +0,0 @@ - -

    Flash Memory Toolkit V1.20 Pro.rar: A Comprehensive Review

    -

    If you use flash memory devices such as USB drives, memory cards, or SSDs, you know how important it is to keep them in good condition. Flash memory devices can store large amounts of data that you can access quickly and easily. However, they can also suffer from various problems that can affect their performance, reliability, and security.

    -

    That's why you need a tool like Flash Memory Toolkit, a software utility that can help you manage and repair your flash memory devices. Flash Memory Toolkit combines a wide range of functions that allow you to get the most out of your flash memory devices. You can measure their performance, scan for errors, securely delete all data, recover files, create backups, and more.

    -

    Flash Memory Toolkit V1.20 Pro.rar


    DOWNLOAD »»» https://urlcod.com/2uHvym



    -

    In this article, we will review Flash Memory Toolkit V1.20 Pro.rar in detail. We will explain what flash memory is, why you need Flash Memory Toolkit, how to use it, what are its pros and cons, and answer some frequently asked questions about it. By the end of this article, you will have a clear idea of whether Flash Memory Toolkit is the right tool for you.

    -

    What is Flash Memory?

    -

    Flash memory is a type of nonvolatile memory that can store data even when there is no power supply. It works by using electric fields to change the state of tiny transistors on a chip. Each transistor can store one bit of data (0 or 1) by being either on or off.

    -

    There are two main types of flash memory: NOR and NAND. They differ in their architecture and characteristics. NOR flash has a simple structure that allows random access to any location on the chip. It is fast and reliable but expensive and has low density. It is mainly used for storing code or firmware in devices such as computers or phones.

    -

    NAND flash has a complex structure that allows sequential access to blocks of data on the chip. It is cheaper and has higher density but slower and less reliable than NOR flash. It is mainly used for storing data in devices such as USB drives or memory cards.
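A consequence of the NAND design worth spelling out: data is written in pages but can only be erased in whole blocks, so updating even one byte means erasing and rewriting an entire block, and every erase wears the cells slightly. A toy model of that constraint (page and block sizes are illustrative, not taken from any real chip):

```python
# Toy model of NAND flash: writes happen per page, erases per whole block.
PAGES_PER_BLOCK = 4

class NandBlock:
    def __init__(self):
        self.pages = [None] * PAGES_PER_BLOCK  # None means erased
        self.erase_count = 0                   # tracks wear

    def write_page(self, index, data):
        if self.pages[index] is not None:
            raise ValueError("page already written; erase the whole block first")
        self.pages[index] = data

    def erase(self):
        self.pages = [None] * PAGES_PER_BLOCK
        self.erase_count += 1  # each erase wears the cells a little

block = NandBlock()
block.write_page(0, b"abcd")
try:
    block.write_page(0, b"efgh")   # in-place update is not allowed
except ValueError as e:
    print("rejected:", e)
block.erase()                      # must erase the full block...
block.write_page(0, b"efgh")       # ...before rewriting the page
print("erase cycles:", block.erase_count)
```

This erase-before-rewrite behavior is also why controllers use wear leveling: erase cycles are spread across blocks so no single block dies early.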

    -

Flash memory has several advantages over other types of storage media. It is small, lightweight, durable, shock-resistant, energy-efficient, and silent. It also has faster read and write speeds than hard disk drives or optical discs.

Why Do You Need Flash Memory Toolkit?

    Common Problems with Flash Memory Devices

    -

    Despite its advantages, flash memory is not perfect. It has some limitations and drawbacks that can cause problems for its users. Some of the common problems with flash memory devices are:

    -
      -
• Data loss: Flash memory devices can lose data due to physical damage, power failure, virus infection, accidental deletion, formatting, or corruption. Data loss can be devastating if you don't have a backup of your important files.
• Data corruption: Flash memory devices can suffer from data corruption due to bad sectors, wear and tear, improper removal, or interference. Data corruption can make your files unreadable, unusable, or incomplete.
• Data errors: Flash memory devices can encounter data errors due to faulty hardware, software bugs, or human errors. Data errors can affect the functionality, performance, or security of your devices.
• Performance degradation: Flash memory devices can experience performance degradation over time due to fragmentation, overuse, or aging. Performance degradation can slow down your devices and reduce their lifespan.
    -

    These problems can be frustrating and costly to deal with. You may lose your valuable data, waste your time and money, or compromise your privacy and security. That's why you need a tool like Flash Memory Toolkit to prevent and fix these problems.

    -

    How Flash Memory Toolkit Can Help You

    -

    Flash Memory Toolkit is a software utility that can help you manage and repair your flash memory devices. It offers a wide range of functions that can help you with various tasks related to flash memory. Some of the functions are:

    -
      -
• File Recovery: This function allows you to recover deleted or lost files from your flash memory devices. You can scan your devices for recoverable files and preview them before restoring them. You can also filter the files by name, size, date, or type.
• Backup: This function allows you to create backups of your flash memory devices. You can copy the entire device or selected partitions to another location. You can also compress the backup files to save space.
• Erase: This function allows you to securely delete all data from your flash memory devices. You can choose from different methods of erasure, such as quick erase, full erase, or secure erase. Secure erase overwrites the data multiple times with random patterns to prevent recovery.
• Scan: This function allows you to scan your flash memory devices for errors or bad sectors. You can choose from different modes of scanning, such as read-only scan, write scan, or verify scan. You can also view the results in a graphical or numerical format.
• Benchmark: This function allows you to measure the performance of your flash memory devices. You can test the read and write speeds of your devices and compare them with other devices. You can also view the results in a graphical or tabular format.
• Info: This function allows you to view detailed information about your flash memory devices. You can see the manufacturer, model, serial number, capacity, file system, partition layout, and more.
    -

    With these functions, you can easily manage and repair your flash memory devices. You can recover your lost data, create backups of your important files, securely delete your sensitive data, scan for errors and bad sectors, measure your performance and speed, and view detailed information about your devices.

    -

    -

    How to Use Flash Memory Toolkit?

    -

    Installation and Requirements

    -

    To use Flash Memory Toolkit, you need to download and install it on your computer. You can download Flash Memory Toolkit V1.20 Pro.rar from its official website or from other sources. The file size is about 1 MB and the installation process is simple and fast.

    -

    To install Flash Memory Toolkit, you need to extract the rar file and run the setup.exe file. Follow the instructions on the screen and choose the destination folder and language. After the installation is complete, you can launch Flash Memory Toolkit from the start menu or desktop shortcut.

    -

    To use Flash Memory Toolkit, you need to have a Windows operating system (XP/Vista/7/8/10) and a flash memory device (USB drive/memory card/SSD) that is supported by the software. The software supports most types of flash memory devices from various manufacturers. However, some devices may not be compatible or may require special drivers or settings.

    -

    User Interface and Options

    -

The user interface of Flash Memory Toolkit is simple and intuitive. The main window consists of three parts:

    -
      -
• Device selection: This is where you can select the flash memory device that you want to work with. You can see the device name, drive letter, capacity, and file system. You can also refresh the list or eject the device.
• Function selection: This is where you can choose the function that you want to use. You can see the icons and names of the six functions: File Recovery, Backup, Erase, Scan, Benchmark, and Info. You can also access the help file or the settings menu.
• Function window: This is where you can see the details and options of the selected function. You can also start, stop, or pause the operation, and view the progress and results.
    -

    The user interface of Flash Memory Toolkit is designed to be user-friendly and easy to navigate. You can switch between different functions and devices with a few clicks. You can also customize some of the settings, such as language, theme, sound, or log file.

    -

    Using the Functions

    -

    To use any of the functions of Flash Memory Toolkit, you need to follow these steps:

    -
      -
1. Select the flash memory device that you want to work with from the device selection area.
2. Select the function that you want to use from the function selection area.
3. Configure the options and parameters of the selected function in the function window.
4. Click on the start button to begin the operation.
5. Wait for the operation to finish and view the results in the function window.
    -

    Here is a brief guide on how to use each function of Flash Memory Toolkit:

    -

    File Recovery

    -

    This function allows you to recover deleted or lost files from your flash memory devices. To use this function, you need to:

    -
      -
1. Select the flash memory device that contains the files that you want to recover.
2. Select the File Recovery function from the function selection area.
3. Select the scan mode from the drop-down menu. You can choose from quick scan or full scan. Quick scan is faster but may not find all files. Full scan is slower but more thorough.
4. Click on the start button to begin scanning your device for recoverable files.
5. Wait for the scan to finish and view the list of files that were found in the function window. You can sort, filter, or preview the files by name, size, date, or type.
6. Select the files that you want to recover by checking their boxes.
7. Click on the recover button to restore the selected files. You will be asked to choose a destination folder where you want to save them.
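Recovery tools generally work by scanning the raw medium for known file signatures instead of trusting a possibly damaged file table. A deliberately naive sketch of that idea for JPEG files (real carvers handle fragmentation and many more formats; the image path is hypothetical):

```python
# Naive "file carving": find JPEG start/end markers in a raw disk image.
JPEG_START = b"\xff\xd8\xff"
JPEG_END = b"\xff\xd9"

def carve_jpegs(image_path):
    with open(image_path, "rb") as f:
        data = f.read()
    found, pos = [], 0
    while True:
        start = data.find(JPEG_START, pos)
        if start == -1:
            break
        end = data.find(JPEG_END, start)
        if end == -1:
            break
        found.append(data[start:end + 2])  # include the end marker
        pos = end + 2
    return found

for i, blob in enumerate(carve_jpegs("usb_drive.img")):  # path is illustrative
    with open(f"recovered_{i}.jpg", "wb") as out:
        out.write(blob)
```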
    -

    Backup

    -

    This function allows you to create backups of your flash memory devices. To use this function, you need to:

    -
      -
1. Select the flash memory device that you want to back up.
2. Select the Backup function from the function selection area.
3. Select whether you want to back up the entire device or selected partitions from the drop-down menu. You can also choose whether or not to compress the backup files.
4. Click on the start button to begin backing up your device.
5. Wait for the backup to finish and view the status and details of the backup in the function window. You can also see the location and size of the backup files.
6. To restore a backup, click on the restore button and select the backup file that you want to restore. You will be asked to choose a destination device where you want to restore it.
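A device backup of this kind is essentially a sector-by-sector copy, optionally compressed. A minimal sketch (the paths are illustrative; on Linux you could point the source at a raw device node you have permission to read):

```python
import gzip
import shutil

def backup_device(source_path, dest_path, compress=True):
    """Copy a device image to a backup file, optionally gzip-compressed."""
    with open(source_path, "rb") as src:
        if compress:
            with gzip.open(dest_path + ".gz", "wb") as dst:
                shutil.copyfileobj(src, dst, length=1024 * 1024)  # 1 MiB chunks
        else:
            with open(dest_path, "wb") as dst:
                shutil.copyfileobj(src, dst, length=1024 * 1024)

backup_device("usb_drive.img", "usb_backup.img")  # paths are illustrative
```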
    -

    Erase

    -

    This function allows you to securely delete all data from your flash memory devices. To use this function, you need to:

    -
      -
1. Select the flash memory device that you want to erase.
2. Select the Erase function from the function selection area.
3. Select the erase method from the drop-down menu. You can choose from quick erase, full erase, or secure erase. Quick erase is fast but may not delete all data. Full erase is slow but more thorough. Secure erase is slowest but most secure.
4. Click on the start button to begin erasing your device.
5. Wait for the erase to finish and view the status and details of the erase in the function window. You can also see the time and speed of the erase.
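The secure erase method described above is, at its core, several passes of random overwrites before the data is released. A minimal sketch of the same idea for a single file (this is illustrative, not Flash Memory Toolkit's actual implementation; erasing a whole device would mean opening its raw device node with administrator rights):

```python
import os

def secure_erase_file(path, passes=3, chunk=1024 * 1024):
    """Overwrite a file's contents with random bytes several times, then delete it."""
    size = os.path.getsize(path)
    with open(path, "r+b") as f:
        for _ in range(passes):
            f.seek(0)
            remaining = size
            while remaining > 0:
                step = min(chunk, remaining)
                f.write(os.urandom(step))
                remaining -= step
            f.flush()
            os.fsync(f.fileno())  # push each pass to the device
    os.remove(path)
```

One caveat worth knowing: flash controllers remap blocks behind the scenes (wear leveling), so overwriting through the file system cannot guarantee every physical cell is cleared; that is why dedicated secure-erase modes exist.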
    -

    Scan

    -

    This function allows you to scan your flash memory devices for errors or bad sectors. To use this function, you need to:

    -
      -
1. Select the flash memory device that you want to scan.
2. Select the Scan function from the function selection area.
3. Select the scan mode from the drop-down menu. You can choose from read-only scan, write scan, or verify scan. Read-only scan is fast but may not detect all errors. Write scan is slow but more accurate. Verify scan is slowest but most reliable.
4. Click on the start button to begin scanning your device.
5. Wait for the scan to finish and view the results in the function window. You can see the number and percentage of errors or bad sectors, and their locations on a graphical or numerical display.
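Conceptually, a read-only surface scan just reads the medium chunk by chunk and records every offset that raises an I/O error. A hedged sketch against an image file or a readable device path:

```python
import os

def scan_for_bad_chunks(path, chunk=64 * 1024):
    """Read the whole medium chunk by chunk and report unreadable offsets."""
    size = os.path.getsize(path)
    bad = []
    with open(path, "rb") as f:
        for offset in range(0, size, chunk):
            f.seek(offset)
            try:
                f.read(min(chunk, size - offset))
            except OSError:
                bad.append(offset)  # this region could not be read
    return bad

print(scan_for_bad_chunks("usb_drive.img"))  # path is illustrative
```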
    -

    Benchmark

    -

    This function allows you to measure the performance of your flash memory devices. To use this function, you need to:

    -
      -
1. Select the flash memory device that you want to test.
2. Select the Benchmark function from the function selection area.
3. Select whether you want to test the read speed or write speed of your device from the drop-down menu. You can also select the file size and the number of runs for the test.
4. Click on the start button to begin testing your device.
5. Wait for the test to finish and view the results in the function window. You can see the average, minimum, and maximum speeds of your device, and compare them with other devices. You can also see the results in a graphical or tabular format.
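A sequential benchmark of this kind boils down to timing how long a large write and a large re-read take. A rough sketch (this simple version does not bypass the operating system's cache, so real tools will report more conservative numbers; the drive path is only an example):

```python
import os
import time

def benchmark(drive_path, size_mb=64):
    """Write then read a test file on the target drive and report MB/s."""
    test_file = os.path.join(drive_path, "speed_test.bin")
    payload = os.urandom(1024 * 1024)  # 1 MiB of random data

    start = time.perf_counter()
    with open(test_file, "wb") as f:
        for _ in range(size_mb):
            f.write(payload)
        f.flush()
        os.fsync(f.fileno())           # make sure data actually hits the drive
    write_speed = size_mb / (time.perf_counter() - start)

    start = time.perf_counter()
    with open(test_file, "rb") as f:
        while f.read(1024 * 1024):
            pass
    read_speed = size_mb / (time.perf_counter() - start)

    os.remove(test_file)
    return write_speed, read_speed

w, r = benchmark("E:\\")  # drive letter is an example
print(f"write: {w:.1f} MB/s, read: {r:.1f} MB/s")
```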
    -

    Info

    -

    This function allows you to view detailed information about your flash memory devices. To use this function, you need to:

    -
      -
1. Select the flash memory device that you want to view.
2. Select the Info function from the function selection area.
3. View the information in the function window. You can see the manufacturer, model, serial number, capacity, file system, partition layout, and more. You can also see the device temperature and health status.
    -

    Pros and Cons of Flash Memory Toolkit

    -

    Flash Memory Toolkit is a powerful and versatile tool that can help you manage and repair your flash memory devices. However, like any other tool, it has its pros and cons. Here are some of them:

    -

    Pros

    -
      -
• Easy to use: Flash Memory Toolkit has a simple and intuitive user interface that makes it easy to use. You can switch between different functions and devices with a few clicks. You can also customize some of the settings to suit your preferences.
• Comprehensive: Flash Memory Toolkit offers a wide range of functions that cover most of the tasks related to flash memory. You can recover files, create backups, erase data, scan for errors, measure performance, and view information with one tool.
• Effective: Flash Memory Toolkit delivers reliable and accurate results for each function. You can recover your lost data, backup your important files, securely delete your sensitive data, scan for errors and bad sectors, measure your performance and speed, and view detailed information about your devices.
• Affordable: Flash Memory Toolkit is a free tool that you can download and use without any limitations or restrictions. You don't need to pay anything to use all of its functions and features.
    -

    Cons

    -
      -
• Limited compatibility: Flash Memory Toolkit may not work with some flash memory devices that are not supported by the software. Some devices may require special drivers or settings to be recognized by the software.
• Potential risks: Flash Memory Toolkit may cause some damage or data loss to your flash memory devices if you use it incorrectly or carelessly. For example, if you erase or write data to a device that is already corrupted or faulty, you may make it worse. You should always backup your data before using any function of Flash Memory Toolkit.
• No updates: Flash Memory Toolkit has not been updated since 2010. It may not be compatible with newer versions of Windows or newer types of flash memory devices. It may also have some bugs or errors that have not been fixed.
    -

    Conclusion and FAQs

    -

    In conclusion, Flash Memory Toolkit is a useful tool that can help you manage and repair your flash memory devices. It offers a wide range of functions that allow you to recover files, create backups, erase data, scan for errors, measure performance, and view information. It is easy to use, comprehensive, effective, and affordable. However, it also has some limitations and drawbacks that you should be aware of. It may not work with some devices that are not supported by the software. It may also cause some damage or data loss if you use it incorrectly or carelessly. It has not been updated since 2010 and may not be compatible with newer systems or devices.

    -

    If you are looking for a tool that can help you manage and repair your flash memory devices, Flash Memory Toolkit may be a good option for you. However, you should always backup your data before using it and follow the instructions carefully. You should also check for other alternatives that may offer more features or updates.

    -

    Here are some frequently asked questions about Flash Memory Toolkit:

    -

    Q: Where can I download Flash Memory Toolkit?

    -

    A: You can download Flash Memory Toolkit from its official website: http://www.flashmemorytoolkit.com/. You can also find it on other websites that offer software downloads.

    -

    Q: How do I uninstall Flash Memory Toolkit?

    -

    A: To uninstall Flash Memory Toolkit, you need to go to the Control Panel > Programs > Uninstall a program > Flash Memory Toolkit > Uninstall. You can also use the uninstaller that comes with the software. You can find it in the installation folder or the start menu.

    -

    Q: Is Flash Memory Toolkit safe to use?

    -

    A: Flash Memory Toolkit is generally safe to use, as long as you download it from a trusted source and use it correctly. However, you should always backup your data before using any function of Flash Memory Toolkit, as it may cause some damage or data loss to your flash memory devices. You should also scan your device for viruses or malware before and after using Flash Memory Toolkit, as some malicious programs may try to infect or corrupt your device.

    -

    Q: How do I update Flash Memory Toolkit?

    -

    A: Flash Memory Toolkit has not been updated since 2010, and there is no official information on whether it will be updated in the future. You can check the official website for any news or updates, but you may not find any. You can also look for other sources that may offer newer versions or patches of Flash Memory Toolkit, but you should be careful about their reliability and security.

    -

    Q: What are some alternatives to Flash Memory Toolkit?

    -

    A: There are many other tools that can help you manage and repair your flash memory devices. Some of them are:

    -
      -
• USB Disk Storage Format Tool: This is a free tool that can format, repair, and erase your USB flash drives. It can also create bootable USB drives and remove write protection.
• Recuva: This is a free tool that can recover deleted or lost files from your flash memory devices. It can also securely delete files and scan for errors.
• CrystalDiskMark: This is a free tool that can measure the performance of your flash memory devices. It can test the read and write speeds of your devices and compare them with other devices.
• USBDeview: This is a free tool that can view detailed information about your flash memory devices. It can show the device name, description, serial number, vendor ID, product ID, and more. It can also enable or disable devices, uninstall drivers, or eject devices.

    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Mcdonalds Balanced Scorecard.pdf.md b/spaces/tioseFevbu/cartoon-converter/scripts/Mcdonalds Balanced Scorecard.pdf.md deleted file mode 100644 index 151bd22fa7e95b473be73e3fc39c750e93611ed3..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Mcdonalds Balanced Scorecard.pdf.md +++ /dev/null @@ -1,103 +0,0 @@ -
    -

    How McDonald's Uses a Balanced Scorecard Approach to Strategize its Fast Food Business

    - -

    McDonald's is one of the largest and most successful fast food chains in the world. It has been able to maintain its competitive edge by constantly innovating its business strategies and aligning them with its core values. But how does McDonald's measure and manage its performance across different aspects of its operations? One of the tools that McDonald's uses is a balanced scorecard approach.

    - -

    A balanced scorecard is a strategic management system that helps organizations translate their vision and mission into specific objectives, measures, targets, and initiatives. It provides a comprehensive view of the organization's performance by balancing four perspectives: financial, customer, internal process, and learning and growth. By using a balanced scorecard, McDonald's can monitor and improve its performance in each of these areas and ensure that they are aligned with its overall goals.
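To make the structure concrete, a balanced scorecard can be modeled as a small table of perspectives, each carrying a measure, a target, and an actual value. The sketch below uses invented numbers purely to illustrate the shape of the data; they are not McDonald's figures:

```python
# Illustrative balanced scorecard structure (all numbers are made up).
scorecard = {
    "financial": {"measure": "operating margin (%)", "target": 40, "actual": 38},
    "customer": {"measure": "satisfaction index", "target": 85, "actual": 88},
    "internal process": {"measure": "avg service time (s)", "target": 90, "actual": 95},
    "learning and growth": {"measure": "training hours/employee", "target": 40, "actual": 42},
}

for perspective, kpi in scorecard.items():
    hit = kpi["actual"] >= kpi["target"]
    # For time-based measures, lower is better -- flip the comparison.
    if "time" in kpi["measure"]:
        hit = kpi["actual"] <= kpi["target"]
    status = "on target" if hit else "below target"
    print(f"{perspective}: {kpi['measure']} = {kpi['actual']} "
          f"(target {kpi['target']}) -> {status}")
```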

    -

    Mcdonald's Balanced Scorecard.pdf


    DOWNLOAD ––– https://urlcod.com/2uHveZ



    - -

    The Financial Perspective

    - -

    The financial perspective focuses on how the organization creates value for its shareholders. It measures the financial results of the organization's strategy, such as revenue, profit, return on investment, cash flow, and market share. Some of the financial objectives that McDonald's has are to increase sales growth, improve profitability, optimize capital structure, and enhance shareholder value.

    - -

    To achieve these objectives, McDonald's uses various measures and targets, such as sales per customer, operating margin, return on assets, earnings per share, and total shareholder return. Some of the initiatives that McDonald's implements to improve its financial performance are to expand its global presence, diversify its menu offerings, optimize its pricing strategy, reduce costs and waste, and invest in technology and innovation.

    - -

    The Customer Perspective

    - -

    The customer perspective focuses on how the organization meets the needs and expectations of its customers. It measures the customer outcomes of the organization's strategy, such as customer satisfaction, loyalty, retention, acquisition, and market share. Some of the customer objectives that McDonald's has are to deliver quality products and services, provide value for money, enhance customer experience, and build a strong brand image.

    - -

    To achieve these objectives, McDonald's uses various measures and targets, such as customer satisfaction index, net promoter score, customer retention rate, customer acquisition rate, and brand awareness. Some of the initiatives that McDonald's implements to improve its customer performance are to ensure food safety and quality standards, offer personalized and convenient services, leverage digital platforms and social media, create loyalty programs and promotions, and engage in corporate social responsibility.

    - -

    The Internal Process Perspective

    - -

The internal process perspective focuses on how the organization executes its strategy through its core business processes. It measures the efficiency and effectiveness of the organization's operations, such as product development, supply chain management, service delivery, innovation, and risk management. Some of the internal process objectives that McDonald's has are to optimize its operational processes, enhance its supply chain performance, innovate its product portfolio, and manage its risks.

    - -

To achieve these objectives, McDonald's uses various measures and targets, such as product development cycle time, supply chain reliability, service speed and accuracy, innovation rate, and risk exposure. Some of the initiatives that McDonald's implements to improve its internal process performance are to adopt lean and agile methodologies, strengthen its supplier relationships, implement quality and service standards, foster a culture of innovation, and develop a risk management framework.

    -

    - -

The Learning and Growth Perspective

    - -

The learning and growth perspective focuses on how the organization develops its intangible assets, such as human capital, information capital, and organizational capital. It measures the capabilities and competencies of the organization's people, systems, and culture, such as employee engagement, knowledge management, technology adoption, and organizational alignment. Some of the learning and growth objectives that McDonald's has are to attract and retain talent, enhance employee skills and performance, leverage data and analytics, and foster a collaborative and adaptive culture.

    - -

To achieve these objectives, McDonald's uses various measures and targets, such as employee turnover rate, employee satisfaction index, employee training hours, data quality index, and organizational climate survey. Some of the initiatives that McDonald's implements to improve its learning and growth performance are to invest in employee training and development, build knowledge management systems, adopt modern data and analytics tools, and promote a collaborative, adaptive work culture.

    -
    -
    \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py deleted file mode 100644 index 1044809f04082fe8ba43e57946638870e494dd40..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/cli/req_command.py +++ /dev/null @@ -1,502 +0,0 @@ -"""Contains the Command base classes that depend on PipSession. - -The classes in this module are in a separate module so the commands not -needing download / PackageFinder capability don't unnecessarily import the -PackageFinder machinery and all its vendored dependencies, etc. -""" - -import logging -import os -import sys -from functools import partial -from optparse import Values -from typing import TYPE_CHECKING, Any, List, Optional, Tuple - -from pip._internal.cache import WheelCache -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import Command -from pip._internal.cli.command_context import CommandContextMixIn -from pip._internal.exceptions import CommandError, PreviousBuildDirError -from pip._internal.index.collector import LinkCollector -from pip._internal.index.package_finder import PackageFinder -from pip._internal.models.selection_prefs import SelectionPreferences -from pip._internal.models.target_python import TargetPython -from pip._internal.network.session import PipSession -from pip._internal.operations.build.build_tracker import BuildTracker -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.constructors import ( - install_req_from_editable, - install_req_from_line, - install_req_from_parsed_requirement, - install_req_from_req_string, -) -from pip._internal.req.req_file import parse_requirements -from pip._internal.req.req_install import InstallRequirement -from pip._internal.resolution.base import BaseResolver -from pip._internal.self_outdated_check import pip_self_version_check -from pip._internal.utils.temp_dir import ( - TempDirectory, - TempDirectoryTypeRegistry, - tempdir_kinds, -) -from pip._internal.utils.virtualenv import running_under_virtualenv - -if TYPE_CHECKING: - from ssl import SSLContext - -logger = logging.getLogger(__name__) - - -def _create_truststore_ssl_context() -> Optional["SSLContext"]: - if sys.version_info < (3, 10): - raise CommandError("The truststore feature is only available for Python 3.10+") - - try: - import ssl - except ImportError: - logger.warning("Disabling truststore since ssl support is missing") - return None - - try: - import truststore - except ImportError: - raise CommandError( - "To use the truststore feature, 'truststore' must be installed into " - "pip's current environment." - ) - - return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - - -class SessionCommandMixin(CommandContextMixIn): - - """ - A class mixin for command classes needing _build_session(). 
- """ - - def __init__(self) -> None: - super().__init__() - self._session: Optional[PipSession] = None - - @classmethod - def _get_index_urls(cls, options: Values) -> Optional[List[str]]: - """Return a list of index urls from user-provided options.""" - index_urls = [] - if not getattr(options, "no_index", False): - url = getattr(options, "index_url", None) - if url: - index_urls.append(url) - urls = getattr(options, "extra_index_urls", None) - if urls: - index_urls.extend(urls) - # Return None rather than an empty list - return index_urls or None - - def get_default_session(self, options: Values) -> PipSession: - """Get a default-managed session.""" - if self._session is None: - self._session = self.enter_context(self._build_session(options)) - # there's no type annotation on requests.Session, so it's - # automatically ContextManager[Any] and self._session becomes Any, - # then https://github.com/python/mypy/issues/7696 kicks in - assert self._session is not None - return self._session - - def _build_session( - self, - options: Values, - retries: Optional[int] = None, - timeout: Optional[int] = None, - fallback_to_certifi: bool = False, - ) -> PipSession: - cache_dir = options.cache_dir - assert not cache_dir or os.path.isabs(cache_dir) - - if "truststore" in options.features_enabled: - try: - ssl_context = _create_truststore_ssl_context() - except Exception: - if not fallback_to_certifi: - raise - ssl_context = None - else: - ssl_context = None - - session = PipSession( - cache=os.path.join(cache_dir, "http") if cache_dir else None, - retries=retries if retries is not None else options.retries, - trusted_hosts=options.trusted_hosts, - index_urls=self._get_index_urls(options), - ssl_context=ssl_context, - ) - - # Handle custom ca-bundles from the user - if options.cert: - session.verify = options.cert - - # Handle SSL client certificate - if options.client_cert: - session.cert = options.client_cert - - # Handle timeouts - if options.timeout or timeout: - session.timeout = timeout if timeout is not None else options.timeout - - # Handle configured proxies - if options.proxy: - session.proxies = { - "http": options.proxy, - "https": options.proxy, - } - - # Determine if we can prompt the user for authentication or not - session.auth.prompting = not options.no_input - - return session - - -class IndexGroupCommand(Command, SessionCommandMixin): - - """ - Abstract base class for commands with the index_group options. - - This also corresponds to the commands that permit the pip version check. - """ - - def handle_pip_version_check(self, options: Values) -> None: - """ - Do the pip version check if not disabled. - - This overrides the default behavior of not doing the check. - """ - # Make sure the index_group options are present. - assert hasattr(options, "no_index") - - if options.disable_pip_version_check or options.no_index: - return - - # Otherwise, check if we're using the latest version of pip available. - session = self._build_session( - options, - retries=0, - timeout=min(5, options.timeout), - # This is set to ensure the function does not fail when truststore is - # specified in use-feature but cannot be loaded. This usually raises a - # CommandError and shows a nice user-facing error, but this function is not - # called in that try-except block. 
- fallback_to_certifi=True, - ) - with session: - pip_self_version_check(session, options) - - -KEEPABLE_TEMPDIR_TYPES = [ - tempdir_kinds.BUILD_ENV, - tempdir_kinds.EPHEM_WHEEL_CACHE, - tempdir_kinds.REQ_BUILD, -] - - -def warn_if_run_as_root() -> None: - """Output a warning for sudo users on Unix. - - In a virtual environment, sudo pip still writes to virtualenv. - On Windows, users may run pip as Administrator without issues. - This warning only applies to Unix root users outside of virtualenv. - """ - if running_under_virtualenv(): - return - if not hasattr(os, "getuid"): - return - # On Windows, there are no "system managed" Python packages. Installing as - # Administrator via pip is the correct way of updating system environments. - # - # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform - # checks: https://mypy.readthedocs.io/en/stable/common_issues.html - if sys.platform == "win32" or sys.platform == "cygwin": - return - - if os.getuid() != 0: - return - - logger.warning( - "Running pip as the 'root' user can result in broken permissions and " - "conflicting behaviour with the system package manager. " - "It is recommended to use a virtual environment instead: " - "https://pip.pypa.io/warnings/venv" - ) - - -def with_cleanup(func: Any) -> Any: - """Decorator for common logic related to managing temporary - directories. - """ - - def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None: - for t in KEEPABLE_TEMPDIR_TYPES: - registry.set_delete(t, False) - - def wrapper( - self: RequirementCommand, options: Values, args: List[Any] - ) -> Optional[int]: - assert self.tempdir_registry is not None - if options.no_clean: - configure_tempdir_registry(self.tempdir_registry) - - try: - return func(self, options, args) - except PreviousBuildDirError: - # This kind of conflict can occur when the user passes an explicit - # build directory with a pre-existing folder. In that case we do - # not want to accidentally remove it. - configure_tempdir_registry(self.tempdir_registry) - raise - - return wrapper - - -class RequirementCommand(IndexGroupCommand): - def __init__(self, *args: Any, **kw: Any) -> None: - super().__init__(*args, **kw) - - self.cmd_opts.add_option(cmdoptions.no_clean()) - - @staticmethod - def determine_resolver_variant(options: Values) -> str: - """Determines which resolver should be used, based on the given options.""" - if "legacy-resolver" in options.deprecated_features_enabled: - return "legacy" - - return "2020-resolver" - - @classmethod - def make_requirement_preparer( - cls, - temp_build_dir: TempDirectory, - options: Values, - build_tracker: BuildTracker, - session: PipSession, - finder: PackageFinder, - use_user_site: bool, - download_dir: Optional[str] = None, - verbosity: int = 0, - ) -> RequirementPreparer: - """ - Create a RequirementPreparer instance for the given parameters. - """ - temp_build_dir_path = temp_build_dir.path - assert temp_build_dir_path is not None - - resolver_variant = cls.determine_resolver_variant(options) - if resolver_variant == "2020-resolver": - lazy_wheel = "fast-deps" in options.features_enabled - if lazy_wheel: - logger.warning( - "pip is using lazily downloaded wheels using HTTP " - "range requests to obtain dependency information. " - "This experimental feature is enabled through " - "--use-feature=fast-deps and it is not ready for " - "production." 
- ) - else: - lazy_wheel = False - if "fast-deps" in options.features_enabled: - logger.warning( - "fast-deps has no effect when used with the legacy resolver." - ) - - return RequirementPreparer( - build_dir=temp_build_dir_path, - src_dir=options.src_dir, - download_dir=download_dir, - build_isolation=options.build_isolation, - check_build_deps=options.check_build_deps, - build_tracker=build_tracker, - session=session, - progress_bar=options.progress_bar, - finder=finder, - require_hashes=options.require_hashes, - use_user_site=use_user_site, - lazy_wheel=lazy_wheel, - verbosity=verbosity, - ) - - @classmethod - def make_resolver( - cls, - preparer: RequirementPreparer, - finder: PackageFinder, - options: Values, - wheel_cache: Optional[WheelCache] = None, - use_user_site: bool = False, - ignore_installed: bool = True, - ignore_requires_python: bool = False, - force_reinstall: bool = False, - upgrade_strategy: str = "to-satisfy-only", - use_pep517: Optional[bool] = None, - py_version_info: Optional[Tuple[int, ...]] = None, - ) -> BaseResolver: - """ - Create a Resolver instance for the given parameters. - """ - make_install_req = partial( - install_req_from_req_string, - isolated=options.isolated_mode, - use_pep517=use_pep517, - config_settings=getattr(options, "config_settings", None), - ) - resolver_variant = cls.determine_resolver_variant(options) - # The long import name and duplicated invocation is needed to convince - # Mypy into correctly typechecking. Otherwise it would complain the - # "Resolver" class being redefined. - if resolver_variant == "2020-resolver": - import pip._internal.resolution.resolvelib.resolver - - return pip._internal.resolution.resolvelib.resolver.Resolver( - preparer=preparer, - finder=finder, - wheel_cache=wheel_cache, - make_install_req=make_install_req, - use_user_site=use_user_site, - ignore_dependencies=options.ignore_dependencies, - ignore_installed=ignore_installed, - ignore_requires_python=ignore_requires_python, - force_reinstall=force_reinstall, - upgrade_strategy=upgrade_strategy, - py_version_info=py_version_info, - ) - import pip._internal.resolution.legacy.resolver - - return pip._internal.resolution.legacy.resolver.Resolver( - preparer=preparer, - finder=finder, - wheel_cache=wheel_cache, - make_install_req=make_install_req, - use_user_site=use_user_site, - ignore_dependencies=options.ignore_dependencies, - ignore_installed=ignore_installed, - ignore_requires_python=ignore_requires_python, - force_reinstall=force_reinstall, - upgrade_strategy=upgrade_strategy, - py_version_info=py_version_info, - ) - - def get_requirements( - self, - args: List[str], - options: Values, - finder: PackageFinder, - session: PipSession, - ) -> List[InstallRequirement]: - """ - Parse command-line arguments into the corresponding requirements. 
- """ - requirements: List[InstallRequirement] = [] - for filename in options.constraints: - for parsed_req in parse_requirements( - filename, - constraint=True, - finder=finder, - options=options, - session=session, - ): - req_to_add = install_req_from_parsed_requirement( - parsed_req, - isolated=options.isolated_mode, - user_supplied=False, - ) - requirements.append(req_to_add) - - for req in args: - req_to_add = install_req_from_line( - req, - None, - isolated=options.isolated_mode, - use_pep517=options.use_pep517, - user_supplied=True, - config_settings=getattr(options, "config_settings", None), - ) - requirements.append(req_to_add) - - for req in options.editables: - req_to_add = install_req_from_editable( - req, - user_supplied=True, - isolated=options.isolated_mode, - use_pep517=options.use_pep517, - config_settings=getattr(options, "config_settings", None), - ) - requirements.append(req_to_add) - - # NOTE: options.require_hashes may be set if --require-hashes is True - for filename in options.requirements: - for parsed_req in parse_requirements( - filename, finder=finder, options=options, session=session - ): - req_to_add = install_req_from_parsed_requirement( - parsed_req, - isolated=options.isolated_mode, - use_pep517=options.use_pep517, - user_supplied=True, - ) - requirements.append(req_to_add) - - # If any requirement has hash options, enable hash checking. - if any(req.has_hash_options for req in requirements): - options.require_hashes = True - - if not (args or options.editables or options.requirements): - opts = {"name": self.name} - if options.find_links: - raise CommandError( - "You must give at least one requirement to {name} " - '(maybe you meant "pip {name} {links}"?)'.format( - **dict(opts, links=" ".join(options.find_links)) - ) - ) - else: - raise CommandError( - "You must give at least one requirement to {name} " - '(see "pip help {name}")'.format(**opts) - ) - - return requirements - - @staticmethod - def trace_basic_info(finder: PackageFinder) -> None: - """ - Trace basic information about the provided objects. - """ - # Display where finder is looking for packages - search_scope = finder.search_scope - locations = search_scope.get_formatted_locations() - if locations: - logger.info(locations) - - def _build_package_finder( - self, - options: Values, - session: PipSession, - target_python: Optional[TargetPython] = None, - ignore_requires_python: Optional[bool] = None, - ) -> PackageFinder: - """ - Create a package finder appropriate to this requirement command. - - :param ignore_requires_python: Whether to ignore incompatible - "Requires-Python" values in links. Defaults to False. 
- """ - link_collector = LinkCollector.create(session, options=options) - selection_prefs = SelectionPreferences( - allow_yanked=True, - format_control=options.format_control, - allow_all_prereleases=options.pre, - prefer_binary=options.prefer_binary, - ignore_requires_python=ignore_requires_python, - ) - - return PackageFinder.create( - link_collector=link_collector, - selection_prefs=selection_prefs, - target_python=target_python, - ) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py deleted file mode 100644 index 89caaafbc17d871d836e810ba7c038648937254c..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - pretrained='open-mmlab://contrib/resnet50_gn', - backbone=dict(norm_cfg=norm_cfg), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/README.md deleted file mode 100644 index 69a1a3f16f5698f14d614670a1ce110e544f2b42..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Prime Sample Attention in Object Detection - -## Introduction - - - -```latex -@inproceedings{cao2019prime, - title={Prime sample attention in object detection}, - author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2020} -} -``` - -## Results and models - -| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download | -|:----:|:-------:|:-------------------:|:-------:|:------:|:-------:|:------:|:--------:| -| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | -| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) | -| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | -| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) | -| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | -| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) | -| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | -| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | -| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | -| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) | -| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | -| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) | -| × | SSD300 | VGG16 | 1x | 25.6 | | - | -| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) | -| × | SSD300 | VGG16 | 1x | 29.3 | | - | -| √ | SSD300 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) | - -**Notes:** - -- In the original paper, all models are trained and tested on mmdet v1.x, thus results may not be exactly the same with this release on v2.0. -- It is noted PISA only modifies the training pipeline so the inference time remains the same with the baseline. 
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/auto_augment.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/auto_augment.py deleted file mode 100644 index e19adaec18a96cac4dbe1d8c2c9193e9901be1fb..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/auto_augment.py +++ /dev/null @@ -1,890 +0,0 @@ -import copy - -import cv2 -import mmcv -import numpy as np - -from ..builder import PIPELINES -from .compose import Compose - -_MAX_LEVEL = 10 - - -def level_to_value(level, max_value): - """Map from level to values based on max_value.""" - return (level / _MAX_LEVEL) * max_value - - -def enhance_level_to_value(level, a=1.8, b=0.1): - """Map from level to values.""" - return (level / _MAX_LEVEL) * a + b - - -def random_negative(value, random_negative_prob): - """Randomly negate value based on random_negative_prob.""" - return -value if np.random.rand() < random_negative_prob else value - - -def bbox2fields(): - """The key correspondence from bboxes to labels, masks and - segmentations.""" - bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - bbox2seg = { - 'gt_bboxes': 'gt_semantic_seg', - } - return bbox2label, bbox2mask, bbox2seg - - -@PIPELINES.register_module() -class AutoAugment(object): - """Auto augmentation. - - This data augmentation is proposed in `Learning Data Augmentation - Strategies for Object Detection `_. - - TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms - - Args: - policies (list[list[dict]]): The policies of auto augmentation. Each - policy in ``policies`` is a specific augmentation policy, and is - composed by several augmentations (dict). When AutoAugment is - called, a random policy in ``policies`` will be selected to - augment images. - - Examples: - >>> replace = (104, 116, 124) - >>> policies = [ - >>> [ - >>> dict(type='Sharpness', prob=0.0, level=8), - >>> dict( - >>> type='Shear', - >>> prob=0.4, - >>> level=0, - >>> replace=replace, - >>> axis='x') - >>> ], - >>> [ - >>> dict( - >>> type='Rotate', - >>> prob=0.6, - >>> level=10, - >>> replace=replace), - >>> dict(type='Color', prob=1.0, level=6) - >>> ] - >>> ] - >>> augmentation = AutoAugment(policies) - >>> img = np.ones(100, 100, 3) - >>> gt_bboxes = np.ones(10, 4) - >>> results = dict(img=img, gt_bboxes=gt_bboxes) - >>> results = augmentation(results) - """ - - def __init__(self, policies): - assert isinstance(policies, list) and len(policies) > 0, \ - 'Policies must be a non-empty list.' - for policy in policies: - assert isinstance(policy, list) and len(policy) > 0, \ - 'Each policy in policies must be a non-empty list.' - for augment in policy: - assert isinstance(augment, dict) and 'type' in augment, \ - 'Each specific augmentation must be a dict with key' \ - ' "type".' - - self.policies = copy.deepcopy(policies) - self.transforms = [Compose(policy) for policy in self.policies] - - def __call__(self, results): - transform = np.random.choice(self.transforms) - return transform(results) - - def __repr__(self): - return f'{self.__class__.__name__}(policies={self.policies})' - - -@PIPELINES.register_module() -class Shear(object): - """Apply Shear Transformation to image (and its corresponding bbox, mask, - segmentation). - - Args: - level (int | float): The level should be in range [0,_MAX_LEVEL]. 
- img_fill_val (int | float | tuple): The filled values for image border. - If float, the same fill value will be used for all the three - channels of image. If tuple, the should be 3 elements. - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - prob (float): The probability for performing Shear and should be in - range [0, 1]. - direction (str): The direction for shear, either "horizontal" - or "vertical". - max_shear_magnitude (float): The maximum magnitude for Shear - transformation. - random_negative_prob (float): The probability that turns the - offset negative. Should be in range [0,1] - interpolation (str): Same as in :func:`mmcv.imshear`. - """ - - def __init__(self, - level, - img_fill_val=128, - seg_ignore_label=255, - prob=0.5, - direction='horizontal', - max_shear_magnitude=0.3, - random_negative_prob=0.5, - interpolation='bilinear'): - assert isinstance(level, (int, float)), 'The level must be type ' \ - f'int or float, got {type(level)}.' - assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \ - f'[0,{_MAX_LEVEL}], got {level}.' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \ - f'have 3 elements. got {len(img_fill_val)}.' - img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError( - 'img_fill_val must be float or tuple with 3 elements.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \ - 'elements of img_fill_val should between range [0,255].' \ - f'got {img_fill_val}.' - assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \ - f'range [0,1]. got {prob}.' - assert direction in ('horizontal', 'vertical'), 'direction must ' \ - f'in be either "horizontal" or "vertical". got {direction}.' - assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \ - f'should be type float. got {type(max_shear_magnitude)}.' - assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \ - 'max_shear_magnitude should be in range [0,1]. ' \ - f'got {max_shear_magnitude}.' - self.level = level - self.magnitude = level_to_value(level, max_shear_magnitude) - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.prob = prob - self.direction = direction - self.max_shear_magnitude = max_shear_magnitude - self.random_negative_prob = random_negative_prob - self.interpolation = interpolation - - def _shear_img(self, - results, - magnitude, - direction='horizontal', - interpolation='bilinear'): - """Shear the image. - - Args: - results (dict): Result dict from loading pipeline. - magnitude (int | float): The magnitude used for shear. - direction (str): The direction for shear, either "horizontal" - or "vertical". - interpolation (str): Same as in :func:`mmcv.imshear`. 
- """ - for key in results.get('img_fields', ['img']): - img = results[key] - img_sheared = mmcv.imshear( - img, - magnitude, - direction, - border_value=self.img_fill_val, - interpolation=interpolation) - results[key] = img_sheared.astype(img.dtype) - - def _shear_bboxes(self, results, magnitude): - """Shear the bboxes.""" - h, w, c = results['img_shape'] - if self.direction == 'horizontal': - shear_matrix = np.stack([[1, magnitude], - [0, 1]]).astype(np.float32) # [2, 2] - else: - shear_matrix = np.stack([[1, 0], [magnitude, - 1]]).astype(np.float32) - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - coordinates = np.stack([[min_x, min_y], [max_x, min_y], - [min_x, max_y], - [max_x, max_y]]) # [4, 2, nb_box, 1] - coordinates = coordinates[..., 0].transpose( - (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4] - new_coords = np.matmul(shear_matrix[None, :, :], - coordinates) # [nb_box, 2, 4] - min_x = np.min(new_coords[:, 0, :], axis=-1) - min_y = np.min(new_coords[:, 1, :], axis=-1) - max_x = np.max(new_coords[:, 0, :], axis=-1) - max_y = np.max(new_coords[:, 1, :], axis=-1) - min_x = np.clip(min_x, a_min=0, a_max=w) - min_y = np.clip(min_y, a_min=0, a_max=h) - max_x = np.clip(max_x, a_min=min_x, a_max=w) - max_y = np.clip(max_y, a_min=min_y, a_max=h) - results[key] = np.stack([min_x, min_y, max_x, max_y], - axis=-1).astype(results[key].dtype) - - def _shear_masks(self, - results, - magnitude, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Shear the masks.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.shear((h, w), - magnitude, - direction, - border_value=fill_val, - interpolation=interpolation) - - def _shear_seg(self, - results, - magnitude, - direction='horizontal', - fill_val=255, - interpolation='bilinear'): - """Shear the segmentation maps.""" - for key in results.get('seg_fields', []): - seg = results[key] - results[key] = mmcv.imshear( - seg, - magnitude, - direction, - border_value=fill_val, - interpolation=interpolation).astype(seg.dtype) - - def _filter_invalid(self, results, min_bbox_size=0): - """Filter bboxes and corresponding masks too small after shear - augmentation.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - - def __call__(self, results): - """Call function to shear images, bounding boxes, masks and semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Sheared results. - """ - if np.random.rand() > self.prob: - return results - magnitude = random_negative(self.magnitude, self.random_negative_prob) - self._shear_img(results, magnitude, self.direction, self.interpolation) - self._shear_bboxes(results, magnitude) - # fill_val set to 0 for background of mask. 
- self._shear_masks( - results, - magnitude, - self.direction, - fill_val=0, - interpolation=self.interpolation) - self._shear_seg( - results, - magnitude, - self.direction, - fill_val=self.seg_ignore_label, - interpolation=self.interpolation) - self._filter_invalid(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'img_fill_val={self.img_fill_val}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' - repr_str += f'prob={self.prob}, ' - repr_str += f'direction={self.direction}, ' - repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, ' - repr_str += f'random_negative_prob={self.random_negative_prob}, ' - repr_str += f'interpolation={self.interpolation})' - return repr_str - - -@PIPELINES.register_module() -class Rotate(object): - """Apply Rotate Transformation to image (and its corresponding bbox, mask, - segmentation). - - Args: - level (int | float): The level should be in range (0,_MAX_LEVEL]. - scale (int | float): Isotropic scale factor. Same in - ``mmcv.imrotate``. - center (int | float | tuple[float]): Center point (w, h) of the - rotation in the source image. If None, the center of the - image will be used. Same in ``mmcv.imrotate``. - img_fill_val (int | float | tuple): The fill value for image border. - If float, the same value will be used for all the three - channels of image. If tuple, the should be 3 elements (e.g. - equals the number of channels for image). - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - prob (float): The probability for perform transformation and - should be in range 0 to 1. - max_rotate_angle (int | float): The maximum angles for rotate - transformation. - random_negative_prob (float): The probability that turns the - offset negative. - """ - - def __init__(self, - level, - scale=1, - center=None, - img_fill_val=128, - seg_ignore_label=255, - prob=0.5, - max_rotate_angle=30, - random_negative_prob=0.5): - assert isinstance(level, (int, float)), \ - f'The level must be type int or float. got {type(level)}.' - assert 0 <= level <= _MAX_LEVEL, \ - f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.' - assert isinstance(scale, (int, float)), \ - f'The scale must be type int or float. got type {type(scale)}.' - if isinstance(center, (int, float)): - center = (center, center) - elif isinstance(center, tuple): - assert len(center) == 2, 'center with type tuple must have '\ - f'2 elements. got {len(center)} elements.' - else: - assert center is None, 'center must be None or type int, '\ - f'float or tuple, got type {type(center)}.' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\ - f'have 3 elements. got {len(img_fill_val)}.' - img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError( - 'img_fill_val must be float or tuple with 3 elements.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), \ - 'all elements of img_fill_val should between range [0,255]. '\ - f'got {img_fill_val}.' - assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ - 'got {prob}.' - assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\ - f'should be type int or float. got type {type(max_rotate_angle)}.' 
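-        # Cache the validated arguments; ``self.angle`` below is the concrete
-        # rotation magnitude derived from ``level`` via ``level_to_value``.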
- self.level = level - self.scale = scale - # Rotation angle in degrees. Positive values mean - # clockwise rotation. - self.angle = level_to_value(level, max_rotate_angle) - self.center = center - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.prob = prob - self.max_rotate_angle = max_rotate_angle - self.random_negative_prob = random_negative_prob - - def _rotate_img(self, results, angle, center=None, scale=1.0): - """Rotate the image. - - Args: - results (dict): Result dict from loading pipeline. - angle (float): Rotation angle in degrees, positive values - mean clockwise rotation. Same in ``mmcv.imrotate``. - center (tuple[float], optional): Center point (w, h) of the - rotation. Same in ``mmcv.imrotate``. - scale (int | float): Isotropic scale factor. Same in - ``mmcv.imrotate``. - """ - for key in results.get('img_fields', ['img']): - img = results[key].copy() - img_rotated = mmcv.imrotate( - img, angle, center, scale, border_value=self.img_fill_val) - results[key] = img_rotated.astype(img.dtype) - - def _rotate_bboxes(self, results, rotate_matrix): - """Rotate the bboxes.""" - h, w, c = results['img_shape'] - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - coordinates = np.stack([[min_x, min_y], [max_x, min_y], - [min_x, max_y], - [max_x, max_y]]) # [4, 2, nb_bbox, 1] - # pad 1 to convert from format [x, y] to homogeneous - # coordinates format [x, y, 1] - coordinates = np.concatenate( - (coordinates, - np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)), - axis=1) # [4, 3, nb_bbox, 1] - coordinates = coordinates.transpose( - (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1] - rotated_coords = np.matmul(rotate_matrix, - coordinates) # [nb_bbox, 4, 2, 1] - rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2] - min_x, min_y = np.min( - rotated_coords[:, :, 0], axis=1), np.min( - rotated_coords[:, :, 1], axis=1) - max_x, max_y = np.max( - rotated_coords[:, :, 0], axis=1), np.max( - rotated_coords[:, :, 1], axis=1) - min_x, min_y = np.clip( - min_x, a_min=0, a_max=w), np.clip( - min_y, a_min=0, a_max=h) - max_x, max_y = np.clip( - max_x, a_min=min_x, a_max=w), np.clip( - max_y, a_min=min_y, a_max=h) - results[key] = np.stack([min_x, min_y, max_x, max_y], - axis=-1).astype(results[key].dtype) - - def _rotate_masks(self, - results, - angle, - center=None, - scale=1.0, - fill_val=0): - """Rotate the masks.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.rotate((h, w), angle, center, scale, fill_val) - - def _rotate_seg(self, - results, - angle, - center=None, - scale=1.0, - fill_val=255): - """Rotate the segmentation map.""" - for key in results.get('seg_fields', []): - seg = results[key].copy() - results[key] = mmcv.imrotate( - seg, angle, center, scale, - border_value=fill_val).astype(seg.dtype) - - def _filter_invalid(self, results, min_bbox_size=0): - """Filter bboxes and corresponding masks too small after rotate - augmentation.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. 
gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - - def __call__(self, results): - """Call function to rotate images, bounding boxes, masks and semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Rotated results. - """ - if np.random.rand() > self.prob: - return results - h, w = results['img'].shape[:2] - center = self.center - if center is None: - center = ((w - 1) * 0.5, (h - 1) * 0.5) - angle = random_negative(self.angle, self.random_negative_prob) - self._rotate_img(results, angle, center, self.scale) - rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale) - self._rotate_bboxes(results, rotate_matrix) - self._rotate_masks(results, angle, center, self.scale, fill_val=0) - self._rotate_seg( - results, angle, center, self.scale, fill_val=self.seg_ignore_label) - self._filter_invalid(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'scale={self.scale}, ' - repr_str += f'center={self.center}, ' - repr_str += f'img_fill_val={self.img_fill_val}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' - repr_str += f'prob={self.prob}, ' - repr_str += f'max_rotate_angle={self.max_rotate_angle}, ' - repr_str += f'random_negative_prob={self.random_negative_prob})' - return repr_str - - -@PIPELINES.register_module() -class Translate(object): - """Translate the images, bboxes, masks and segmentation maps horizontally - or vertically. - - Args: - level (int | float): The level for Translate and should be in - range [0,_MAX_LEVEL]. - prob (float): The probability for performing translation and - should be in range [0, 1]. - img_fill_val (int | float | tuple): The filled value for image - border. If float, the same fill value will be used for all - the three channels of image. If tuple, the should be 3 - elements (e.g. equals the number of channels for image). - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - direction (str): The translate direction, either "horizontal" - or "vertical". - max_translate_offset (int | float): The maximum pixel's offset for - Translate. - random_negative_prob (float): The probability that turns the - offset negative. - min_size (int | float): The minimum pixel for filtering - invalid bboxes after the translation. - """ - - def __init__(self, - level, - prob=0.5, - img_fill_val=128, - seg_ignore_label=255, - direction='horizontal', - max_translate_offset=250., - random_negative_prob=0.5, - min_size=0): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level used for calculating Translate\'s offset should be ' \ - 'in range [0,_MAX_LEVEL]' - assert 0 <= prob <= 1.0, \ - 'The probability of translation should be in range [0, 1].' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, \ - 'img_fill_val as tuple must have 3 elements.' 
- img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError('img_fill_val must be type float or tuple.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), \ - 'all elements of img_fill_val should between range [0,255].' - assert direction in ('horizontal', 'vertical'), \ - 'direction should be "horizontal" or "vertical".' - assert isinstance(max_translate_offset, (int, float)), \ - 'The max_translate_offset must be type int or float.' - # the offset used for translation - self.offset = int(level_to_value(level, max_translate_offset)) - self.level = level - self.prob = prob - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.direction = direction - self.max_translate_offset = max_translate_offset - self.random_negative_prob = random_negative_prob - self.min_size = min_size - - def _translate_img(self, results, offset, direction='horizontal'): - """Translate the image. - - Args: - results (dict): Result dict from loading pipeline. - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - """ - for key in results.get('img_fields', ['img']): - img = results[key].copy() - results[key] = mmcv.imtranslate( - img, offset, direction, self.img_fill_val).astype(img.dtype) - - def _translate_bboxes(self, results, offset): - """Shift bboxes horizontally or vertically, according to offset.""" - h, w, c = results['img_shape'] - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - if self.direction == 'horizontal': - min_x = np.maximum(0, min_x + offset) - max_x = np.minimum(w, max_x + offset) - elif self.direction == 'vertical': - min_y = np.maximum(0, min_y + offset) - max_y = np.minimum(h, max_y + offset) - - # the boxes translated outside of image will be filtered along with - # the corresponding masks, by invoking ``_filter_invalid``. - results[key] = np.concatenate([min_x, min_y, max_x, max_y], - axis=-1) - - def _translate_masks(self, - results, - offset, - direction='horizontal', - fill_val=0): - """Translate masks horizontally or vertically.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.translate((h, w), offset, direction, fill_val) - - def _translate_seg(self, - results, - offset, - direction='horizontal', - fill_val=255): - """Translate segmentation maps horizontally or vertically.""" - for key in results.get('seg_fields', []): - seg = results[key].copy() - results[key] = mmcv.imtranslate(seg, offset, direction, - fill_val).astype(seg.dtype) - - def _filter_invalid(self, results, min_size=0): - """Filter bboxes and masks too small or translated out of image.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_size) & (bbox_h > min_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. 
gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - return results - - def __call__(self, results): - """Call function to translate images, bounding boxes, masks and - semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Translated results. - """ - if np.random.rand() > self.prob: - return results - offset = random_negative(self.offset, self.random_negative_prob) - self._translate_img(results, offset, self.direction) - self._translate_bboxes(results, offset) - # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. - self._translate_masks(results, offset, self.direction) - # fill_val set to ``seg_ignore_label`` for the ignored value - # of segmentation map. - self._translate_seg( - results, offset, self.direction, fill_val=self.seg_ignore_label) - self._filter_invalid(results, min_size=self.min_size) - return results - - -@PIPELINES.register_module() -class ColorTransform(object): - """Apply Color transformation to image. The bboxes, masks, and - segmentations are not modified. - - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Color transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_color_img(self, results, factor=1.0): - """Apply Color transformation to image.""" - for key in results.get('img_fields', ['img']): - # NOTE defaultly the image should be BGR format - img = results[key] - results[key] = mmcv.adjust_color(img, factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Color transformation. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Colored results. - """ - if np.random.rand() > self.prob: - return results - self._adjust_color_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str - - -@PIPELINES.register_module() -class EqualizeTransform(object): - """Apply Equalize transformation to image. The bboxes, masks and - segmentations are not modified. - - Args: - prob (float): The probability for performing Equalize transformation. - """ - - def __init__(self, prob=0.5): - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.prob = prob - - def _imequalize(self, results): - """Equalizes the histogram of one image.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.imequalize(img).astype(img.dtype) - - def __call__(self, results): - """Call function for Equalize transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._imequalize(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(prob={self.prob})' - - -@PIPELINES.register_module() -class BrightnessTransform(object): - """Apply Brightness transformation to image. 
The bboxes, masks and - segmentations are not modified. - - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Brightness transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_brightness_img(self, results, factor=1.0): - """Adjust the brightness of image.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.adjust_brightness(img, - factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Brightness transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._adjust_brightness_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str - - -@PIPELINES.register_module() -class ContrastTransform(object): - """Apply Contrast transformation to image. The bboxes, masks and - segmentations are not modified. - - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Contrast transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_contrast_img(self, results, factor=1.0): - """Adjust the image contrast.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Contrast transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._adjust_contrast_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolo_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolo_head.py deleted file mode 100644 index 693912795a24e8d5c43bfca7295c70f71a069085..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/yolo_head.py +++ /dev/null @@ -1,604 +0,0 @@ -# Copyright (c) 2019 Western Digital Corporation or its affiliates. 
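-# The YOLOv3 head predicts, for each feature level, a map with
-# num_anchors * (5 + num_classes) channels per spatial cell: 4 box offsets,
-# 1 objectness score and num_classes class scores per anchor.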
- -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import force_fp32 - -from mmdet.core import (build_anchor_generator, build_assigner, - build_bbox_coder, build_sampler, images_to_levels, - multi_apply, multiclass_nms) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class YOLOV3Head(BaseDenseHead, BBoxTestMixin): - """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. - - Args: - num_classes (int): The number of object classes (w/o background) - in_channels (List[int]): Number of input channels per scale. - out_channels (List[int]): The number of output channels per scale - before the final 1x1 layer. Default: (1024, 512, 256). - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. - featmap_strides (List[int]): The stride of each scale. - Should be in descending order. Default: (32, 16, 8). - one_hot_smoother (float): Set a non-zero value to enable label-smooth - Default: 0. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - loss_cls (dict): Config of classification loss. - loss_conf (dict): Config of confidence loss. - loss_xy (dict): Config of xy coordinate loss. - loss_wh (dict): Config of wh coordinate loss. - train_cfg (dict): Training config of YOLOV3 head. Default: None. - test_cfg (dict): Testing config of YOLOV3 head. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_classes, - in_channels, - out_channels=(1024, 512, 256), - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - one_hot_smoother=0., - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_wh=dict(type='MSELoss', loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Normal', std=0.01, - override=dict(name='convs_pred'))): - super(YOLOV3Head, self).__init__(init_cfg) - # Check params - assert (len(in_channels) == len(out_channels) == len(featmap_strides)) - - self.num_classes = num_classes - self.in_channels = in_channels - self.out_channels = out_channels - self.featmap_strides = featmap_strides - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - if hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.one_hot_smoother = one_hot_smoother - - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.anchor_generator = build_anchor_generator(anchor_generator) - - self.loss_cls = build_loss(loss_cls) - self.loss_conf = build_loss(loss_conf) - self.loss_xy = build_loss(loss_xy) - self.loss_wh = build_loss(loss_wh) - # usually the numbers of anchors for each level are the same - # except SSD detectors - self.num_anchors = self.anchor_generator.num_base_anchors[0] - assert len( - self.anchor_generator.num_base_anchors) == len(featmap_strides) - self._init_layers() - - @property - def num_levels(self): - return len(self.featmap_strides) - - @property - def num_attrib(self): - """int: number of attributes in pred_map, bboxes (4) + - objectness (1) + num_classes""" - - return 5 + self.num_classes - - def _init_layers(self): - self.convs_bridge = nn.ModuleList() - self.convs_pred = nn.ModuleList() - for i in range(self.num_levels): - conv_bridge = ConvModule( - self.in_channels[i], - self.out_channels[i], - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - conv_pred = nn.Conv2d(self.out_channels[i], - self.num_anchors * self.num_attrib, 1) - - self.convs_bridge.append(conv_bridge) - self.convs_pred.append(conv_pred) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple[Tensor]: A tuple of multi-level predication map, each is a - 4D-tensor of shape (batch_size, 5+num_classes, height, width). 
- """ - - assert len(feats) == self.num_levels - pred_maps = [] - for i in range(self.num_levels): - x = feats[i] - x = self.convs_bridge[i](x) - pred_map = self.convs_pred[i](x) - pred_maps.append(pred_map) - - return tuple(pred_maps), - - @force_fp32(apply_to=('pred_maps', )) - def get_bboxes(self, - pred_maps, - img_metas, - cfg=None, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - pred_maps (list[Tensor]): Raw predictions for a batch of images. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. Default: None. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. - """ - num_levels = len(pred_maps) - pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] - scale_factors = [ - img_metas[i]['scale_factor'] - for i in range(pred_maps_list[0].shape[0]) - ] - result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg, - rescale, with_nms) - return result_list - - def _get_bboxes(self, - pred_maps_list, - scale_factors, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into bbox predictions. - - Args: - pred_maps_list (list[Tensor]): Prediction maps for different scales - of each single image in the batch. - scale_factors (list(ndarray)): Scale factor of the image arrange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. 
- """ - cfg = self.test_cfg if cfg is None else cfg - assert len(pred_maps_list) == self.num_levels - - device = pred_maps_list[0].device - batch_size = pred_maps_list[0].shape[0] - - featmap_sizes = [ - pred_maps_list[i].shape[-2:] for i in range(self.num_levels) - ] - multi_lvl_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device) - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), device=device, dtype=torch.long) - - multi_lvl_bboxes = [] - multi_lvl_cls_scores = [] - multi_lvl_conf_scores = [] - for i in range(self.num_levels): - # get some key info for current scale - pred_map = pred_maps_list[i] - stride = self.featmap_strides[i] - # (b,h, w, num_anchors*num_attrib) -> - # (b,h*w*num_anchors, num_attrib) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(batch_size, -1, - self.num_attrib) - # Inplace operation like - # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])``` - # would create constant tensor when exporting to onnx - pred_map_conf = torch.sigmoid(pred_map[..., :2]) - pred_map_rest = pred_map[..., 2:] - pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) - pred_map_boxes = pred_map[..., :4] - multi_lvl_anchor = multi_lvl_anchors[i] - multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) - bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, - pred_map_boxes, stride) - # conf and cls - conf_pred = torch.sigmoid(pred_map[..., 4]) - cls_pred = torch.sigmoid(pred_map[..., 5:]).view( - batch_size, -1, self.num_classes) # Cls pred one-hot. - - # Get top-k prediction - from mmdet.core.export import get_k_for_topk - nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) - if nms_pre > 0: - _, topk_inds = conf_pred.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - if torch.onnx.is_in_onnx_export(): - transformed_inds = ( - bbox_pred.shape[1] * batch_inds + topk_inds) - bbox_pred = bbox_pred.reshape( - -1, 4)[transformed_inds, :].reshape(batch_size, -1, 4) - cls_pred = cls_pred.reshape( - -1, self.num_classes)[transformed_inds, :].reshape( - batch_size, -1, self.num_classes) - conf_pred = conf_pred.reshape(-1, - 1)[transformed_inds].reshape( - batch_size, -1) - else: - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - cls_pred = cls_pred[batch_inds, topk_inds, :] - conf_pred = conf_pred[batch_inds, topk_inds] - # Save the result of current scale - multi_lvl_bboxes.append(bbox_pred) - multi_lvl_cls_scores.append(cls_pred) - multi_lvl_conf_scores.append(conf_pred) - - # Merge the results of different scales together - batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) - batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) - batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) - - # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment - if torch.onnx.is_in_onnx_export() and with_nms: - from mmdet.core.export import add_dummy_nms_for_onnx - conf_thr = cfg.get('conf_thr', -1) - score_thr = cfg.get('score_thr', -1) - # follow original pipeline of YOLOv3 - if conf_thr > 0: - mask = (batch_mlvl_conf_scores >= conf_thr).float() - batch_mlvl_conf_scores *= mask - if score_thr > 0: - mask = (batch_mlvl_scores > score_thr).float() - batch_mlvl_scores *= mask - batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze( - 2).expand_as(batch_mlvl_scores) - batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores - 
max_output_boxes_per_class = cfg.nms.get( - 'max_output_boxes_per_class', 200) - iou_threshold = cfg.nms.get('iou_threshold', 0.5) - # keep aligned with original pipeline, improve - # mAP by 1% for YOLOv3 in ONNX - score_threshold = 0 - nms_pre = cfg.get('deploy_nms_pre', -1) - return add_dummy_nms_for_onnx( - batch_mlvl_bboxes, - batch_mlvl_scores, - max_output_boxes_per_class, - iou_threshold, - score_threshold, - nms_pre, - cfg.max_per_img, - ) - - if with_nms and (batch_mlvl_conf_scores.size(0) == 0): - return torch.zeros((0, 5)), torch.zeros((0, )) - - if rescale: - batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( - scale_factors).unsqueeze(1) - - # In mmdet 2.x, the class_id for background is num_classes. - # i.e., the last column. - padding = batch_mlvl_scores.new_zeros(batch_size, - batch_mlvl_scores.shape[1], 1) - batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) - - # Support exporting to onnx without nms - if with_nms and cfg.get('nms', None) is not None: - det_results = [] - for (mlvl_bboxes, mlvl_scores, - mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_conf_scores): - # Filtering out all predictions with conf < conf_thr - conf_thr = cfg.get('conf_thr', -1) - if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()): - # TensorRT not support NonZero - # add as_tuple=False for compatibility in Pytorch 1.6 - # flatten would create a Reshape op with constant values, - # and raise RuntimeError when doing inference in ONNX - # Runtime with a different input image (#4221). - conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero( - as_tuple=False).squeeze(1) - mlvl_bboxes = mlvl_bboxes[conf_inds, :] - mlvl_scores = mlvl_scores[conf_inds, :] - mlvl_conf_scores = mlvl_conf_scores[conf_inds] - - det_bboxes, det_labels = multiclass_nms( - mlvl_bboxes, - mlvl_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=mlvl_conf_scores) - det_results.append(tuple([det_bboxes, det_labels])) - - else: - det_results = [ - tuple(mlvl_bs) - for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_conf_scores) - ] - return det_results - - @force_fp32(apply_to=('pred_maps', )) - def loss(self, - pred_maps, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - pred_maps (list[Tensor]): Prediction map for each scale level, - shape (N, num_anchors * num_attrib, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - num_imgs = len(img_metas) - device = pred_maps[0][0].device - - featmap_sizes = [ - pred_maps[i].shape[-2:] for i in range(self.num_levels) - ] - multi_level_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - responsible_flag_list = [] - for img_id in range(len(img_metas)): - responsible_flag_list.append( - self.anchor_generator.responsible_flags( - featmap_sizes, gt_bboxes[img_id], device)) - - target_maps_list, neg_maps_list = self.get_targets( - anchor_list, responsible_flag_list, gt_bboxes, gt_labels) - - losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( - self.loss_single, pred_maps, target_maps_list, neg_maps_list) - - return dict( - loss_cls=losses_cls, - loss_conf=losses_conf, - loss_xy=losses_xy, - loss_wh=losses_wh) - - def loss_single(self, pred_map, target_map, neg_map): - """Compute loss of a single image from a batch. - - Args: - pred_map (Tensor): Raw predictions for a single level. - target_map (Tensor): The Ground-Truth target for a single level. - neg_map (Tensor): The negative masks for a single level. - - Returns: - tuple: - loss_cls (Tensor): Classification loss. - loss_conf (Tensor): Confidence loss. - loss_xy (Tensor): Regression loss of x, y coordinate. - loss_wh (Tensor): Regression loss of w, h coordinate. - """ - - num_imgs = len(pred_map) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(num_imgs, -1, self.num_attrib) - neg_mask = neg_map.float() - pos_mask = target_map[..., 4] - pos_and_neg_mask = neg_mask + pos_mask - pos_mask = pos_mask.unsqueeze(dim=-1) - if torch.max(pos_and_neg_mask) > 1.: - warnings.warn('There is overlap between pos and neg sample.') - pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) - - pred_xy = pred_map[..., :2] - pred_wh = pred_map[..., 2:4] - pred_conf = pred_map[..., 4] - pred_label = pred_map[..., 5:] - - target_xy = target_map[..., :2] - target_wh = target_map[..., 2:4] - target_conf = target_map[..., 4] - target_label = target_map[..., 5:] - - loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) - loss_conf = self.loss_conf( - pred_conf, target_conf, weight=pos_and_neg_mask) - loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) - loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) - - return loss_cls, loss_conf, loss_xy, loss_wh - - def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, - gt_labels_list): - """Compute target maps for anchors in multiple images. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_total_anchors, 4). - responsible_flag_list (list[list[Tensor]]): Multi level responsible - flags of each image. Each element is a tensor of shape - (num_total_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - target_map_list (list[Tensor]): Target map of each level. - - neg_map_list (list[Tensor]): Negative map of each level. 
- """ - num_imgs = len(anchor_list) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - results = multi_apply(self._get_targets_single, anchor_list, - responsible_flag_list, gt_bboxes_list, - gt_labels_list) - - all_target_maps, all_neg_maps = results - assert num_imgs == len(all_target_maps) == len(all_neg_maps) - target_maps_list = images_to_levels(all_target_maps, num_level_anchors) - neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) - - return target_maps_list, neg_maps_list - - def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, - gt_labels): - """Generate matching bounding box prior and converted GT. - - Args: - anchors (list[Tensor]): Multi-level anchors of the image. - responsible_flags (list[Tensor]): Multi-level responsible flags of - anchors - gt_bboxes (Tensor): Ground truth bboxes of single image. - gt_labels (Tensor): Ground truth labels of single image. - - Returns: - tuple: - target_map (Tensor): Predication target map of each - scale level, shape (num_total_anchors, - 5+num_classes) - neg_map (Tensor): Negative map of each scale level, - shape (num_total_anchors,) - """ - - anchor_strides = [] - for i in range(len(anchors)): - anchor_strides.append( - torch.tensor(self.featmap_strides[i], - device=gt_bboxes.device).repeat(len(anchors[i]))) - concat_anchors = torch.cat(anchors) - concat_responsible_flags = torch.cat(responsible_flags) - - anchor_strides = torch.cat(anchor_strides) - assert len(anchor_strides) == len(concat_anchors) == \ - len(concat_responsible_flags) - assign_result = self.assigner.assign(concat_anchors, - concat_responsible_flags, - gt_bboxes) - sampling_result = self.sampler.sample(assign_result, concat_anchors, - gt_bboxes) - - target_map = concat_anchors.new_zeros( - concat_anchors.size(0), self.num_attrib) - - target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, - anchor_strides[sampling_result.pos_inds]) - - target_map[sampling_result.pos_inds, 4] = 1 - - gt_labels_one_hot = F.one_hot( - gt_labels, num_classes=self.num_classes).float() - if self.one_hot_smoother != 0: # label smooth - gt_labels_one_hot = gt_labels_one_hot * ( - 1 - self.one_hot_smoother - ) + self.one_hot_smoother / self.num_classes - target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ - sampling_result.pos_assigned_gt_inds] - - neg_map = concat_anchors.new_zeros( - concat_anchors.size(0), dtype=torch.uint8) - neg_map[sampling_result.neg_inds] = 1 - - return target_map, neg_map - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. 
-
-        Returns:
-            list[ndarray]: bbox results of each class
-        """
-        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_utils/test_version.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_utils/test_version.py
deleted file mode 100644
index 6ddf45c0e2854cb64006281363afe5547aa886c2..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_utils/test_version.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from mmdet import digit_version
-
-
-def test_version_check():
-    assert digit_version('1.0.5') > digit_version('1.0.5rc0')
-    assert digit_version('1.0.5') > digit_version('1.0.4rc0')
-    assert digit_version('1.0.5') > digit_version('1.0rc0')
-    assert digit_version('1.0.0') > digit_version('0.6.2')
-    assert digit_version('1.0.0') > digit_version('0.2.16')
-    assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0')
-    assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0')
-    assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0')
-    assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1')
-    assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1')
-    assert digit_version('1.0.0') > digit_version('1.0.0rc1')
diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/models/diffusion/__init__.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/models/diffusion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/transiteration/nemo_stt_kz_quartznet15x5/README.md b/spaces/transiteration/nemo_stt_kz_quartznet15x5/README.md
deleted file mode 100644
index 98900eaa1f1ecffedd3143447643850022e0d100..0000000000000000000000000000000000000000
--- a/spaces/transiteration/nemo_stt_kz_quartznet15x5/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-title: NeMo_STT_KZ_Quartznet15x5
-emoji: 🔥
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.42.0
-app_file: app.py
-pinned: false
----
-
-## Model Overview
-
-To prepare and experiment with the model, you need to install the [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo) [1].\
-We advise installing it after you have installed the most recent version of PyTorch.\
-\
-This model has been trained on an NVIDIA GeForce RTX 2070 with:\
-Python 3.7.15\
-NumPy 1.21.6\
-PyTorch 1.21.1\
-NVIDIA NeMo 1.7.0
-
-```
-pip install nemo_toolkit['all']
-```
-
-## Model Usage:
-
-The model is accessible within the NeMo toolkit [1] and can serve as a pre-trained checkpoint for inference or for fine-tuning on a different dataset.
-
-#### How to Import
-```
-import nemo.collections.asr as nemo_asr
-asr_model = nemo_asr.models.EncDecCTCModel.restore_from(restore_path="stt_kz_quartznet15x5.nemo")
-```
-#### How to Transcribe a Single Audio File
-First, download a sample audio file to test the model:
-```
-wget https://asr-kz-example.s3.us-west-2.amazonaws.com/sample_kz.wav
-```
-Then transcribe it with:
-```
-asr_model.transcribe(['sample_kz.wav'])
-```
-#### How to Transcribe Multiple Audio Files
-```
-python transcribe_speech.py model_path=stt_kz_quartznet15x5.nemo audio_dir=""
-```
-
-If you have a manifest file describing your audio files (an example manifest format is sketched at the end of this README):
-```
-python transcribe_speech.py model_path=stt_kz_quartznet15x5.nemo dataset_manifest=manifest.json
-```
-
-## Input and Output
-
-This model takes mono-channel .WAV audio files with a sample rate of 16,000 Hz (16 kHz) as input.\
-It outputs the transcribed text of the spoken words for a given audio sample.
-
-## Model Architecture
-
-[QuartzNet 15x5](https://catalog.ngc.nvidia.com/orgs/nvidia/models/quartznet15x5) [2] is a Jasper-like network that uses separable convolutions and larger filter sizes. It has accuracy comparable to Jasper while having far fewer parameters. This particular model has 15 blocks, each repeated 5 times.
-
-## Training and Dataset
-
-The model was fine-tuned for Kazakh speech over several epochs, starting from the pre-trained English model.
-[Kazakh Speech Corpus 2](https://issai.nu.edu.kz/kz-speech-corpus/?version=1.1) (KSC2) [3] is the first industrial-scale open-source Kazakh speech corpus.\
-In total, KSC2 contains around 1.2k hours of high-quality transcribed data comprising over 600k utterances.
-
-## Performance
-
-Average WER: 15.53%
-
-## Limitations
-
-Because of limited GPU compute, we used a lightweight model architecture for fine-tuning.\
-In general, this makes inference faster but may reduce overall accuracy.\
-In addition, if the speech contains technical terms or dialect words the model has not learned, it may not transcribe them correctly.
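-
-#### Example Manifest
-
-For reference, NeMo's `transcribe_speech.py` expects the manifest to be a JSON-lines file with one entry per audio file. A minimal sketch (the paths and durations below are placeholders; the field names follow the usual NeMo manifest conventions):
-
-```
-{"audio_filepath": "audio/sample_001.wav", "duration": 3.2, "text": ""}
-{"audio_filepath": "audio/sample_002.wav", "duration": 5.7, "text": ""}
-```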
- -## References - -[1] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo) - -[2] [QuartzNet 15x5](https://catalog.ngc.nvidia.com/orgs/nvidia/models/quartznet15x5) - -[3] [Kazakh Speech Corpus 2](https://issai.nu.edu.kz/kz-speech-corpus/?version=1.1) \ No newline at end of file diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/str_half2full_tool.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/str_half2full_tool.py deleted file mode 100644 index 17edea2788dc8a1416c604613dcb3ca92580f1fc..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/str_half2full_tool.py +++ /dev/null @@ -1,220 +0,0 @@ -''' -字符串-全角半角转换工具 - -全角半角对照表,wiki -https://zh.wikipedia.org/wiki/%E5%85%A8%E5%BD%A2%E5%92%8C%E5%8D%8A%E5%BD%A2 -''' - -_jp_half2full_dict = { - '。': '。', - '「': '「', - '」': '」', - '、': '、', - '・': '・', - 'ヲ': 'ヲ', - 'ァ': 'ァ', - 'ィ': 'ィ', - 'ゥ': 'ゥ', - 'ェ': 'ェ', - 'ォ': 'ォ', - 'ャ': 'ャ', - 'ュ': 'ュ', - 'ョ': 'ョ', - 'ッ': 'ッ', - 'ー': 'ー', - 'ア': 'ア', - 'イ': 'イ', - 'ウ': 'ウ', - 'エ': 'エ', - 'オ': 'オ', - 'カ': 'カ', - 'キ': 'キ', - 'ク': 'ク', - 'ケ': 'ケ', - 'コ': 'コ', - 'サ': 'サ', - 'シ': 'シ', - 'ス': 'ス', - 'セ': 'セ', - 'ソ': 'ソ', - 'タ': 'タ', - 'チ': 'チ', - 'ツ': 'ツ', - 'テ': 'テ', - 'ト': 'ト', - 'ナ': 'ナ', - 'ニ': 'ニ', - 'ヌ': 'ヌ', - 'ネ': 'ネ', - 'ノ': 'ノ', - 'ハ': 'ハ', - 'ヒ': 'ヒ', - 'フ': 'フ', - 'ヘ': 'ヘ', - 'ホ': 'ホ', - 'マ': 'マ', - 'ミ': 'ミ', - 'ム': 'ム', - 'メ': 'メ', - 'モ': 'モ', - 'ヤ': 'ヤ', - 'ユ': 'ユ', - 'ヨ': 'ヨ', - 'ラ': 'ラ', - 'リ': 'リ', - 'ル': 'ル', - 'レ': 'レ', - 'ロ': 'ロ', - 'ワ': 'ワ', - 'ン': 'ン', - '゙': '゛', - '゚': '゜', -} - -_jp_full2half_dict = dict(zip(_jp_half2full_dict.values(), _jp_half2full_dict.keys())) - - -_kr_half2full_dict = { - 'ᅠ': 'ㅤ', - 'ᄀ': 'ㄱ', - 'ᄁ': 'ㄲ', - 'ᆪ': 'ㄳ', - 'ᄂ': 'ㄴ', - 'ᆬ': 'ㄵ', - 'ᆭ': 'ㄶ', - 'ᄃ': 'ㄷ', - 'ᄄ': 'ㄸ', - 'ᄅ': 'ㄹ', - 'ᆰ': 'ㄺ', - 'ᆱ': 'ㄻ', - 'ᆲ': 'ㄼ', - 'ᆳ': 'ㄽ', - 'ᆴ': 'ㄾ', - 'ᆵ': 'ㄿ', - 'ᄚ': 'ㅀ', - 'ᄆ': 'ㅁ', - 'ᄇ': 'ㅂ', - 'ᄈ': 'ㅃ', - 'ᄡ': 'ㅄ', - 'ᄉ': 'ㅅ', - 'ᄊ': 'ㅆ', - 'ᄋ': 'ㅇ', - 'ᄌ': 'ㅈ', - 'ᄍ': 'ㅉ', - 'ᄎ': 'ㅊ', - 'ᄏ': 'ㅋ', - 'ᄐ': 'ㅌ', - 'ᄑ': 'ㅍ', - 'ᄒ': 'ㅎ', - 'ᅡ': 'ㅏ', - 'ᅢ': 'ㅐ', - 'ᅣ': 'ㅑ', - 'ᅤ': 'ㅒ', - 'ᅥ': 'ㅓ', - 'ᅦ': 'ㅔ', - 'ᅧ': 'ㅕ', - 'ᅨ': 'ㅖ', - 'ᅩ': 'ㅗ', - 'ᅪ': 'ㅘ', - 'ᅫ': 'ㅙ', - 'ᅬ': 'ㅚ', - 'ᅭ': 'ㅛ', - 'ᅮ': 'ㅜ', - 'ᅯ': 'ㅝ', - 'ᅰ': 'ㅞ', - 'ᅱ': 'ㅟ', - 'ᅲ': 'ㅠ', - 'ᅳ': 'ㅡ', - 'ᅴ': 'ㅢ', - 'ᅵ': 'ㅣ', -} - -_kr_full2half_dict = dict(zip(_kr_half2full_dict.values(), _kr_half2full_dict.keys())) - - -_other_half2full_dict = { - '⦅': '⦅', - '⦆': '⦆', - '¢': '¢', - '£': '£', - '¬': '¬', - '¯': ' ̄', - '¦': '¦', - '¥': '¥', - '₩': '₩', - '│': '│', - '←': '←', - '↑': '↑', - '→': '→', - '↓': '↓', - '■': '■', - '○': '○', -} - -_other_full2half_dict = dict(zip(_other_half2full_dict.values(), _other_half2full_dict.keys())) - - -def str_full2half(s: str, ignore_chars=None, ignore_ascii=False, ignore_jp=False, ignore_kr=False, ignore_other=False): - ignore_chars = set() if ignore_chars is None else set(ignore_chars) - - ns = [] - for c in s: - nc = None - - if c in ignore_chars: - nc = c - - if nc is None and not ignore_ascii: - if '\uff01' <= c <= '\uff5e': - nc = chr(ord(c) - 0xfee0) - elif c == '\u3000': - nc = '\u0020' - - if nc is None and not ignore_jp: - nc = _jp_full2half_dict.get(c, None) - - if nc is None and not ignore_kr: - nc = _kr_full2half_dict.get(c, None) - - if nc is None and not ignore_other: - nc = _other_full2half_dict.get(c, None) - - if nc is None: - nc = c - ns.append(nc) - - ns = ''.join(ns) - return ns - - -def str_half2full(s: str, ignore_chars=None, 
ignore_ascii=False, ignore_jp=False, ignore_kr=False, ignore_other=False): - ignore_chars = set() if ignore_chars is None else set(ignore_chars) - - ns = [] - for c in s: - nc = None - - if c in ignore_chars: - nc = c - - if nc is None and not ignore_ascii: - if '\u0021' <= c <= '\u007e': - nc = chr(ord(c) + 0xfee0) - elif c == '\u0020': - nc = '\u3000' - - if nc is None and not ignore_jp: - nc = _jp_half2full_dict.get(c, None) - - if nc is None and not ignore_kr: - nc = _kr_half2full_dict.get(c, None) - - if nc is None and not ignore_other: - nc = _other_half2full_dict.get(c, None) - - if nc is None: - nc = c - ns.append(nc) - - ns = ''.join(ns) - return ns diff --git a/spaces/ulysses115/ulysses115-pmvoice/train.py b/spaces/ulysses115/ulysses115-pmvoice/train.py deleted file mode 100644 index 37ba61847cef8b2a392b0fe89f0296e117925687..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/ulysses115-pmvoice/train.py +++ /dev/null @@ -1,296 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler - -import librosa -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) - -import commons -import utils -from data_utils import ( - TextAudioLoader, - TextAudioCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - - -torch.backends.cudnn.benchmark = True -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
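# The lines below implement PyTorch's standard single-node multi-GPU launch:
# mp.spawn starts one run(rank, ...) worker process per visible GPU, and each
# worker later joins an NCCL process group that rendezvouses over the
# MASTER_ADDR/MASTER_PORT environment variables set here (init_method='env://').
# A minimal standalone sketch of the same pattern, with a hypothetical toy
# worker that is not part of this repo:
#
#   import os
#   import torch
#   import torch.distributed as dist
#   import torch.multiprocessing as mp
#
#   def _toy_worker(rank, world_size):
#       # mp.spawn passes the process index (rank) as the first argument
#       dist.init_process_group('nccl', init_method='env://',
#                               world_size=world_size, rank=rank)
#       torch.cuda.set_device(rank)  # pin each process to its own GPU
#       dist.destroy_process_group()
#
#   if __name__ == '__main__':
#       os.environ['MASTER_ADDR'] = 'localhost'
#       os.environ['MASTER_PORT'] = '8000'
#       n = torch.cuda.device_count()
#       mp.spawn(_toy_worker, nprocs=n, args=(n,))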
- - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '8000' - - hps = utils.get_hparams() - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32,300,400,500,600,700,800,900,1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioCollate() - train_loader = DataLoader(train_dataset, num_workers=4, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=4, shuffle=False, - batch_size=hps.train.batch_size, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - - try: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d) - global_step = (epoch_str - 1) * len(train_loader) - except: - epoch_str = 1 - global_step = 0 - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank==0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d = nets - optim_g, optim_d = optims - scheduler_g, scheduler_d = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader): - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = 
spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask,\ - (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths) - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank==0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, '/content/drive/MyDrive/Genshin_TTS/G_paimon.pth') - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, '/content/drive/MyDrive/Genshin_TTS/D_paimon.pth') - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader): - x, x_lengths = x.cuda(0), x_lengths.cuda(0) - spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0) - y, y_lengths = y.cuda(0), y_lengths.cuda(0) - - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - spec = spec[:1] - spec_lengths = spec_lengths[:1] - y = y[:1] - y_lengths = y_lengths[:1] - break - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict = { - "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - } - audio_dict = { - "gen/audio": y_hat[0,:,:y_hat_lengths[0]] - } - if global_step == 0: - image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - - -if __name__ == "__main__": - - main() diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Codigo Para Activar El Juego Scania Truck Driving Simulator Todo Lo Que Necesitas Saber.md b/spaces/usbethFlerru/sovits-modelsV2/example/Codigo Para Activar El Juego Scania Truck Driving Simulator Todo Lo Que Necesitas Saber.md deleted file mode 100644 index 530e4fe0b6bc78cdffc2b572d1d2b00a676d1378..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Codigo Para 
Activar El Juego Scania Truck Driving Simulator Todo Lo Que Necesitas Saber.md +++ /dev/null @@ -1,6 +0,0 @@ - -

So your card can convert images from 2D to 3D without using your TV's 3D converter. For this you need an HDMI v1.4a cable, which supports 3D playback, and a 3D-compatible TV or monitor (I have only tried this with a Samsung active-3D TV and an AMD RADEON HD 6950 video card, and only with a single screen; HD3D can also be applied to AMD's three-screen EYE-INFINITY technology).

Do you just plug the cable into the TV and immediately see everything in 3D? No!

You have to use a program made only for this AMD technology, called TRIDEF 3D.


When you open the program, the 3D has to activate on its own; do not use the TV's 3D emulator. The 3D activates when you run games, movies, and photos in full screen; in windowed mode the "magic" cannot happen. The program itself warns that the application must be in full screen for the 3D to activate.



(program start screen; this should appear in 3D when the application is run)





Features:
-compatible with 3D movies (if the PC is used as a Blu-ray 3D player with a 3D Blu-ray drive)
-watch 2D DVD movies in 3D
-watch video files in many formats in 3D
-view photos and image files in 3D
-play games in 3D
-3D depth fully adjustable to the user's taste

How do you make a game display in 3D when you run it?

You have to configure the TriDef 3D Ignition program. How? I will leave you some images.



(This is the application for running games with 3D.) Go to Tools - Show settings (see below)
List of 100% compatible games

    -

    Codigo Para Activar El Juego Scania Truck Driving Simulator


    DOWNLOAD ★★★★★ https://urlcod.com/2uyX0E



    -

For the 3D to activate correctly, the display must be at 1080p (p = progressive) with a 60 Hz refresh rate.

With that done, TriDef should use the HD3D feature of your AMD card.
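If you want to check from a script that the desktop is already in that mode, a minimal sketch in Python with the pywin32 package could look like the one below (assumptions: Windows with pywin32 installed; the 1920x1080 @ 60 Hz values are simply the requirement stated above, and TriDef itself is not involved):

import win32api
import win32con

# Query the current desktop display mode (width, height, refresh rate)
dm = win32api.EnumDisplaySettings(None, win32con.ENUM_CURRENT_SETTINGS)
ok = (dm.PelsWidth, dm.PelsHeight, dm.DisplayFrequency) == (1920, 1080, 60)
print('%dx%d @ %d Hz -> %s' % (dm.PelsWidth, dm.PelsHeight, dm.DisplayFrequency,
                               'ready for 3D' if ok else 'adjust your display settings'))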

Last step: add the game you want, turn on your 3D glasses, and enjoy!!!

(Note: for a game to run with 3D, you have to launch it through TriDef Ignition; see the example images above, with the green icon.)


Greetings to the whole community; I hope this post has been a big help, and I hope to make more contributions in the future.

For all of you, the program:

    -
    -
    \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Opticut 5.20 Con Crack Gratis __FULL__.md b/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Opticut 5.20 Con Crack Gratis __FULL__.md deleted file mode 100644 index e9554ce50189506c2cfb8474571b1ca29adf77eb..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Opticut 5.20 Con Crack Gratis __FULL__.md +++ /dev/null @@ -1,158 +0,0 @@ - -

Download Opticut 5.20 with a free crack: the best option for optimizing your wood cuts

    - -

If you work with wood, you know how important it is to get the most out of every piece and reduce material waste. For that, you need a program that helps you calculate the most efficient and profitable cuts. In this article we introduce Opticut 5.20, a piece of software that does exactly that and that you can download with a free crack.

    -

    descargar opticut 5.20 con crack gratis


Download Zip: https://urlcod.com/2uyXXb



    - -

What is Opticut 5.20 and what is it for?

    - -

Opticut 5.20 is a program in the cut-optimizer category. Its job is to analyze the dimensions of the wood panels and profiles you have available and of the pieces you need to obtain, and to work out where to make the cuts so you get the most out of the material and minimize costs.

    - -

Opticut 5.20 is one of the most complete programs in its category, since it includes two modules: one for panel cuts and another for profile and strip cuts. It also has a simple, intuitive interface that lets you enter your data easily and view the results as graphics and lists.

    - -

How do you download Opticut 5.20 with a free crack, and how do you install it?

    - -

If you want to download Opticut 5.20 with a free crack, follow these steps:

    - -
      -
1. Search the internet for a website that offers the program with a free crack. Keep in mind that some sites may be fraudulent or contain viruses, so be careful and use an antivirus.
2. Download the program file and the crack file. They will usually be compressed in ZIP or RAR format, so you will need a program to extract them.
3. Install the program following the installer's instructions. Don't open it yet.
4. Copy the crack file and paste it into the folder where the program was installed. Replace the original file if prompted.
5. Open the program and enjoy all of its features without limitations.
    - -

What are the advantages of downloading Opticut 5.20 with a free crack?

    - -

Downloading Opticut 5.20 with a free crack has some advantages, for example:

    - -
      -
• You don't have to pay anything for the program, which normally has a high price.
• You can use all of the program's functions without restrictions or expiry.
• You can optimize your wood cuts and save money on your carpentry projects.
• You can create and print your own road maps with Opticut 5.20 with a free crack.
    - -

As you can see, downloading Opticut 5.20 with a free crack is an excellent option for optimizing your wood cuts and improving your productivity. If you want to try this program, just follow the steps above and enjoy its benefits.

    -

What precautions should you take when downloading Opticut 5.20 with a free crack?

    - -

Although downloading Opticut 5.20 with a free crack has its advantages, it also carries some risks you should keep in mind. Some of them are:

    - -
      -
• You are infringing intellectual property law and could face legal problems if you are found out.
• You are putting your computer's security at risk and could infect it with viruses or malware if you download the program or the crack from unreliable sites.
• You are missing out on the technical support and program updates you would get if you bought it legally.
• You are hurting the program's developer and their work if you don't pay them for their product.
    - -

For these reasons, we recommend that you be careful and responsible when downloading Opticut 5.20 with a free crack. If you can, it is better to buy the program and support its creator.

    - -

What alternatives are there to downloading Opticut 5.20 with a free crack?

    - -

If you don't want to download Opticut 5.20 with a free crack, but you don't want to pay for the program either, there are alternatives you can try. Some of them are:

    -

    - -
      -
• Use the trial or demo version of Opticut 5.20, which lets you use the program with some limitations for a set period of time.
• Use other free or open-source programs that also let you optimize wood cuts, such as CutList Plus, MaxCut, or CutMaster 2D.
• Use online services that offer the same function as Opticut 5.20, such as Cut Optimizer or Cutting Planner.
    - -

These alternatives can be a good option if you want to save money and avoid the risks of downloading Opticut 5.20 with a free crack. Bear in mind, however, that they may not match Opticut 5.20's features or quality.

    - -


    -

What features does Opticut 5.20 have?

    - -

Opticut 5.20 has many features that make it stand out among other cut optimizers. Some of them are:

    - -
      -
• It has a database of materials and pieces that you can customize to your needs.
• It has a data import and export system that lets you work with other programs such as Excel, AutoCAD, or Polyboard.
• It has an automatic optimization system that proposes the best cutting solution according to your quality, price, or time criteria.
• It has a graphical simulation system that shows the cutting result in 2D or 3D, with colors, labels, and symbols.
• It has a printing system that lets you generate detailed reports with the cutting plans, piece lists, and material costs.
    - -

What do users say about Opticut 5.20?

    - -

Opticut 5.20 gets very good reviews from the users who have tried it. Some of the comments that can be found online are:

    - -
    -

    "Opticut 5.20 es un programa muy completo y fácil de usar. Me ha ayudado a optimizar los cortes de madera y a ahorrar mucho dinero en mis proyectos."

    -Usuario anónimo -
    - -
    -

    "Opticut 5.20 es un programa imprescindible para cualquier carpintero o profesional del sector de la madera. Tiene muchas funciones y opciones que te permiten adaptarlo a tus necesidades."

    -Usuario anónimo -
    - -
    -

    "Opticut 5.20 es un programa que vale la pena descargar con crack gratis. Tiene una calidad excelente y funciona muy bien. Lo recomiendo a todo el mundo."

    -Usuario anónimo -
    - -

As you can see, Opticut 5.20 has a very good reputation and lives up to the expectations of the people who use it.

    -

What tips do we have for using Opticut 5.20?

    - -

Opticut 5.20 makes the job of optimizing wood cuts much easier, but you also need to follow a few tips to get the best results. Some of them are:

    - -
      -
• Double-check the dimensions of the panels and profiles you have available and of the pieces you need to obtain, and make sure there are no errors or inconsistencies.
• Tune the optimization parameters to your quality, price, or time criteria, and choose the cutting method that suits you best.
• Review the results the program proposes and verify that they fit your needs and expectations.
• Print the cutting plans and piece lists with all the information needed to make the cuts accurately and safely.
• Save your project data and the cutting solutions you have obtained so you can consult or modify them in the future.
    - -

What other programs similar to Opticut 5.20 are there?

    - -

Opticut 5.20 is one of the most popular and complete programs for optimizing wood cuts, but it is not the only one. There are other, similar programs that may interest you as well, for example:

    - -
      -
• CutList Plus: a program for creating cut lists for wood, metal, plastic, and other materials, with a simple, attractive design.
• MaxCut: a program for optimizing cuts of panels and profiles in wood, metal, glass, and other materials, with an intuitive, modern interface.
• CutMaster 2D: a program for optimizing cuts of panels and profiles in wood, metal, glass, and other materials, with a classic, functional interface.
    - -

These programs also have their pros and cons, so we recommend trying them out and comparing them with Opticut 5.20 to see which fits your needs best.

    -

What benefits does using Opticut 5.20 bring to your carpentry projects?

    - -

Using Opticut 5.20 for your carpentry projects has many benefits that will improve your work and your profitability. Some of them are:

    - -
      -
• You can save money on materials by getting the most out of every wood panel or profile and reducing waste.
• You can save time in the cutting process by having the plans and piece lists ready to use and follow.
• You can improve the quality of your products by making precise cuts and avoiding errors or defects.
• You can increase your customers' satisfaction by offering made-to-measure products personalized to their needs and tastes.
• You can stand out from the competition by using a professional, advanced program that gives you an edge over other carpenters and woodworking professionals.
    - -

What drawbacks does using Opticut 5.20 have for your carpentry projects?

    - -

Using Opticut 5.20 for your carpentry projects also has some drawbacks you should keep in mind. Some of them are:

    - -
      -
• You need a computer that meets the minimum requirements to install and run the program without problems.
• You need an internet connection to download the program and the crack, and to access updates or technical support.
• You need basic computer skills to use the program correctly and get the most out of it.
• You need a printer to print the cutting plans and piece lists the program generates.
• You need a cutting machine that is compatible with the program and can follow the instructions it sends.
    - -

These drawbacks can be an inconvenience for some users, but they are also easy to overcome if you have the necessary resources and skills.

    -

Conclusion

    - -

Opticut 5.20 is a program that lets you optimize wood cuts and reduce material costs in your carpentry projects. It is one of the most complete and easiest to use in its category, and you can download it with a free crack by following a few simple steps.

    - -

But downloading Opticut 5.20 with a free crack also has its risks and drawbacks, such as legal problems, viruses, lack of support, and harm to the developer. So we recommend that you be careful and responsible about it, and consider other free or legal alternatives if you want to avoid these problems.

    - -

We hope this article has been useful and that you have learned everything you need to know about downloading Opticut 5.20 with a free crack. If you liked it, share it with your friends and leave us a comment with your opinion.

    -
    -
    \ No newline at end of file diff --git a/spaces/vinic1999/foodvisionbig/README.md b/spaces/vinic1999/foodvisionbig/README.md deleted file mode 100644 index 8e694dffae67545f660c7335346b5a5b7bced705..0000000000000000000000000000000000000000 --- a/spaces/vinic1999/foodvisionbig/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Foodvisionbig -emoji: 🍔 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vishnu0001/text2mesh/shap_e/models/nn/utils.py b/spaces/vishnu0001/text2mesh/shap_e/models/nn/utils.py deleted file mode 100644 index 76998a3311b1c2a5e1ff4c0fc8681af1d21ce9de..0000000000000000000000000000000000000000 --- a/spaces/vishnu0001/text2mesh/shap_e/models/nn/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Iterable, Union - -import numpy as np -import torch - -ArrayType = Union[np.ndarray, Iterable[int], torch.Tensor] - - -def to_torch(arr: ArrayType, dtype=torch.float): - if isinstance(arr, torch.Tensor): - return arr - return torch.from_numpy(np.array(arr)).to(dtype) - - -def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor: - """ - Sample from the given discrete probability distribution with replacement. - - The i-th bin is assumed to have mass pmf[i]. - - :param pmf: [batch_size, *shape, n_samples, 1] where (pmf.sum(dim=-2) == 1).all() - :param n_samples: number of samples - - :return: indices sampled with replacement - """ - - *shape, support_size, last_dim = pmf.shape - assert last_dim == 1 - - cdf = torch.cumsum(pmf.view(-1, support_size), dim=1) - inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], n_samples, device=cdf.device)) - - return inds.view(*shape, n_samples, 1).clamp(0, support_size - 1) - - -def safe_divide(a, b, epsilon=1e-6): - return a / torch.where(b < 0, b - epsilon, b + epsilon) diff --git a/spaces/wanghuoto/gogoai/src/components/chat-panel.tsx b/spaces/wanghuoto/gogoai/src/components/chat-panel.tsx deleted file mode 100644 index 1fbc3c2bf05b914e0c229661832fbb560745f488..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/components/chat-panel.tsx +++ /dev/null @@ -1,153 +0,0 @@ -'use client' - -import * as React from 'react' -import Image from 'next/image' -import Textarea from 'react-textarea-autosize' -import { useAtomValue } from 'jotai' -import { useEnterSubmit } from '@/lib/hooks/use-enter-submit' -import { cn } from '@/lib/utils' - -import BrushIcon from '@/assets/images/brush.svg' -import ChatIcon from '@/assets/images/chat.svg' -import VisualSearchIcon from '@/assets/images/visual-search.svg' -import SendIcon from '@/assets/images/send.svg' -import PinIcon from '@/assets/images/pin.svg' -import PinFillIcon from '@/assets/images/pin-fill.svg' - -import { useBing } from '@/lib/hooks/use-bing' -import { voiceListenAtom } from '@/state' -import Voice from './voice' -import { ChatImage } from './chat-image' -import { ChatAttachments } from './chat-attachments' - -export interface ChatPanelProps - extends Pick< - ReturnType, - | 'generating' - | 'input' - | 'setInput' - | 'sendMessage' - | 'resetConversation' - | 'isSpeaking' - | 'attachmentList' - | 'uploadImage' - | 'setAttachmentList' - > { - id?: string - className?: string -} - -export function ChatPanel({ - isSpeaking, - generating, - input, - setInput, - className, - sendMessage, - resetConversation, - attachmentList, - uploadImage, - setAttachmentList -}: 
ChatPanelProps) { - const inputRef = React.useRef(null) - const {formRef, onKeyDown} = useEnterSubmit() - const [focused, setFocused] = React.useState(false) - const [active, setActive] = React.useState(false) - const [pin, setPin] = React.useState(false) - const [tid, setTid] = React.useState() - const voiceListening = useAtomValue(voiceListenAtom) - - const setBlur = React.useCallback(() => { - clearTimeout(tid) - setActive(false) - const _tid = setTimeout(() => setFocused(false), 2000); - setTid(_tid) - }, [tid]) - - const setFocus = React.useCallback(() => { - setFocused(true) - setActive(true) - clearTimeout(tid) - inputRef.current?.focus() - }, [tid]) - - React.useEffect(() => { - if (input) { - setFocus() - } - }, [input]) - - return ( -
    { - e.preventDefault() - if (generating) { - return; - } - if (!input?.trim()) { - return - } - setInput('') - setPin(false) - await sendMessage(input) - }} - ref={formRef} - > -
    -
    -
    -
    -
    -
    -
    - -
    -
    -
    -
    - chat -