diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/Theb.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/Theb.py
deleted file mode 100644
index aa43ebc55d74ffaa722fe008424fce97c622a323..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/Provider/Providers/Theb.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://theb.ai'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-    path = os.path.dirname(os.path.realpath(__file__))
-    config = json.dumps({
-        'messages': messages,
-        'model': model}, separators=(',', ':'))
-
-    cmd = ['python3', f'{path}/helpers/theb.py', config]
-
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-    for line in iter(p.stdout.readline, b''):
-        yield line.decode('utf-8')
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Armi Project Cairo International Airport Heca Fs2004 _BEST_.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Armi Project Cairo International Airport Heca Fs2004 _BEST_.md
deleted file mode 100644
index f8f80ee80926348060df7e6fb5eb7d2854a5f031..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Armi Project Cairo International Airport Heca Fs2004 _BEST_.md
+++ /dev/null
@@ -1,123 +0,0 @@
-

Armi Project Cairo International Airport HECA FS2004 Review

-

Introduction

-

If you are looking for a realistic and detailed rendition of Cairo International Airport (IATA: CAI, ICAO: HECA) for FS2004, you might want to check out the Armi Project scenery. In this review, we will take a look at the features, performance and compatibility of this addon, and see if it is worth adding to your virtual hangar.

-

armi project cairo international airport heca fs2004


Download File ——— https://byltly.com/2uKyLi



-

What is Armi Project?

-

Armi Project is a scenery developer that specializes in creating airports for FS2004 and FSX. They have released several sceneries for Middle Eastern and Asian airports, such as Baghdad, Tehran, Kuwait, Riyadh, Muscat, Amman and Bangkok. Their sceneries are known for their accuracy, detail and realism.

-

What is Cairo International Airport HECA?

-

Cairo International Airport is the international airport of Cairo and the busiest in Egypt. It serves as the primary hub for EgyptAir and EgyptAir Express as well as several other airlines. The airport is located to the northeast of the city around 15 kilometres (9.3 mi) from the business area of the city and has an area of approximately 37 square kilometres (14 sq mi). The terminal facilities include Departure Hall 1, International Hall 3, and Hall 4 for private and non-commercial aircraft services. As part of the recent upgrading and facility improvement scheme, the CAA demolished the old hall 3, previously used for domestic arrivals and departures, to reconstruct a new hall to be used for international arrivals. Terminal 1 is locally known as the "Old Airport," although its facilities were recently given a complete overhaul and are newer than those of Terminal 2, which is still known as the "New Airport."

-

Features of the scenery

-

Gmax models of main buildings

-

The Armi Project scenery features Gmax models of all main buildings at Cairo International Airport, such as the terminal, control tower, cargo and military hangars and more. The models are accurate and detailed, with realistic textures and shadows. The jetways are also modeled with Gmax and can be moved with CTRL+J.

-

Detailed jetways and bridges

-

The scenery also features detailed jetways and bridges that connect the concourses to the terminal. The jetways have photo real textures and custom animations. The bridges have transparent windows that allow you to see inside them.

-

Photo real texture and custom ground texture

-

The ground texture of the scenery is based on satellite photos that give you a realistic feeling of being at the airport. The texture is custom made with lines, taxiways and taxi lights. The aprons have realistic markings and signs that help you navigate around the airport.

-

Animated skytrain and static objects

-

The scenery also features an animated skytrain that runs behind the concourses. The skytrain has realistic sounds and movements that add life to the airport. The scenery also has static objects such as local ground service equipment, cars and planes that populate the airport.

-

Surrounding area and landmarks

-

The scenery also covers the surrounding area of the airport, including military hangars, fuel tank facilities, VIP terminal, royal terminal, cargo bay and more. The scenery also includes some landmarks near the airport such as a major hotel (the Sheraton), a mosque and a pyramid.

-

Performance and compatibility

-

System requirements

-

The system requirements for this scenery are:

-

-

armi project heca scenery for fs2004
-cairo international airport fs2004 download
-armi project cairo airport review
-heca airport code fs2004
-armi project fs2004 airports
-cairo international airport scenery fs9
-armi project heca fsx
-fs2004 cairo airport update
-armi project egypt airports fs2004
-heca airport charts fs2004
-armi project cairo international v2 fs2004
-fs2004 cairo photoreal scenery
-armi project heca p3d
-fs2004 cairo airport freeware
-armi project fs9 scenery
-cairo international airport terminal 2 fs2004
-armi project heca x-plane
-fs2004 cairo airport traffic
-armi project egypt fs2004
-heca airport map fs2004
-armi project cairo international v3 fs2004
-fs2004 cairo mesh scenery
-armi project heca afcad
-fs2004 cairo airport lights
-armi project fsx scenery
-cairo international airport terminal 3 fs2004
-armi project heca crack
-fs2004 cairo airport weather
-armi project egyptian airports pack fs2004
-heca airport elevation fs2004
-armi project cairo international v1 fs2004
-fs2004 cairo landmarks scenery
-armi project heca patch
-fs2004 cairo airport runway length
-armi project p3d scenery
-cairo international airport terminal 1 fs2004
-armi project heca manual
-fs2004 cairo airport ils frequency
-armi project egyptian airports bundle fs2004
-heca airport name fs2004
-armi project cairo international v4 fs2004
-fs2004 cairo vector scenery
-armi project heca update
-fs2004 cairo airport taxiway signs
-armi project x-plane scenery
-cairo international airport terminal 5 fs2004
-armi project heca serial number
-fs2004 cairo airport atis frequency
-armi project egyptian airports collection fs2004
-heca airport location fs2004

-

Frame rate and VAS usage

-

The frame rate of this scenery is very good considering the amount of detail and objects it has. The VAS usage is also reasonable and does not cause any out-of-memory errors. However, you might want to adjust your settings according to your system specifications to get the best performance.

-

Compatibility with other addons

-

The scenery is compatible with most addons that enhance FS2004, such as mesh, landclass, weather, traffic etc. However, you might need to disable some conflicting files or adjust some settings to avoid any issues.

-

Conclusion

-

Pros and cons

-

The pros of this scenery are:

-

-

The cons of this scenery are:

-

-

Rating and recommendation

-

I would rate this scenery 4 out of 5 stars. It is a very good representation of Cairo International Airport for FS2004 that offers a lot of features, realism and detail. It is also well optimized for performance and compatibility. However, it also suffers from some limitations that are inherent to FS2004 itself. Therefore, I would recommend this scenery to anyone who still uses FS2004 and wants to fly to or from Cairo International Airport.

| Feature | Rating (out of 5) |
| --- | --- |
| Gmax models of main buildings | 5 |
| Detailed jetways and bridges | 5 |
| Photo real texture and custom ground texture | 5 |
| Animated skytrain and static objects | 5 |
| Surrounding area and landmarks | 5 |
| Performance | 4 |
| Compatibility | 4 |
| Total | 33/40 = 82.5% |
| Average | 4/5 = 80% |
| Rounded | 4 stars ⭐⭐⭐⭐ |
FAQs:

Q: Where can I buy this scenery?
A: You can buy this scenery from simMarket.com for €17.

Q: How do I install this scenery?
A: You can install this scenery by running the setup.exe file that comes with the download.

Q: How do I activate this scenery?
A: You can activate this scenery by entering your email address and serial number that you received after purchasing.

Q: How do I uninstall this scenery?
A: You can uninstall this scenery by running the uninstall.exe file that comes with the download.

Q: How do I contact Armi Project for support or feedback?
A: You can contact Armi Project by sending an email to vtbs_support@hotmail.com.

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ezycracks.com How to Crack Any Software in Minutes.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ezycracks.com How to Crack Any Software in Minutes.md
deleted file mode 100644
index f273b7f9971eb48cd1aa1e4f8f7d72deceb6035d..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ezycracks.com How to Crack Any Software in Minutes.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-```html
-

How to Find the Best Software Cracks on Ezycracks.com

-

If you are looking for a way to use premium software without paying for a license, you might be interested in ezycracks.com. This website offers a huge collection of software cracks, patches, keygens, and serial keys for various applications and games. You can download them for free and enjoy the full features of your favorite software.

-

However, not all software cracks are created equal. Some of them might be outdated, infected with malware, or not compatible with your system. That's why you need to be careful when choosing a software crack from ezycracks.com. Here are some tips to help you find the best software cracks on this website.
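As a concrete illustration of the "scan it before you run it" advice above, here is a minimal sketch (not part of the original article) that checks a downloaded archive with ClamAV before it is ever opened. It assumes the free clamscan tool is installed and on your PATH; the file name is a placeholder:

```python
import subprocess

# Hypothetical file name -- substitute the archive you actually downloaded.
DOWNLOADED_FILE = "downloaded_file.zip"

# clamscan exits with status 0 when the file is clean and 1 when a virus is found.
result = subprocess.run(["clamscan", DOWNLOADED_FILE], capture_output=True, text=True)
print(result.stdout)
if result.returncode == 1:
    print("Infected file -- delete it and do not run it.")
```

Any file that fails such a scan should be deleted immediately, whatever the site's comments and ratings say.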

-

ezycracks.com


Download: https://byltly.com/2uKxEa



- -

By following these tips, you can find the best software cracks on ezycracks.com and enjoy using premium software for free. However, you should also be aware of the risks and legal issues that come with using software cracks. Software piracy is illegal and unethical, and it can harm the developers and the industry. You should always support the original creators of the software by buying a legitimate license if you can afford it.

-``` - -```html -

How to Use Software Cracks Safely and Effectively

-

Using software cracks from ezycracks.com can be a great way to save money and access premium features. However, you should also be careful and responsible when using them. Here are some tips to help you use software cracks safely and effectively.

- -

By following these tips, you can use software cracks from ezycracks.com safely and effectively. However, you should also remember that using software cracks is not a long-term solution. You should always respect the rights and efforts of the software developers and buy a genuine license if you can afford it.

-```

ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Windows Mobile 7 Samsung Omnia i900 Get Ready for a Faster and Smoother Experience.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Windows Mobile 7 Samsung Omnia i900 Get Ready for a Faster and Smoother Experience.md
deleted file mode 100644
index 8a7c12a86909f1a3b7dec17f1751b701f1ce4a29..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Windows Mobile 7 Samsung Omnia i900 Get Ready for a Faster and Smoother Experience.md
+++ /dev/null
@@ -1,175 +0,0 @@
-

How to Free Download Windows Mobile 7 for Samsung Omnia i900

-

Are you looking for a way to upgrade your Samsung Omnia i900 to a newer and better operating system? If so, you might be interested in Windows Mobile 7, the latest version of Microsoft's mobile OS that offers a sleek and intuitive user interface, enhanced performance and security, and a rich selection of apps and games. In this article, we will show you how to free download Windows Mobile 7 for Samsung Omnia i900 and how to install it on your device. We will also give you some tips and tricks on how to make the most out of your new OS.

-

What is Windows Mobile 7?

-

Windows Mobile 7 is the seventh generation of Microsoft's mobile operating system that was released in October 2010. It is designed to provide a seamless integration with other Microsoft products and services, such as Windows Live, Xbox Live, Zune, Bing, Office, etc. It also features a new user interface called Metro, which consists of colorful tiles that display live information and notifications. Windows Mobile 7 also supports multitouch gestures, voice commands, social networking integration, cloud computing, and more.

-

free download windows mobile 7 samsung omnia i900


Download ……… https://byltly.com/2uKvm1



-

What is Samsung Omnia i900?

-

Samsung Omnia i900 is a smartphone that was released in June 2008. It runs on Windows Mobile 6.1 Professional and has a 3.2-inch touchscreen display with a resolution of 240 x 400 pixels. It also has a 5-megapixel camera with autofocus and flash, a microSD card slot, Wi-Fi, Bluetooth, GPS, FM radio, and a stylus. It has a battery capacity of 1440 mAh and a weight of 122 grams.

-

Why upgrade to Windows Mobile 7?

-

If you are still using Windows Mobile 6.1 on your Samsung Omnia i900, you might be missing out on some of the advantages that Windows Mobile 7 can offer. Here are some of the reasons why you should consider upgrading:

- -

How to free download Windows Mobile 7 for Samsung Omnia i900?

-

If you are ready to upgrade your Samsung Omnia i900 to Windows Mobile 7, you will need to follow these steps:

-

How to get windows mobile 7 on samsung omnia i900 for free
-Samsung omnia i900 windows mobile 7 upgrade download link
-Windows mobile 7 rom for samsung omnia i900 free installation guide
-Samsung omnia i900 windows mobile 7 firmware update free download
-Free windows mobile 7 apps for samsung omnia i900
-Samsung omnia i900 windows mobile 7 themes free download
-Windows mobile 7 launcher for samsung omnia i900 free apk
-Samsung omnia i900 windows mobile 7 drivers free download
-Windows mobile 7 emulator for samsung omnia i900 free software
-Samsung omnia i900 windows mobile 7 games free download
-Windows mobile 7 custom rom for samsung omnia i900 free flash tool
-Samsung omnia i900 windows mobile 7 review and features
-Windows mobile 7 tips and tricks for samsung omnia i900 users
-Samsung omnia i900 windows mobile 7 comparison with other devices
-Windows mobile 7 backup and restore for samsung omnia i900 free tutorial
-Samsung omnia i900 windows mobile 7 battery life and performance
-Windows mobile 7 sync and transfer for samsung omnia i900 free app
-Samsung omnia i900 windows mobile 7 camera and video quality
-Windows mobile 7 security and privacy for samsung omnia i900 free settings
-Samsung omnia i900 windows mobile 7 keyboard and input options
-Windows mobile 7 widgets and shortcuts for samsung omnia i900 free customization
-Samsung omnia i900 windows mobile 7 browser and internet speed
-Windows mobile 7 email and messaging for samsung omnia i900 free setup
-Samsung omnia i900 windows mobile 7 contacts and calendar management
-Windows mobile 7 maps and navigation for samsung omnia i900 free offline mode
-Samsung omnia i900 windows mobile 7 music and video player features
-Windows mobile 7 radio and podcast for samsung omnia i900 free streaming
-Samsung omnia i900 windows mobile 7 social media and networking apps
-Windows mobile 7 news and weather for samsung omnia i900 free updates
-Samsung omnia i900 windows mobile 7 productivity and office tools
-Windows mobile 7 ebook and pdf reader for samsung omnia i900 free download
-Samsung omnia i900 windows mobile 7 photo and video editor apps
-Windows mobile 7 file manager and explorer for samsung omnia i900 free access
-Samsung omnia i900 windows mobile 7 calculator and converter tools
-Windows mobile 7 clock and alarm for samsung omnia i900 free customization
-Samsung omnia i900 windows mobile 7 voice recorder and memo app
-Windows mobile 7 flashlight and compass for samsung omnia i900 free utility
-Samsung omnia i900 windows mobile 7 qr code and barcode scanner app
-Windows mobile 7 remote control and tv guide for samsung omnia i900 free app
-Samsung omnia i900 windows mobile 7 fitness and health tracker apps
-Windows mobile 7 travel and local guide for samsung omnia i900 free app
-Samsung omnia i900 windows mobile 7 shopping and coupon apps
-Windows mobile 7 education and learning for samsung omnia i900 free app
-Samsung omnia i900 windows mobile 7 fun and entertainment apps
-Windows mobile 7 lifestyle and personalization for samsung omnia i900 free app
-Samsung omnia i900 windows mobile 7 troubleshooting and support forum
-Windows mobile 7 developer and modding for samsung omnia i900 free resources
-Samsung omnia i900 windows mobile 7 specifications and price list
-Windows mobile 7 history and evolution for samsung omnia i900 free article

-

Step 1: Backup your data

-

Before you start the upgrade process, you should backup your data on your phone. This includes your contacts, messages, photos, videos, music, documents, etc. You can use various methods to backup your data, such as syncing with your PC or using online services like Google Drive or Dropbox.

-

Step 2: Download the Windows Mobile 7 ROM

-

The next step is to download the Windows Mobile 7 ROM for Samsung Omnia i900. A ROM is a file that contains the operating system and other software for your device. You can find various sources online where you can download the ROM for free. One of them is this forum thread, where you can find links to different versions of the ROM.

-

Make sure you download the ROM that matches your device model and region. Also make sure you scan the ROM for viruses before installing it.

-

Step 3: Flash the Windows Mobile 7 ROM

-

The final step is to flash the Windows Mobile 7 ROM on your Samsung Omnia i900. Flashing means installing the ROM on your device's memory, replacing the existing OS. To flash the Windows Mobile 7 ROM, you will need a PC and a USB cable. Here are the steps to follow:

-
1. Connect your Samsung Omnia i900 to your PC using the USB cable.
2. Run the Windows Phone Image Designer tool on your PC. You can download it from this link.
3. Select "Flash a Windows Phone image onto your phone" and click next.
4. Select your device from the list and click next.
5. Browse to the folder where you downloaded the Windows Mobile 7 ROM and select it.
6. Click next and confirm that you want to flash the ROM.
7. Wait for the flashing process to complete. It may take several minutes.
8. When the flashing is done, your phone will reboot automatically.

Step 4: Enjoy your new OS

-

Congratulations! You have successfully upgraded your Samsung Omnia i900 to Windows Mobile 7. You can now enjoy all the features and benefits of your new OS. You can customize your home screen, sync your data with Microsoft services, use voice commands and gestures, download apps from the Marketplace, and more.

-

Tips and tricks for using Windows Mobile 7 on Samsung Omnia i900

-

To help you get started with Windows Mobile 7 on your Samsung Omnia i900, here are some tips and tricks that you can use:

-

Tip 1: Customize your home screen

-

Your home screen is where you can access your most frequently used apps and settings. You can customize it by changing the tiles, colors, and themes. To do so, follow these steps:

- -

Tip 2: Sync your data with Microsoft services

-

One of the advantages of Windows Mobile 7 is that it integrates well with other Microsoft products and services, such as Outlook, OneDrive, Office, etc. You can sync your contacts, calendar, email, photos, documents, etc. with these services and access them from any device. To do so, follow these steps:

- -

Tip 3: Use voice commands and gestures

-

Windows Mobile 7 supports voice commands and gestures that allow you to control your phone without touching it. You can use voice commands to make calls, send texts, search the web, open apps, etc. You can use gestures to answer calls, mute calls, snooze alarms, etc. To do so, follow these steps:

- -

Tip 4: Download apps from the Marketplace

-

Windows Mobile 7 has a large and diverse collection of apps that you can download from the Marketplace. You can find apps for productivity, entertainment, education, health, finance, and more. Here are some of the best apps for Windows Mobile 7 that you should try:

-
| App | Description |
| --- | --- |
| WhatsApp | A popular messaging app that lets you chat, call, and share media with your contacts for free. |
| Skype | A video calling app that lets you connect with your friends and family across the world. |
| Facebook | The official app for the social media giant that lets you stay in touch with your friends, post updates, check news, and more. |
| Instagram | A photo-sharing app that lets you capture and edit your moments, follow your favorite celebrities, and discover new trends. |
| Twitter | A micro-blogging app that lets you follow the latest news, opinions, and trends from around the world. |
| Viber | A messaging and calling app that lets you communicate with your contacts for free, with features like group chats, stickers, and voice messages. |
| Bing | A search engine app that lets you find what you need on the web, with features like voice search, image search, maps, and more. |
| Zune | A music and video app that lets you enjoy your favorite tunes and shows, with features like playlists, podcasts, radio, and more. |
| Xbox Live | A gaming app that lets you play high-quality games on your phone, with features like achievements, leaderboards, multiplayer, and more. |
| Office Mobile | A productivity app that lets you create and edit documents, spreadsheets, and presentations on your phone. |

Tip 5: Update your phone regularly

-

Windows Mobile 7 is no longer supported by Microsoft, which means it won't receive any new features or security updates. However, you can still check for any available updates that you might have missed before. To do so, follow these steps:

- -

Conclusion

-

Windows Mobile 7 is a great operating system that can give your Samsung Omnia i900 a new lease of life. It has a beautiful and user-friendly interface, a fast and smooth performance, a high level of security, and a wide range of apps and games. In this article, we showed you how to free download Windows Mobile 7 for Samsung Omnia i900 and how to install it on your device. We also gave you some tips and tricks on how to make the most out of your new OS. We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below.

-

FAQs

-

Here are some frequently asked questions about Windows Mobile 7 and Samsung Omnia i900:

-
1. Will Windows Mobile 7 work on any Samsung Omnia model?
   No, Windows Mobile 7 will only work on Samsung Omnia i900. Other models, such as Samsung Omnia i910 or Samsung Omnia II, are not compatible with Windows Mobile 7.
2. Will I lose any data or settings when I upgrade to Windows Mobile 7?
   Yes, upgrading to Windows Mobile 7 will erase all your data and settings on your Samsung Omnia i900. That's why it's important to backup your data before you start the upgrade process.
3. Can I downgrade back to Windows Mobile 6.1 if I don't like Windows Mobile 7?
   Yes, you can downgrade back to Windows Mobile 6.1 if you want to. You will need to flash the original Windows Mobile 6.1 ROM on your Samsung Omnia i900 using the same method as flashing the Windows Mobile 7 ROM.
4. Can I use Google services on Windows Mobile 7?
   Yes, you can use Google services on Windows Mobile 7, such as Gmail, Google Maps, Google Drive, etc. You will need to download the Google apps from the Marketplace or use the web browser to access them.
5. Can I use dual SIM cards on Samsung Omnia i900?
   No, Samsung Omnia i900 does not support dual SIM cards. It only has one SIM card slot.

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free LINK Winzip Full Version Download.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free LINK Winzip Full Version Download.md
deleted file mode 100644
index f6b35e8c8413664535c4ce6348619412942b4155..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free LINK Winzip Full Version Download.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-

How to Get Free WinZip Full Version Download for Windows 10

-

WinZip is one of the most popular and trusted tools for compressing and decompressing files. It can help you save disk space, reduce file transfer time, and protect your files with encryption and passwords. WinZip supports various formats, such as ZIP, RAR, 7Z, TAR, GZIP, and more. However, WinZip is not free software, and you need to pay a license fee to use it without limitations.

-

That's why some people look for free WinZip full version download for Windows 10 online. They want to enjoy the benefits of WinZip without spending any money. However, this is not a good idea. Downloading WinZip from unofficial sources can expose you to various risks and problems. Here are some of them:

-

free winzip full version download


Download File ✓✓✓ https://byltly.com/2uKA0F



- -

Therefore, we do not recommend using a free WinZip full version download for Windows 10. Instead, we suggest you try some of the legitimate ways to get WinZip for free or at a lower cost. Here are some of them:

- -

We hope this article has helped you understand why you should avoid using free WinZip full version download for Windows 10 and what are some of the alternatives you can try. Remember that using pirated software is not only unethical but also dangerous. It is better to use legal and safe ways to get WinZip and enjoy its advantages.

ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA 5 Key How to Access the Most Epic Game Ever.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA 5 Key How to Access the Most Epic Game Ever.md
deleted file mode 100644
index 2495075d0f08ebac270943a3e9f6903a1c5e0821..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA 5 Key How to Access the Most Epic Game Ever.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-

How to Get a GTA 5 Key for Free

-

GTA 5 is one of the most popular and successful video games of all time. It offers an immersive open-world experience, where you can explore the city of Los Santos and its surrounding areas, engage in various missions and activities, and customize your character and vehicles. GTA 5 also has an online mode, where you can play with other players from around the world, join crews, participate in heists, races, deathmatches, and more.

-

However, GTA 5 is not a cheap game. It usually costs around $60 on various platforms, such as Steam, Epic Games Store, PlayStation Store, and Xbox Store. If you want to play GTA 5 without spending a dime, you might be wondering if there is a way to get a GTA 5 key for free.

-

crack gta 5 key


DOWNLOAD: https://byltly.com/2uKzvP



-

The answer is yes, but it is not easy or guaranteed. There are some methods that might work for you, but they also come with some risks and drawbacks. Here are some of the ways you can try to get a GTA 5 key for free:

- -

In conclusion, getting a GTA 5 key for free is possible but not easy or safe. You might end up wasting your time, money, or security by trying some of the methods mentioned above. The best way to enjoy GTA 5 is to buy it from a legitimate source when it is on sale or discounted. This way, you can support the developers and publishers of the game and have a smooth and hassle-free gaming experience.

ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Commodore 64 Roms Pack !!LINK!! Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Commodore 64 Roms Pack !!LINK!! Download.md
deleted file mode 100644
index 2b6271fa7961a6e5368ea40156e9249d2c5db083..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Commodore 64 Roms Pack !!LINK!! Download.md
+++ /dev/null
@@ -1,6 +0,0 @@

Commodore 64 roms pack download


Download Zip » https://imgfil.com/2uxXO7



- -We offer fast servers so you can Download N64 ROMs and start playing ... I've been using the 188 rom pack from EWJ for quite awhile. ... COM is a C64 site dedicated to just about everything that is connected to the Commodore 64 (C64). 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Script Frost Dragon Okolnir Elfbot WORK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Script Frost Dragon Okolnir Elfbot WORK.md
deleted file mode 100644
index b2328a702ee261965b683a91fd10e506c105acbd..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Script Frost Dragon Okolnir Elfbot WORK.md
+++ /dev/null
@@ -1,6 +0,0 @@

download script frost dragon okolnir elfbot


DOWNLOAD ✶✶✶ https://imgfil.com/2uy1su



-
-Programming can elf scripts be posted there ? :). Reply ... We Should not support bots, or download & run crap. ... Try Okolnir frost dragons. 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Myob Accounting Versi 17 Full 32 Fixed.md b/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Myob Accounting Versi 17 Full 32 Fixed.md
deleted file mode 100644
index 2c214f0df5a29613ff3aeffa35b7692e2f05f6bf..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Myob Accounting Versi 17 Full 32 Fixed.md
+++ /dev/null
@@ -1,6 +0,0 @@

Free Download Myob Accounting Versi 17 Full 32


Download ::: https://imgfil.com/2uy1JS



-
- 3cee63e6c2
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Attack on Titan 2 Final Battle - The Ultimate Challenge for Fans of the Anime.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Attack on Titan 2 Final Battle - The Ultimate Challenge for Fans of the Anime.md
deleted file mode 100644
index 043c45b83af69ad01714abe5a2d2957dd77cb19e..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Attack on Titan 2 Final Battle - The Ultimate Challenge for Fans of the Anime.md
+++ /dev/null
@@ -1,129 +0,0 @@
-

Attack on Titan Game: A Guide for Fans and Newcomers

-

If you are a fan of the hit anime and manga series Attack on Titan, or if you are curious about what it is all about, you might want to check out Attack on Titan Game, a thrilling action game based on the popular franchise. In this article, we will give you a comprehensive guide on what Attack on Titan is, what Attack on Titan Game is, why you should play it, and where you can get it. Whether you are a seasoned fan or a newcomer, this article will help you enjoy Attack on Titan Game more.

-

What is Attack on Titan?

-

Attack on Titan is a Japanese anime and manga series created by Hajime Isayama. It is set in a world where humanity lives inside walled cities to protect themselves from giant humanoid creatures called Titans, who devour humans without reason. The story follows Eren Yeager, a young boy who vows to exterminate all Titans after his mother is killed by one. He joins the Survey Corps, an elite military unit that fights Titans outside the walls, along with his friends Mikasa Ackerman and Armin Arlert.

-

attack on titan game


Download Zip: https://urlin.us/2uSZB8



-

The story and the characters of the anime and manga

-

The anime and manga series of Attack on Titan have been praised for their gripping story, complex characters, and stunning visuals. The series has four seasons of anime adaptation, with the final season currently airing. The manga has 34 volumes as of June 2021, with the final chapter published in April 2021. The series has won several awards, such as the Kodansha Manga Award, the Harvey Award, and the Micheluzzi Award.

-

The series has a large cast of characters, each with their own personality, backstory, and motivation. Some of the main characters are:

- -

The themes and the messages of the series

-

Attack on Titan explores various themes and messages, such as freedom, oppression, war, morality, identity, loyalty, betrayal, revenge, hope, despair, and more. The series challenges its characters and its audience to question their beliefs, values, and actions in a cruel and complex world. The series also shows how humans can overcome their fears and limitations by fighting for their ideals and dreams.

What is Attack on Titan Game?

-

Attack on Titan Game is a video game based on the anime and manga series of the same name. It is developed by Omega Force, a subsidiary of Koei Tecmo, and published by Koei Tecmo in Japan and by Tecmo Koei America in North America and Europe. The game was released for PlayStation 3, PlayStation 4, PlayStation Vita, Xbox One, and Microsoft Windows in 2016, and for Nintendo Switch in 2018.

-

The gameplay and the features of the game

-

The game is an action game that lets you play as various characters from the series, such as Eren, Mikasa, Levi, Hange, and more. You can also create your own custom character and join the Survey Corps. The game follows the story of the anime and manga from the beginning until the end of season one, with some original scenarios and characters added. You can also play online co-op missions with up to four players.

-

The game's main feature is the omni-directional mobility gear (ODM), which allows you to swing around the environment and attack Titans with your blades. You can target different parts of a Titan's body, such as the arms, legs, eyes, or nape, and sever them to weaken or kill them. You can also use items such as gas canisters, blades, guns, bombs, and traps to aid you in combat. You have to manage your resources carefully, as running out of gas or blades can leave you vulnerable.

-

The game also has a town mode, where you can interact with other characters, upgrade your equipment, buy items, and access side missions. You can also view your stats, achievements, gallery, and encyclopedia in this mode.

-

The differences and the similarities between the game and the anime/manga

-

The game is faithful to the anime and manga in terms of the story, the characters, the visuals, and the sound. The game uses cel-shaded graphics to recreate the style of the anime, and features voice acting from the original cast. The game also uses music from the anime's soundtrack, composed by Hiroyuki Sawano.

-

The game also adds some new elements that are not present in the anime or manga. For example, the game introduces some original characters that are part of your squad, such as Ian Dietrich, Rico Brzenska, Mitabi Jarnach, and Gelgar. The game also has some original scenarios that expand on the events of the anime or manga, such as a mission where you have to rescue civilians from a Titan-infested town.

-

Attack on Titan / A.O.T. Wings of Freedom on Steam
-Attack on Titan 2 - A.O.T.2 - Demo Download
-List of Attack Mode Missions (Attack on Titan Game)
-Attack on Titan 2: Final Battle Upgrade Pack
-Attack on Titan Tribute Game by Feng
-Attack on Titan Tactics - Mobile Strategy Game
-Attack on Titan: Humanity in Chains for Nintendo 3DS
-Attack on Titan VR by Kosma - Oculus Quest
-Attack on Titan: Assault - RPG Runner Game
-Attack on Titan: The Last Stand - Board Game
-Attack on Titan: Escape from Certain Death for Nintendo Switch
-Attack on Titan: No Regrets - Visual Novel Game
-Attack on Titan: Lost Girls - Interactive Video Game
-Attack on Titan: Before the Fall - Online Game
-Attack on Titan: Junior High - Mini Game Collection
-Attack on Titan: The Anime Guide - Official Game Book
-Attack on Titan: The Harsh Mistress of the City - Text Adventure Game
-Attack on Titan: Chronicle - Movie Tie-in Game
-Attack on Titan: Wings of Counterattack Online - Browser Game
-Attack on Titan: Roar to Freedom - Mobile Simulation Game
-Attack on Titan: End of the World - Live Action Game
-Attack on Titan: Guren no Yumiya - Arcade Game
-Attack on Titan: Shichi Kara no Dasshutsu - Escape Room Game
-Attack on Titan: Team Battle - Multiplayer Online Game
-Attack on Titan: Brave Order - Mobile RPG Game
-Attack on Titan: The Final Season - Anime Streaming Game
-Attack on Titan: Beyond the Wall - Mobile Card Game
-Attack on Titan: Shadow of Freedom - Fan-made Game
-Attack on Titan: Birth of Levi - Spin-off Game
-Attack on Titan: Wall Sina, Goodbye - Side Story Game
-Attack on Titan: Clash of Titans - Mobile Action Game
-Attack on Titan: Dawn of Humanity - VR Experience Game
-Attack on Titan: Crimson Bow and Arrow - Movie Quiz Game
-Attack on Titan: The Real - Universal Studios Japan Game
-Attack on Titan: Spoof on Titan - Parody Game
-Attack on Titan: Colossal Edition - Manga Box Set Game
-Attack on Titan: Original Soundtrack - Music Album Game
-Attack on Titan: Garrison Regiment Training Camp - VR Training Game
-Attack on Titan: Survey Corps Expedition - VR Exploration Game
-Attack on Titan: Military Police Brigade Investigation - VR Mystery Game
-Attack on Titan: Levi vs Beast Titan - VR Battle Game
-Attack on Titan: Eren's Basement Key - VR Puzzle Game
-Attack on Titan: Mikasa's Scarf - VR Romance Game
-Attack on Titan: Armin's Colossal Plan - VR Strategy Game
-Attack on Titan: Erwin's Sacrifice - VR Drama Game
-Attack on Titan: Hange's Experiments - VR Science Game
-Attack on Titan: Sasha's Potato Snack - VR Cooking Game

-

The game also has some differences from the anime or manga in terms of the gameplay. For example, the game allows you to play as characters that are not playable in the anime or manga, such as Hange or Erwin. The game also gives you more freedom in how you approach each mission, as you can choose your own route and strategy. The game also has some features that are not realistic or consistent with the anime or manga's logic, such as being able to use guns or bombs against Titans.

Why should you play Attack on Titan Game?

-

If you are a fan of Attack on Titan, playing Attack on Titan Game is a great way to experience the story and the world of the series in a new and immersive way. You can relive the epic moments of the anime and manga, such as the fall of Shiganshina, the battle of Trost, the female Titan chase, and more. You can also explore the details and the secrets of the series, such as the history of the walls, the origin of the Titans, and the identity of the enemy.

-

If you are new to Attack on Titan, playing Attack on Titan Game is a great way to get introduced to the series and its characters. You can learn about the plot and the setting of the series, as well as the personalities and the relationships of the characters. You can also enjoy the action and the thrill of fighting Titans, as well as the drama and the emotion of the story.

-

The benefits and the challenges of playing the game

-

Playing Attack on Titan Game has many benefits, such as:

- -

Playing Attack on Titan Game also has some challenges, such as:

- -

The tips and the tricks for enjoying the game more

-

To enjoy Attack on Titan Game more, here are some tips and tricks that you can follow:

- -

Where can you get Attack on Titan Game?

-

Attack on Titan Game is available for various platforms and devices, such as PlayStation 3, PlayStation 4, PlayStation Vita, Xbox One, Microsoft Windows, and Nintendo Switch. You can buy or download the game from different sources, such as online stores, physical stores, or official websites. Here is a table that shows some examples of where you can get Attack on Titan Game, along with their prices and discounts:

-
AppDescription
WhatsAppA popular messaging app that lets you chat, call, and share media with your contacts for free.
SkypeA video calling app that lets you connect with your friends and family across the world.
FacebookThe official app for the social media giant that lets you stay in touch with your friends, post updates, check news, and more.
InstagramA photo-sharing app that lets you capture and edit your moments, follow your favorite celebrities, and discover new trends.
TwitterA micro-blogging app that lets you follow the latest news, opinions, and trends from around the world.
ViberA messaging and calling app that lets you communicate with your contacts for free, with features like group chats, stickers, and voice messages.
BingA search engine app that lets you find what you need on the web, with features like voice search, image search, maps, and more.
ZuneA music and video app that lets you enjoy your favorite tunes and shows, with features like playlists, podcasts, radio, and more.
Xbox LiveA gaming app that lets you play high-quality games on your phone, with features like achievements, leaderboards, multiplayer, and more.
Office MobileA productivity app that lets you create and edit documents, spreadsheets, and presentations on your phone.
- - - - - - -
| Platform/Device | Source | Price | Discount |
| --- | --- | --- | --- |
| PlayStation 4 | Amazon.com | $29.99 | $10.00 (25% off) |
| Xbox One | Microsoft Store | $59.99 | $0.00 (0% off) |
| Nintendo Switch | Nintendo eShop | $59.99 | $0.00 (0% off) |
| Microsoft Windows | Steam | $59.99 | $17.99 (70% off) |
| PlayStation Vita | PlayStation Store | | |
-

Conclusion

-

Attack on Titan Game is a game that every fan of Attack on Titan should play, and every newcomer should try. It is a game that lets you experience the story and the world of the series in a new and immersive way. It is a game that challenges you to fight Titans and survive in a cruel and complex world. It is a game that entertains you and makes you happy, as well as frustrates you and stresses you out. It is a game that has many benefits and challenges, as well as tips and tricks for enjoying it more. It is a game that is available for various platforms and devices, at different prices and discounts.

-

If you are interested in playing Attack on Titan Game, you can get it from the sources listed above, or from other sources that you prefer. You can also check out the official website of the game, or the official social media accounts of the game, for more information and updates. You can also watch the trailer of the game, or read some reviews of the game, to get a better idea of what it is like.

-

Whether you are a fan or a newcomer, we hope that this article has helped you learn more about Attack on Titan Game, and that you will enjoy playing it. Thank you for reading, and have fun!

-

Frequently Asked Questions

-

Here are some frequently asked questions about Attack on Titan Game, along with their answers:

-
1. Is Attack on Titan Game suitable for children?
   Attack on Titan Game is rated M for Mature by the ESRB, 18 by PEGI, and Z by CERO. This means that the game contains violence, blood, gore, and language that may not be appropriate for children. The game also deals with dark and mature themes that may be disturbing or upsetting for some players. Therefore, we recommend that parents or guardians supervise their children if they want to play the game, or avoid the game altogether if they are not comfortable with its content.
2. How long does it take to finish Attack on Titan Game?
   The length of Attack on Titan Game depends on how you play it, and how much content you want to explore. According to HowLongToBeat.com, the average time to complete the main story of the game is about 10 hours, while the average time to complete all the extra content of the game is about 25 hours. However, your time may vary depending on your skill level, your difficulty setting, your pace, and your choices.
3. Does Attack on Titan Game have multiplayer mode?
   Attack on Titan Game has online co-op mode, where you can play with up to three other players in various missions. You can either join a random lobby, or create your own lobby and invite your friends. You can also chat with other players using voice or text messages. However, the game does not have local co-op mode or competitive mode.
4. Does Attack on Titan Game have DLCs or updates?
   Attack on Titan Game has several DLCs or downloadable content that you can purchase separately or as part of a season pass. These DLCs include additional costumes, weapons, scenarios, characters, and modes. The game also has free updates that fix bugs, improve performance, and add new features.
5. Does Attack on Titan Game have any sequels or spin-offs?
   Attack on Titan Game has a sequel called Attack on Titan 2, which was released in 2018. The sequel covers the events of season two and three of the anime, as well as some original content. The sequel also has improved graphics, gameplay, and customization options. The sequel also has a spin-off called Attack on Titan 2: Final Battle, which was released in 2019. The spin-off adds more content from season three of the anime, as well as new modes and features.
197e85843d
-
-
\ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/src/pages/api/blob.ts b/spaces/2023Liu2023/bingo/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/ps_adv.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/ps_adv.py deleted file mode 100644 index fbc1e5133ddf26f2dfac598028b8e3db01ec638e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/ps_adv.py +++ /dev/null @@ -1,372 +0,0 @@ -import os -import torch -import torch.nn.functional as F -import torch.nn as nn -import numpy as np - -from modules.portaspeech.portaspeech import PortaSpeech -from modules.syntaspeech.multi_window_disc import Discriminator -from tasks.tts.fs2 import FastSpeech2Task -from utils.hparams import hparams -from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate, mel2token_to_dur -from utils import num_params, tensors_to_scalars -from utils.pitch_utils import denorm_f0, norm_f0 -from data_gen.tts.data_gen_utils import get_pitch -from utils.dtw import dtw as DTW - -from utils.plot import spec_to_figure -from utils.text.text_encoder import build_token_encoder - - -class PortaSpeechAdvTask(FastSpeech2Task): - def __init__(self): - super().__init__() - data_dir = hparams['binary_data_dir'] - self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json') - self.build_disc_model() - self.mse_loss_fn = torch.nn.MSELoss() - - def build_tts_model(self): - ph_dict_size = len(self.token_encoder) - word_dict_size = len(self.word_encoder) - self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams) - - self.gen_params = [p for p in self.model.parameters() if p.requires_grad] - self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)] - self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)] - self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)] - self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ] - - self.use_bert = True if len(self.bert_params) > 0 else False - - def build_disc_model(self): - disc_win_num = hparams['disc_win_num'] - h = 
hparams['mel_disc_hidden_size'] - self.mel_disc = Discriminator( - time_lengths=[32, 64, 128][:disc_win_num], - freq_length=80, hidden_size=h, kernel=(3, 3) - ) - self.disc_params = list(self.mel_disc.parameters()) - - def on_train_start(self): - super().on_train_start() - for n, m in self.model.named_children(): - num_params(m, model_name=n) - if hasattr(self.model, 'fvae'): - for n, m in self.model.fvae.named_children(): - num_params(m, model_name=f'fvae.{n}') - - def _training_step(self, sample, batch_idx, optimizer_idx): - loss_output = {} - loss_weights = {} - disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0 - if optimizer_idx == 0: - ####################### - # Generator # - ####################### - loss_output, model_out = self.run_model(sample, infer=False) - self.model_out_gt = self.model_out = \ - {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)} - if disc_start: - mel_p = model_out['mel_out'] - if hasattr(self.model, 'out2mel'): - mel_p = self.model.out2mel(mel_p) - o_ = self.mel_disc(mel_p) - p_, pc_ = o_['y'], o_['y_c'] - if p_ is not None: - loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size())) - loss_weights['a'] = hparams['lambda_mel_adv'] - if pc_ is not None: - loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size())) - loss_weights['ac'] = hparams['lambda_mel_adv'] - else: - ####################### - # Discriminator # - ####################### - if disc_start and self.global_step % hparams['disc_interval'] == 0: - model_out = self.model_out_gt - mel_g = sample['mels'] - mel_p = model_out['mel_out'] - o = self.mel_disc(mel_g) - p, pc = o['y'], o['y_c'] - o_ = self.mel_disc(mel_p) - p_, pc_ = o_['y'], o_['y_c'] - if p_ is not None: - loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size())) - loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size())) - if pc_ is not None: - loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size())) - loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size())) - total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad]) - loss_output['batch_size'] = sample['txt_tokens'].size()[0] - return total_loss, loss_output - - def run_model(self, sample, infer=False, *args, **kwargs): - txt_tokens = sample['txt_tokens'] - word_tokens = sample['word_tokens'] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - if not infer: - output = self.model(txt_tokens, word_tokens, - ph2word=sample['ph2word'], - mel2word=sample['mel2word'], - mel2ph=sample['mel2ph'], - word_len=sample['word_lengths'].max(), - tgt_mels=sample['mels'], - pitch=sample.get('pitch'), - spk_embed=spk_embed, - spk_id=spk_id, - infer=False, - global_step=self.global_step, - graph_lst=sample['graph_lst'], - etypes_lst=sample['etypes_lst'], - bert_feats=sample.get("bert_feats"), - cl_feats=sample.get("cl_feats") - ) - losses = {} - losses['kl_v'] = output['kl'].detach() - losses_kl = output['kl'] - losses_kl = torch.clamp(losses_kl, min=hparams['kl_min']) - losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl - losses_kl = losses_kl * hparams['lambda_kl'] - losses['kl'] = losses_kl - - self.add_mel_loss(output['mel_out'], sample['mels'], losses) - if hparams['dur_level'] == 'word': - self.add_dur_loss( - output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) - self.get_attn_stats(output['attn'], sample, losses) - else: - 
super(PortaSpeechAdvTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) - return losses, output - else: - use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - output = self.model( - txt_tokens, word_tokens, - ph2word=sample['ph2word'], - word_len=sample['word_lengths'].max(), - pitch=sample.get('pitch'), - mel2ph=sample['mel2ph'] if use_gt_dur else None, - mel2word=sample['mel2word'] if use_gt_dur else None, - tgt_mels=sample['mels'], - infer=True, - spk_embed=spk_embed, - spk_id=spk_id, - graph_lst=sample['graph_lst'], - etypes_lst=sample['etypes_lst'], - bert_feats=sample.get("bert_feats"), - cl_feats=sample.get("cl_feats") - ) - return output - - def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None): - T = word_len.max() - dur_gt = mel2token_to_dur(mel2token, T).float() - nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float() - dur_pred = dur_pred * nonpadding - dur_gt = dur_gt * nonpadding - wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none') - wdur = (wdur * nonpadding).sum() / nonpadding.sum() - - if hparams['lambda_word_dur'] > 0: - losses['wdur'] = wdur * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - with torch.no_grad(): - # calculate word-level abs_dur_error in micro-second - abs_word_dur_error = F.l1_loss(dur_pred , dur_gt, reduction='none') - abs_word_dur_error = (abs_word_dur_error * nonpadding).sum() / nonpadding.sum() - abs_word_dur_error = abs_word_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 - losses['abs_word_dur_error'] = abs_word_dur_error - # calculate word-level abs_dur_error in second - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - abs_sent_dur_error = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean').mean() - abs_sent_dur_error = abs_sent_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] - losses['abs_sent_dur_error'] = abs_sent_dur_error - - def validation_step(self, sample, batch_idx): - outputs = {} - outputs['losses'] = {} - outputs['losses'], model_out = self.run_model(sample) - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = tensors_to_scalars(outputs) - if self.global_step % hparams['valid_infer_interval'] == 0 \ - and batch_idx < hparams['num_valid_plots']: - valid_results = self.save_valid_result(sample, batch_idx, model_out) - wav_gt = valid_results['wav_gt'] - mel_gt = valid_results['mel_gt'] - wav_pred = valid_results['wav_pred'] - mel_pred = valid_results['mel_pred'] - f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) - f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) - manhattan_distance = lambda x, y: np.abs(x - y) - dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance) - outputs['losses']['f0_dtw'] = dist / len(f0_gt_) - return outputs - - def save_valid_result(self, sample, batch_idx, model_out): - sr = hparams['audio_sample_rate'] - f0_gt = None - mel_out = model_out['mel_out'] - if sample.get('f0') is not None: - f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) - self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) - - # if self.global_step > 0: - wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) - 
self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) - # with gt duration - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) - dur_info = self.get_plot_dur_info(sample, model_out) - del dur_info['dur_pred'] - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - - # with pred duration - if not hparams['use_gt_dur']: - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) - dur_info = self.get_plot_dur_info(sample, model_out) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) - # gt wav - mel_gt = sample['mels'][0].cpu() - wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) - if self.global_step <= hparams['valid_infer_interval']: - self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) - - # add attn plot - if self.global_step > 0 and hparams['dur_level'] == 'word': - self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step) - - return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()} - - def get_attn_stats(self, attn, sample, logging_outputs, prefix=''): - # diagonal_focus_rate - txt_lengths = sample['txt_lengths'].float() - mel_lengths = sample['mel_lengths'].float() - src_padding_mask = sample['txt_tokens'].eq(0) - target_padding_mask = sample['mels'].abs().sum(-1).eq(0) - src_seg_mask = sample['txt_tokens'].eq(self.seg_idx) - attn_ks = txt_lengths.float() / mel_lengths.float() - - focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data - phone_coverage_rate = get_phone_coverage_rate( - attn, src_padding_mask, src_seg_mask, target_padding_mask).mean() - diagonal_focus_rate, diag_mask = get_diagonal_focus_rate( - attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask) - logging_outputs[f'{prefix}fr'] = focus_rate.mean().data - logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data - logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data - - def get_plot_dur_info(self, sample, model_out): - if hparams['dur_level'] == 'word': - T_txt = sample['word_lengths'].max() - dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0] - dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt - txt = sample['ph_words'][0].split(" ") - else: - T_txt = sample['txt_tokens'].shape[1] - dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0] - dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt - txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) - txt = txt.split(" ") - return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt} - - def build_optimizer(self, model): - - optimizer_gen = torch.optim.AdamW( - self.gen_params, - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - - optimizer_disc = torch.optim.AdamW( - self.disc_params, - lr=hparams['disc_lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - **hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else 
None - - return [optimizer_gen, optimizer_disc] - - def build_scheduler(self, optimizer): - return [ - FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler - torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler - **hparams["discriminator_scheduler_params"]), - ] - - def on_before_optimization(self, opt_idx): - if opt_idx == 0: - nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm']) - if self.use_bert: - nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm']) - nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm']) - else: - nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm']) - else: - nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"]) - - def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): - if self.scheduler is not None: - self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches']) - self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches']) - - ############ - # infer - ############ - def test_start(self): - super().test_start() - if hparams.get('save_attn', False): - os.makedirs(f'{self.gen_dir}/attn', exist_ok=True) - self.model.store_inverse_all() - - def test_step(self, sample, batch_idx): - assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference' - outputs = self.run_model(sample, infer=True) - text = sample['text'][0] - item_name = sample['item_name'][0] - tokens = sample['txt_tokens'][0].cpu().numpy() - mel_gt = sample['mels'][0].cpu().numpy() - mel_pred = outputs['mel_out'][0].cpu().numpy() - mel2ph = sample['mel2ph'][0].cpu().numpy() - mel2ph_pred = None - str_phs = self.token_encoder.decode(tokens, strip_padding=True) - base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]' - if text is not None: - base_fn += text.replace(":", "$3A")[:80] - base_fn = base_fn.replace(' ', '_') - gen_dir = self.gen_dir - wav_pred = self.vocoder.spec2wav(mel_pred) - self.saving_result_pool.add_job(self.save_result, args=[ - wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred]) - if hparams['save_gt']: - wav_gt = self.vocoder.spec2wav(mel_gt) - self.saving_result_pool.add_job(self.save_result, args=[ - wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph]) - if hparams.get('save_attn', False): - attn = outputs['attn'][0].cpu().numpy() - np.save(f'{gen_dir}/attn/{item_name}.npy', attn) - # save f0 for pitch dtw - f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) - f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) - np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_) - np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_) - - print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") - return { - 'item_name': item_name, - 'text': text, - 'ph_tokens': self.token_encoder.decode(tokens.tolist()), - 'wav_fn_pred': base_fn % 'P', - 'wav_fn_gt': base_fn % 'G', - } diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/share.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/share.ts deleted file mode 100644 index 4587669a10164aa7c961429fbddec9cf438c0eca..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/share.ts +++ /dev/null @@ -1,7 +0,0 @@ -export function share(url: string, title: string) { - if (navigator.share) { - navigator.share({ url, title }); - } else { - prompt("Copy this public url to share:", url); - } -} diff --git 
a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/ninepatch.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/ninepatch.js
deleted file mode 100644
index b312ffb33b47f5751afca1aaeb86be8fe4625db2..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/ninepatch.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import NinePatch from './gameobjects/rendertexture/ninepatch/NinePatch.js';
-export default NinePatch;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileSelectorButton.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileSelectorButton.d.ts
deleted file mode 100644
index 82bea2324e4be3ee27df75216da5425642e44321..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileSelectorButton.d.ts
+++ /dev/null
@@ -1,45 +0,0 @@
-import Label from '../label/Label';
-
-export default FileSelectorButton;
-
-declare namespace FileSelectorButton {
-    interface IConfig extends Label.IConfig {
-        accept?: string,
-        multiple?: boolean,
-    }
-}
-
-declare class FileSelectorButton extends Label {
-    constructor(
-        scene: Phaser.Scene,
-        config?: FileSelectorButton.IConfig
-    );
-
-    readonly files: File[];
-
-    setAccept(accept: string): this;
-
-    setMultiple(multiple?: boolean): this;
-
-    loadFile(
-        file: File,
-        loaderType: string,
-        key: string,
-        cacheType?: string
-    ): this;
-
-    loadFile(
-        file: File,
-        loaderType: string,
-        key: string,
-        cacheType?: string,
-        onComplete?: (data: any) => void
-    ): this;
-
-    loadFilePromise(
-        file: File,
-        loaderType: string,
-        key: string,
-        cacheType?: string
-    ): Promise<any>;
-}
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Pages.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Pages.d.ts
deleted file mode 100644
index e9f43d2e783d35ca1c1aadce66a3b007a85094b1..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Pages.d.ts
+++ /dev/null
@@ -1,72 +0,0 @@
-// import * as Phaser from 'phaser';
-import OverlapSizer from '../overlapsizer/OverlapSizer';
-
-
-export default Pages;
-
-declare namespace Pages {
-
-    type AlignTypes = number | 'center' | 'left' | 'right' | 'top' | 'bottom' |
-        'left-top' | 'left-center' | 'left-bottom' |
-        'center-top' | 'center-center' | 'center-bottom' |
-        'right-top' | 'right-center' | 'right-bottom';
-
-    type PaddingTypes = number |
-        {
-            left?: number,
-            right?: number,
-            top?: number,
-            bottom?: number,
-        };
-
-    interface IConfig extends OverlapSizer.IConfig {
-        fadeIn?: number,
-
-        swapMode?: 0 | 1 | 'invisible' | 'destroy',
-    }
-
-}
-
-declare class Pages extends OverlapSizer {
-    constructor(
-        scene: Phaser.Scene,
-        config?: Pages.IConfig
-    );
-
-    setSwapMode(
-        mode: 0 | 1 | 'invisible' | 'destroy'
-    ): this;
-
-    addPage(
-        gameObject: Phaser.GameObjects.GameObject,
-        config?: {
-            key?: string,
-
-            align?: Pages.AlignTypes,
-
-            padding?: Pages.PaddingTypes,
-
-            expand: boolean |
-                {
-                    width?: boolean,
-                    height?: boolean,
-                },
-
-            minWidth?: number,
-
-            minHeight?: number
-        }
-    ): this;
-
-    swapPage(
-        key: string,
-        fadeInDuration?: number
-    ): this;
-    currentKey: string;
-    readonly previousKey: string;
-    keys: string[];
-
-    getPage(key: string): Phaser.GameObjects.GameObject;
-    readonly
currentPage: Phaser.GameObjects.GameObject; - readonly previousPage: Phaser.GameObjects.GameObject; -} \ No newline at end of file diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/english.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, 
cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/AlekseyKorshuk/michellejieli-NSFW_text_classifier/README.md b/spaces/AlekseyKorshuk/michellejieli-NSFW_text_classifier/README.md deleted file mode 100644 index 3880097d59f3e2f4a31a5805504928a3a60975f1..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/michellejieli-NSFW_text_classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Michellejieli-NSFW Text Classifier -emoji: 🌍 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/ddpm.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/ddpm.md deleted file mode 100644 index 3efa603d1cae45daf9390454c9dcbeb9bf2f86cf..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/ddpm.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# DDPM - -[Denoising Diffusion Probabilistic Models](https://huggingface.co/papers/2006.11239) (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes a diffusion based model of the same name. In the 🤗 Diffusers library, DDPM refers to the *discrete denoising scheduler* from the paper as well as the pipeline. 
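-
-A minimal sampling sketch, assuming the public `google/ddpm-cat-256` checkpoint (any unconditional DDPM checkpoint on the Hub works the same way):
-
-```py
-from diffusers import DDPMPipeline
-
-# download the pretrained unconditional pipeline and run the full reverse process
-pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
-image = pipe(num_inference_steps=1000).images[0]  # 1000 steps is the DDPM default
-image.save("ddpm_sample.png")
-```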
-
-The abstract from the paper is:
-
-*We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN.*
-
-The original codebase can be found at [hojonathanho/diffusion](https://github.com/hojonathanho/diffusion).
-
-<Tip>
-
-Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
-</Tip>
-
-## DDPMPipeline
-[[autodoc]] DDPMPipeline
-  - all
-  - __call__
-
-## ImagePipelineOutput
-[[autodoc]] pipelines.ImagePipelineOutput
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/dependency_versions_check.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/dependency_versions_check.py
deleted file mode 100644
index 4f8578c52957bf6c06decb0d97d3139437f0078f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/dependency_versions_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
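-# Runtime sanity check: the imports below pull the pinned versions from
-# dependency_versions_table.py and verify them against the installed packages.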
-import sys - -from .dependency_versions_table import deps -from .utils.versions import require_version, require_version_core - - -# define which module versions we always want to check at run time -# (usually the ones defined in `install_requires` in setup.py) -# -# order specific notes: -# - tqdm must be checked before tokenizers - -pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split() -if sys.version_info < (3, 7): - pkgs_to_check_at_runtime.append("dataclasses") -if sys.version_info < (3, 8): - pkgs_to_check_at_runtime.append("importlib_metadata") - -for pkg in pkgs_to_check_at_runtime: - if pkg in deps: - if pkg == "tokenizers": - # must be loaded here, or else tqdm check may fail - from .utils import is_tokenizers_available - - if not is_tokenizers_available(): - continue # not required, check version only if installed - - require_version_core(deps[pkg]) - else: - raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") - - -def dep_version_check(pkg, hint=None): - require_version(deps[pkg], hint) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index a02a814fe2f08b464454e8eb6e1c88004ab804f6..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,27 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(depth=101), - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py deleted file mode 100644 index 95f4e91f203bad8367942fc24b838da9fbf62947..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py +++ /dev/null @@ -1,68 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - backbone=dict(norm_cfg=norm_cfg, norm_eval=False), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=(640, 640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(max_epochs=50) -evaluation = dict(interval=2) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py deleted file mode 100644 index 8b83722197c69a51907f43bcb05883deedc37f0c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)), - mask_roi_extractor=dict( - type='GenericRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_1x8_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_1x8_coco.py deleted file mode 100644 index d0e5ace280e1377ce4bb772df7e132427143bf34..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_1x8_coco.py +++ /dev/null @@ -1,160 +0,0 @@ -_base_ = '../_base_/default_runtime.py' - -# model settings -img_size = 550 -model = dict( - type='YOLACT', - 
pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, # do not freeze stem - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, # update the statistics of bn - zero_init_residual=False, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5, - upsample_cfg=dict(mode='bilinear')), - bbox_head=dict( - type='YOLACTHead', - num_classes=80, - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=3, - scales_per_octave=1, - base_sizes=[8, 16, 32, 64, 128], - ratios=[0.5, 1.0, 2.0], - strides=[550.0 / x for x in [69, 35, 18, 9, 5]], - centers=[(550 * 0.5 / x, 550 * 0.5 / x) - for x in [69, 35, 18, 9, 5]]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - reduction='none', - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), - num_head_convs=1, - num_protos=32, - use_ohem=True), - mask_head=dict( - type='YOLACTProtonet', - in_channels=256, - num_protos=32, - num_classes=80, - max_masks_to_train=100, - loss_mask_weight=6.125), - segm_head=dict( - type='YOLACTSegmHead', - num_classes=80, - in_channels=256, - loss_segm=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - # smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - iou_thr=0.5, - top_k=200, - max_per_img=100)) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(img_size, img_size), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 
'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.1, - step=[20, 42, 49, 52]) -runner = dict(type='EpochBasedRunner', max_epochs=55) -cudnn_benchmark = True -evaluation = dict(metric=['bbox', 'segm']) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py deleted file mode 100644 index 68e2b072e4b8d076e8c3e929dfdc73bcd24ce859..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_480x480_40k_pascal_context.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 6a9efc55ad2062facf3a568f8cdbba76c8c55950..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 6671fcb4bf8430bc0128cd93a4b8cedea1856b03..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py deleted file mode 100644 index 88041c6817d2cb152a979b71a2ce56a9e30b87b5..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context_59.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=59), - auxiliary_head=dict(num_classes=59), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/multimodal_embedder.py 
b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/multimodal_embedder.py
deleted file mode 100644
index 626077cb80987d66af90f390e31aa2f2def76fec..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/multimodal_embedder.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import base64
-import re
-from dataclasses import dataclass
-from io import BytesIO
-from typing import Any, List, Optional
-
-import torch
-from PIL import Image
-
-from extensions.multimodal.pipeline_loader import load_pipeline
-from modules import shared
-from modules.logging_colors import logger
-from modules.text_generation import encode, get_max_prompt_length
-
-
-@dataclass
-class PromptPart:
-    text: str
-    image: Optional[Image.Image] = None
-    is_image: bool = False
-    input_ids: Optional[torch.Tensor] = None
-    embedding: Optional[torch.Tensor] = None
-
-
-class MultimodalEmbedder:
-    def __init__(self, params: dict):
-        pipeline, source = load_pipeline(params)
-        self.pipeline = pipeline
-        logger.info(f'Multimodal: loaded pipeline {self.pipeline.name()} from pipelines/{source} ({self.pipeline.__class__.__name__})')
-
-    def _split_prompt(self, prompt: str, load_images: bool = False) -> List[PromptPart]:
-        """Splits a prompt into a list of `PromptParts` to separate image data from text.
-        It will also append `image_start` and `image_end` before and after the image, and optionally parse and load the images,
-        if `load_images` is `True`.
-        """
-        parts: List[PromptPart] = []
-        curr = 0
-        while True:
-            match = re.search(r'<img src="data:image/jpeg;base64,([A-Za-z0-9+/=]+)">', prompt[curr:])
-            if match is None:
-                # no more image tokens, append the rest of the prompt
-                if curr > 0:
-                    # add image end token after last image
-                    parts.append(PromptPart(text=self.pipeline.image_end() + prompt[curr:]))
-                else:
-                    parts.append(PromptPart(text=prompt))
-                break
-            # found an image, append image start token to the text
-            if match.start() > 0:
-                parts.append(PromptPart(text=prompt[curr:curr + match.start()] + self.pipeline.image_start()))
-            else:
-                parts.append(PromptPart(text=self.pipeline.image_start()))
-            # append the image
-            parts.append(PromptPart(
-                text=match.group(0),
-                image=Image.open(BytesIO(base64.b64decode(match.group(1)))) if load_images else None,
-                is_image=True
-            ))
-            curr += match.end()
-        return parts
-
-    def _len_in_tokens_prompt_parts(self, parts: List[PromptPart]) -> int:
-        """Total length in tokens of all `parts`"""
-        tokens = 0
-        for part in parts:
-            if part.is_image:
-                tokens += self.pipeline.num_image_embeds()
-            elif part.input_ids is not None:
-                tokens += len(part.input_ids)
-            else:
-                tokens += len(encode(part.text)[0])
-        return tokens
-
-    def len_in_tokens(self, prompt: str) -> int:
-        """Total length in tokens for a given text `prompt`"""
-        parts = self._split_prompt(prompt, False)
-        return self._len_in_tokens_prompt_parts(parts)
-
-    def _encode_single_text(self, part: PromptPart, add_bos_token: bool) -> PromptPart:
-        """Encode a single prompt `part` to `input_ids`.
Returns a `PromptPart`""" - if part.is_image: - placeholders = torch.ones((self.pipeline.num_image_embeds())) * self.pipeline.placeholder_token_id() - part.input_ids = placeholders.to(shared.model.device, dtype=torch.int64) - else: - part.input_ids = encode(part.text, add_bos_token=add_bos_token)[0].to(shared.model.device, dtype=torch.int64) - return part - - @staticmethod - def _num_images(parts: List[PromptPart]) -> int: - count = 0 - for part in parts: - if part.is_image: - count += 1 - return count - - def _encode_text(self, state, parts: List[PromptPart]) -> List[PromptPart]: - """Encode text to token_ids, also truncate the prompt, if necessary. - - The chat/instruct mode should make prompts that fit in get_max_prompt_length, but if max_new_tokens are set - such that the context + min_rows don't fit, we can get a prompt which is too long. - We can't truncate image embeddings, as it leads to broken generation, so remove the images instead and warn the user - """ - encoded: List[PromptPart] = [] - for i, part in enumerate(parts): - encoded.append(self._encode_single_text(part, i == 0 and state['add_bos_token'])) - - # truncation: - max_len = get_max_prompt_length(state) - removed_images = 0 - - # 1. remove entire text/image blocks - while self._len_in_tokens_prompt_parts(encoded[1:]) > max_len: - if encoded[0].is_image: - removed_images += 1 - encoded = encoded[1:] - - # 2. check if the last prompt part doesn't need to get truncated - if self._len_in_tokens_prompt_parts(encoded) > max_len: - if encoded[0].is_image: - # don't truncate image embeddings, just remove the image, otherwise generation will be broken - removed_images += 1 - encoded = encoded[1:] - elif len(encoded) > 1 and encoded[0].text.endswith(self.pipeline.image_start()): - # see if we can keep image_start token - len_image_start = len(encode(self.pipeline.image_start(), add_bos_token=state['add_bos_token'])[0]) - if self._len_in_tokens_prompt_parts(encoded[1:]) + len_image_start > max_len: - # we can't -> remove this text, and the image - encoded = encoded[2:] - removed_images += 1 - else: - # we can -> just truncate the text - trunc_len = self._len_in_tokens_prompt_parts(encoded) - max_len - encoded[0].input_ids = encoded[0].input_ids[trunc_len:] - elif len(encoded) > 0: - # only one text left, truncate it normally - trunc_len = self._len_in_tokens_prompt_parts(encoded) - max_len - encoded[0].input_ids = encoded[0].input_ids[trunc_len:] - - # notify user if we truncated an image - if removed_images > 0: - logger.warning(f"Multimodal: removed {removed_images} image(s) from prompt. 
Try decreasing max_new_tokens if generation is broken") - - return encoded - - def _embed(self, parts: List[PromptPart]) -> List[PromptPart]: - # batch images - image_indicies = [i for i, part in enumerate(parts) if part.is_image] - embedded = self.pipeline.embed_images([parts[i].image for i in image_indicies]) - for i, embeds in zip(image_indicies, embedded): - parts[i].embedding = embeds - # embed text - for (i, part) in enumerate(parts): - if not part.is_image: - parts[i].embedding = self.pipeline.embed_tokens(part.input_ids) - return parts - - def _remove_old_images(self, parts: List[PromptPart], params: dict) -> List[PromptPart]: - if params['add_all_images_to_prompt']: - return parts - already_added = False - for i, part in reversed(list(enumerate(parts))): - if part.is_image: - if already_added: - parts[i].embedding = self.pipeline.placeholder_embeddings() - else: - already_added = True - return parts - - def forward(self, prompt: str, state: Any, params: dict): - prompt_parts = self._split_prompt(prompt, True) - prompt_parts = self._encode_text(state, prompt_parts) - prompt_parts = self._embed(prompt_parts) - prompt_parts = self._remove_old_images(prompt_parts, params) - embeds = tuple(part.embedding for part in prompt_parts) - ids = tuple(part.input_ids for part in prompt_parts) - input_embeds = torch.cat(embeds, dim=0) - input_ids = torch.cat(ids, dim=0) - return prompt, input_ids, input_embeds, self._num_images(prompt_parts) diff --git a/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/__init__.py b/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Arnx/MusicGenXvAKN/audiocraft/quantization/__init__.py b/spaces/Arnx/MusicGenXvAKN/audiocraft/quantization/__init__.py deleted file mode 100644 index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/Arnx/MusicGenXvAKN/tests/models/test_musicgen.py b/spaces/Arnx/MusicGenXvAKN/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) 
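-        # extend_stride only takes effect when duration exceeds the model's
-        # max_duration; test_generate_long below exercises that path.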
- return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/Artrajz/vits-simple-api/bert_vits2/text/bert_handler.py b/spaces/Artrajz/vits-simple-api/bert_vits2/text/bert_handler.py deleted file mode 100644 index fb5c79090966eda18c7e932e2ad0636452ac06ad..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/bert_vits2/text/bert_handler.py +++ /dev/null @@ -1,33 +0,0 @@ -import importlib - - -class BertHandler: - _bert_functions = {} - - BERT_IMPORT_MAP = { - "zh": "bert_vits2.text.chinese_bert.get_bert_feature", - "en": "bert_vits2.text.english_bert_mock.get_bert_feature", - "ja": "bert_vits2.text.japanese_bert.get_bert_feature", - } - - def __init__(self, languages): - for lang in languages: - if lang not in BertHandler._bert_functions: - self.load_bert_function(lang) - - def load_bert_function(self, language): - if language not in BertHandler.BERT_IMPORT_MAP: - raise ValueError(f"Unsupported language: {language}") - - module_path, function_name = BertHandler.BERT_IMPORT_MAP[language].rsplit('.', 1) - module = importlib.import_module(module_path, package=__package__) - bert_function = getattr(module, function_name) - - BertHandler._bert_functions[language] = bert_function - - def get_bert(self, norm_text, word2ph, language): - if language not in BertHandler._bert_functions: - raise ValueError(f"BERT for {language} has not been initialized. Please initialize first.") - - bert_func = BertHandler._bert_functions[language] - return bert_func(norm_text, word2ph) diff --git a/spaces/Atualli/yoloxTeste/configs/yolox_m.py b/spaces/Atualli/yoloxTeste/configs/yolox_m.py deleted file mode 100644 index 9666a31177b9cc1c94978f9867aaceac8ddebce2..0000000000000000000000000000000000000000 --- a/spaces/Atualli/yoloxTeste/configs/yolox_m.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. 
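-# The "M" size of YOLOX is expressed through the depth/width multipliers set
-# below, which scale the block count and channel widths of the base Exp network.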
-
-import os
-
-from yolox.exp import Exp as MyExp
-
-
-class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.67
        self.width = 0.75
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
diff --git a/spaces/Ayushnangia/Whispercpp_yt/README.md b/spaces/Ayushnangia/Whispercpp_yt/README.md
deleted file mode 100644
index 4d6aca0fe068683bd50677305d410a25820b1d54..0000000000000000000000000000000000000000
--- a/spaces/Ayushnangia/Whispercpp_yt/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Whispercpp Yt
-emoji: 🐠
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Banbri/zcvzcv/src/components/ui/separator.tsx b/spaces/Banbri/zcvzcv/src/components/ui/separator.tsx
deleted file mode 100644
index a6ed83ef827829cf42a7b27d1d5714b4473bd1c5..0000000000000000000000000000000000000000
--- a/spaces/Banbri/zcvzcv/src/components/ui/separator.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as SeparatorPrimitive from "@radix-ui/react-separator"
-
-import { cn } from "@/lib/utils"
-
-const Separator = React.forwardRef<
-  React.ElementRef<typeof SeparatorPrimitive.Root>,
-  React.ComponentPropsWithoutRef<typeof SeparatorPrimitive.Root>
->(
-  (
-    { className, orientation = "horizontal", decorative = true, ...props },
-    ref
-  ) => (
-    <SeparatorPrimitive.Root
-      ref={ref}
-      decorative={decorative}
-      orientation={orientation}
-      className={cn(
-        "shrink-0 bg-border",
-        orientation === "horizontal" ? "h-[1px] w-full" : "h-full w-[1px]",
-        className
-      )}
-      {...props}
-    />
-  )
-)
-Separator.displayName = SeparatorPrimitive.Root.displayName
-
-export { Separator }
diff --git a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_537227KB.py
deleted file mode 100644
index a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_537227KB.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import torch
-import numpy as np
-from torch import nn
-import torch.nn.functional as F
-
-from .
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/BernardoOlisan/vqganclip/CLIP/data/yfcc100m.md b/spaces/BernardoOlisan/vqganclip/CLIP/data/yfcc100m.md deleted file mode 100644 index 575c54bc4bab3972878291c8d227a313c9fc766e..0000000000000000000000000000000000000000 --- a/spaces/BernardoOlisan/vqganclip/CLIP/data/yfcc100m.md +++ /dev/null @@ -1,14 +0,0 @@ -# The YFCC100M Subset - -In the paper, we performed a dataset ablation using a subset of the YFCC100M dataset and showed that the performance remained largely 
similar.
-
-The subset contains 14,829,396 images, about 15% of the full dataset, which have been filtered to only keep those with natural language titles and/or descriptions in English.
-
-We provide the list of (line number, photo identifier, photo hash) of each image contained in this subset. These correspond to the first three columns in the dataset's metadata TSV file.
-
-```
-wget https://openaipublic.azureedge.net/clip/data/yfcc100m_subset_data.tsv.bz2
-bunzip2 yfcc100m_subset_data.tsv.bz2
-```
-
-Use of the underlying media files is subject to the Creative Commons licenses chosen by their creators/uploaders. For more information about the YFCC100M dataset, visit [the official website](https://multimediacommons.wordpress.com/yfcc100m-core-dataset/).
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/base.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/base.py
deleted file mode 100644
index 3f9f896e632e929a63e9724ab80ecdfc9761b795..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/base.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import functools
-import os
-import site
-import sys
-import sysconfig
-import typing
-
-from pip._internal.exceptions import InstallationError
-from pip._internal.utils import appdirs
-from pip._internal.utils.virtualenv import running_under_virtualenv
-
-# Application Directories
-USER_CACHE_DIR = appdirs.user_cache_dir("pip")
-
-# FIXME doesn't account for venv linked to global site-packages
-site_packages: str = sysconfig.get_path("purelib")
-
-
-def get_major_minor_version() -> str:
-    """
-    Return the major-minor version of the current Python as a string, e.g.
-    "3.7" or "3.10".
-    """
-    return "{}.{}".format(*sys.version_info)
-
-
-def change_root(new_root: str, pathname: str) -> str:
-    """Return 'pathname' with 'new_root' prepended.
-
-    If 'pathname' is relative, this is equivalent to os.path.join(new_root, pathname).
-    Otherwise, it requires making 'pathname' relative and then joining the
-    two, which is tricky on DOS/Windows and Mac OS.
-
-    This is borrowed from Python's standard library's distutils module.
-    """
-    if os.name == "posix":
-        if not os.path.isabs(pathname):
-            return os.path.join(new_root, pathname)
-        else:
-            return os.path.join(new_root, pathname[1:])
-
-    elif os.name == "nt":
-        (drive, path) = os.path.splitdrive(pathname)
-        if path[0] == "\\":
-            path = path[1:]
-        return os.path.join(new_root, path)
-
-    else:
-        raise InstallationError(
-            f"Unknown platform: {os.name}\n"
-            "Can not change root path prefix on unknown platform."
-        )
-
-
-def get_src_prefix() -> str:
-    if running_under_virtualenv():
-        src_prefix = os.path.join(sys.prefix, "src")
-    else:
-        # FIXME: keep src in cwd for now (it is not a temporary folder)
-        try:
-            src_prefix = os.path.join(os.getcwd(), "src")
-        except OSError:
-            # In case the current working directory has been renamed or deleted
-            sys.exit("The folder you are executing pip from can no longer be found.")
-
-    # under macOS + virtualenv sys.prefix is not properly resolved
-    # it is something like /path/to/python/bin/..
-    return os.path.abspath(src_prefix)
-
-
-try:
-    # Use getusersitepackages if this is present, as it ensures that the
-    # value is initialised properly.
- user_site: typing.Optional[str] = site.getusersitepackages() -except AttributeError: - user_site = site.USER_SITE - - -@functools.lru_cache(maxsize=None) -def is_osx_framework() -> bool: - return bool(sysconfig.get_config_var("PYTHONFRAMEWORK")) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/highlighter.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/highlighter.py deleted file mode 100644 index c2646794a98578bdb735f5047dbc6b1d50b90230..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/highlighter.py +++ /dev/null @@ -1,232 +0,0 @@ -import re -from abc import ABC, abstractmethod -from typing import List, Union - -from .text import Span, Text - - -def _combine_regex(*regexes: str) -> str: - """Combine a number of regexes in to a single regex. - - Returns: - str: New regex with all regexes ORed together. - """ - return "|".join(regexes) - - -class Highlighter(ABC): - """Abstract base class for highlighters.""" - - def __call__(self, text: Union[str, Text]) -> Text: - """Highlight a str or Text instance. - - Args: - text (Union[str, ~Text]): Text to highlight. - - Raises: - TypeError: If not called with text or str. - - Returns: - Text: A test instance with highlighting applied. - """ - if isinstance(text, str): - highlight_text = Text(text) - elif isinstance(text, Text): - highlight_text = text.copy() - else: - raise TypeError(f"str or Text instance required, not {text!r}") - self.highlight(highlight_text) - return highlight_text - - @abstractmethod - def highlight(self, text: Text) -> None: - """Apply highlighting in place to text. - - Args: - text (~Text): A text object highlight. - """ - - -class NullHighlighter(Highlighter): - """A highlighter object that doesn't highlight. - - May be used to disable highlighting entirely. - - """ - - def highlight(self, text: Text) -> None: - """Nothing to do""" - - -class RegexHighlighter(Highlighter): - """Applies highlighting from a list of regular expressions.""" - - highlights: List[str] = [] - base_style: str = "" - - def highlight(self, text: Text) -> None: - """Highlight :class:`rich.text.Text` using regular expressions. - - Args: - text (~Text): Text to highlighted. - - """ - - highlight_regex = text.highlight_regex - for re_highlight in self.highlights: - highlight_regex(re_highlight, style_prefix=self.base_style) - - -class ReprHighlighter(RegexHighlighter): - """Highlights the text typically produced from ``__repr__`` methods.""" - - base_style = "repr." 
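-    # Each named capture group below becomes a style name prefixed with
-    # base_style, e.g. the "number" group is rendered with style "repr.number".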
-    highlights = [
-        r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*)(?P<tag_end>>)",
-        r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
-        r"(?P<brace>[][{}()])",
-        _combine_regex(
-            r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
-            r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
-            r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
-            r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
-            r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
-            r"(?P<call>[\w.]*?)\(",
-            r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
-            r"(?P<ellipsis>\.\.\.)",
-            r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
-            r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
-            r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
-            r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
-            r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)",
-        ),
-    ]
-
-
-class JSONHighlighter(RegexHighlighter):
-    """Highlights JSON"""
-
-    # Captures the start and end of JSON strings, handling escaped quotes
-    JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
-    JSON_WHITESPACE = {" ", "\n", "\r", "\t"}
-
-    base_style = "json."
-    highlights = [
-        _combine_regex(
-            r"(?P<brace>[\{\[\(\)\]\}])",
-            r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
-            r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
-            JSON_STR,
-        ),
-    ]
-
-    def highlight(self, text: Text) -> None:
-        super().highlight(text)
-
-        # Additional work to handle highlighting JSON keys
-        plain = text.plain
-        append = text.spans.append
-        whitespace = self.JSON_WHITESPACE
-        for match in re.finditer(self.JSON_STR, plain):
-            start, end = match.span()
-            cursor = end
-            while cursor < len(plain):
-                char = plain[cursor]
-                cursor += 1
-                if char == ":":
-                    append(Span(start, end, "json.key"))
-                elif char in whitespace:
-                    continue
-                break
-
-
-class ISO8601Highlighter(RegexHighlighter):
-    """Highlights the ISO8601 date time strings.
-    Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
-    """
-
-    base_style = "iso8601."
-    highlights = [
-        #
-        # Dates
-        #
-        # Calendar month (e.g. 2008-08). The hyphen is required
-        r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
-        # Calendar date w/o hyphens (e.g. 20080830)
-        r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
-        # Ordinal date (e.g. 2008-243). The hyphen is optional
-        r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
-        #
-        # Weeks
-        #
-        # Week of the year (e.g., 2008-W35). The hyphen is optional
-        r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
-        # Week date (e.g., 2008-W35-6). The hyphens are optional
-        r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
-        #
-        # Times
-        #
-        # Hours and minutes (e.g., 17:21). The colon is optional
-        r"^(?P